<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="discussion">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Med.</journal-id>
<journal-title>Frontiers in Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Med.</abbrev-journal-title>
<issn pub-type="epub">2296-858X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmed.2024.1514741</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Medicine</subject>
<subj-group>
<subject>Opinion</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Bridging the gap in AI integration: enhancing clinician education and establishing pharmaceutical-level regulation for ethical healthcare</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Perrella</surname> <given-names>Alessandro</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1955491/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Bernardi</surname> <given-names>Francesca F.</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/276210/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Bisogno</surname> <given-names>Massimo</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Trama</surname> <given-names>Ugo</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>I UOC Emerging Infectious Disease and High Contagiousness, AORN Ospedali dei Colli&#x02014;P.O. D. Cotugno</institution>, <addr-line>Naples</addr-line>, <country>Italy</country></aff>
<aff id="aff2"><sup>2</sup><institution>Coordination of the Regional Health System, General Directorate for Health Protection</institution>, <addr-line>Naples</addr-line>, <country>Italy</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Experimental Medicine, University of Campania Luigi Vanvitelli</institution>, <addr-line>Naples</addr-line>, <country>Italy</country></aff>
<aff id="aff4"><sup>4</sup><institution>Regional Special Office for Digital Transformation, Campania Region</institution>, <addr-line>Naples</addr-line>, <country>Italy</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Giovanni Maio, University of Freiburg, Germany</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Adamantios Koumpis, University Hospital of Cologne, Germany</p>
<p>Luk Arbuckle, IQVIA Applied AI Science, Canada</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Alessandro Perrella <email>alessandro.perrella&#x00040;ospedalideicolli.it</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>19</day>
<month>12</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>11</volume>
<elocation-id>1514741</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>10</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>09</day>
<month>12</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2024 Perrella, Bernardi, Bisogno and Trama.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Perrella, Bernardi, Bisogno and Trama</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<kwd-group>
<kwd>regulatory drug development</kwd>
<kwd>European Medicines Agency (EMA)</kwd>
<kwd>artificial intelligence</kwd>
<kwd>antimicrobial stewardship (AMS)</kwd>
<kwd>ethic and development</kwd>
<kwd>ethic</kwd>
<kwd>medical AI governance</kwd>
<kwd>pharmaceutical validation</kwd>
</kwd-group>
<counts>
<fig-count count="1"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="11"/>
<page-count count="5"/>
<word-count count="2539"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Regulatory Science</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1 Introduction</title>
<p>Recently, the role of AI in healthcare has been deeply studied and discussed in scientific literature. Promising applications of artificial intelligence and machine learning (AI/ML) are revolutionizing both clinical and administrative domains, with significant advancements demonstrated in drug discovery, precise analysis and interpretation of radiological images, early and accurate sepsis detection, efficient hospital resource management, automated documentation of clinical encounters and decision support systems (DSS). These use cases underscore the immense potential of AI/ML to enhance efficiency, accuracy, and outcomes across the healthcare spectrum (<xref ref-type="bibr" rid="B1">1</xref>). However, a very recent article raises essential considerations about the adoption and regulation of AI in clinical settings (<xref ref-type="bibr" rid="B2">2</xref>). Indeed, the integration of Artificial Intelligence (AI) in healthcare presents numerous opportunities and challenges. However, there is a significant gap between clinician education regarding AI and the regulatory measures necessary for ethical deployment. To address this gap effectively, a structured, organized approach must be followed, encompassing clearly defined steps for both the education of clinicians and the establishment of rigorous regulatory frameworks. This paper argues that bridging this gap requires a dual approach: enhancing clinicians&#x00027; understanding of AI technologies and treating AI systems as rigorously as pharmaceuticals through strict regulatory processes. By doing so, we can foster ethical and effective AI integration into clinical practice, ensuring patient safety and better healthcare outcomes. Here, we outline four key considerations that should guide the planning and regulation of AI integration in healthcare.</p></sec>
<sec id="s2">
<title>2 Expanding AI education among clinicians</title>
<p>Looking at current clinical practice in healthcare, and given the increasing diffusion of AI, the knowledge of physicians about this new technology is arguably limited. In fact, despite the increasing prevalence of AI in healthcare, many clinicians remain inadequately educated about what AI entails, its limitations and its implications too (<xref ref-type="bibr" rid="B1">1</xref>). Given that clinicians play a central role in patient care, a comprehensive AI education program should be a priority. Education initiatives should focus not only on how AI systems operate but also on how their regulatory framework should evolve, as for pharmaceuticals. This lack of understanding represents a significant barrier to the responsible adoption of AI technologies in clinical practice. To effectively bridge this gap, clinician education must cover both technical and ethical aspects of AI and should be part of medical degree course as well. A deeper understanding of the processes involved in validating AI tools can empower clinicians to participate meaningfully in discussions about the safety and efficacy of these systems. By building a foundational knowledge of AI, clinicians will be better prepared to evaluate and use AI tools within an ethical context and advocate for appropriate use and safety measures (<xref ref-type="bibr" rid="B3">3</xref>). This approach could be compared to that currently used to improve clinical or healthcare activity against antimicrobial resistance (AMR) or as for antimicrobial stewardship (<xref ref-type="bibr" rid="B4">4</xref>).</p></sec>
<sec id="s3">
<title>3 Understanding legal and ethical implications</title>
<p>Legal and ethical education must be extended beyond decision-makers and regulators to the clinicians who interact directly with AI systems. For clinicians to use AI responsibly, they must understand the legal implications, such as data privacy, accountability, and the ethical risks of biases<xref ref-type="fn" rid="fn0001"><sup>1</sup></xref><sup>,</sup><xref ref-type="fn" rid="fn0002"><sup>2</sup></xref> (<xref ref-type="bibr" rid="B5">5</xref>). The recent European Union Artificial Intelligence Act lays out a regulatory framework for AI; however, this information is not often communicated to those on the front lines of healthcare (<xref ref-type="bibr" rid="B3">3</xref>). Educating clinicians about such regulatory initiatives is crucial to align their practical use of AI with ethical guidelines.<xref ref-type="fn" rid="fn0001"><sup>1</sup></xref> As they navigate the clinical environment, an understanding of legal parameters will help them mitigate risks and ensure that AI integration prioritizes patient safety and respects privacy rights.</p></sec>
<sec id="s4">
<title>4 AI should be managed like a drug</title>
<p>There is a growing consensus that AI systems used in healthcare should be regulated in a manner similar to pharmaceuticals. Just as pharmaceuticals undergo a series of clinical trials to evaluate safety, efficacy, and ethical considerations, AI technologies should also follow rigorous validation processes before widespread implementation.</p>
<p><bold>Parallels to pharmaceutical validation</bold>:</p>
<list list-type="bullet">
<list-item><p><bold>Phases of testing</bold>: AI could adopt phases similar to drug development&#x02014;preclinical (testing in controlled environments), Phase I (safety in small clinical settings), Phase II (efficacy trials), and Phase III (large-scale clinical testing). This structure would ensure that AI is tested for both safety and effectiveness in diverse, real-world environments.</p></list-item>
<list-item><p><bold>Classification</bold>: AI tools could be classified as drugs according to a system like ATC. For instance, according to the type of AI tool [drug discovery, precise analysis and interpretation of radiological images, decision support system (DSS)], we could give them a code and a related regulatory activity.</p></list-item>
<list-item><p><bold>Risk assessments</bold>: Like drugs, AI must undergo thorough risk assessments, which include evaluating potential biases, unintended outcomes, and ethical implications.</p></list-item>
<list-item><p><bold>Regulatory oversight</bold>: A dedicated regulatory agency&#x02014;potentially within the European Medicines Agency (EMA)&#x02014;should combine expertise from both medical and engineering domains. This mixed approach would ensure a balanced evaluation of both the medical efficacy and technical performance of AI tools.</p></list-item>
</list>
<p>Finally, as in pharmaceuticals, AI systems should come with comprehensive documentation, including a &#x0201C;<bold>Summary of Product Characteristics&#x0201D; and &#x0201C;Package Leaflet&#x0201D;</bold> that outlines their intended use, limitations, and instructions for safe implementation. This approach will standardize AI information, enabling healthcare providers to make informed decisions based on clear guidance (<xref ref-type="bibr" rid="B6">6</xref>).</p>
<p>However, the two frameworks, Pharmaceuticals and AI, have some substantial differences (<xref ref-type="table" rid="T1">Table 1</xref>). In fact, while for innovative drugs the time required for approval for clinical use is usually 9.1 years (<xref ref-type="bibr" rid="B7">7</xref>), AI technologies have shorter development cycles compared to traditional drugs due to iterative model improvements and less dependency on long-term biological testing. This rapid pace requires an expedited but still rigorous framework for validation and deployment. Unlike drugs, AI tools benefit from iterative deployment, allowing for updates and enhancements after initial deployment based on real-world performance and user feedback, which means ongoing evaluation is critical. AI tools also offer significant advantages in scalability and adaptability. They can rapidly scale across diverse clinical environments and adjust to new datasets as they become available, setting them apart from more static pharmacological solutions. Therefore, AI should be managed and regulated as a Drug but according to a <bold>Phase of Testing</bold>, <bold>Risk Assessment</bold> and <bold>Regulatory Oversight</bold> being based on specific tools for AI (<xref ref-type="bibr" rid="B8">8</xref>) (<xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Key differences between drug and AI evaluation.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Aspect</bold></th>
<th valign="top" align="left"><bold>Drugs</bold></th>
<th valign="top" align="left"><bold>AI systems</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Development</td>
<td valign="top" align="left">Fixed chemical or biological entity, formula remains constant throughout.</td>
<td valign="top" align="left">Iterative, data-driven models that evolve with new data and retraining.</td>
</tr> <tr>
<td valign="top" align="left">Testing approach</td>
<td valign="top" align="left">Static population and fixed protocols in controlled trials.</td>
<td valign="top" align="left">Dynamic testing, adapting to real-world scenarios and diverse populations.</td>
</tr> <tr>
<td valign="top" align="left">Deployment</td>
<td valign="top" align="left">Single approval process with fixed guidelines for use.</td>
<td valign="top" align="left">Continuous deployment with ongoing validation, monitoring, and updates.</td>
</tr> <tr>
<td valign="top" align="left">Regulation</td>
<td valign="top" align="left">Linear, phase-based evaluation (preclinical to post-marketing).</td>
<td valign="top" align="left">Cyclical, iterative evaluation requiring periodic reassessments and real-time monitoring.</td>
</tr> <tr>
<td valign="top" align="left">Impact</td>
<td valign="top" align="left">Direct physiological or biochemical impact on patients.</td>
<td valign="top" align="left">Indirect influence through decision-making, workflow optimization, and recommendations.</td>
</tr> <tr>
<td valign="top" align="left">Failure impact</td>
<td valign="top" align="left">Specific adverse effects, often localized to the drug.</td>
<td valign="top" align="left">Systematic risks like bias amplification, incorrect predictions, or workflow disruption.</td>
</tr> <tr>
<td valign="top" align="left">Lifecycle</td>
<td valign="top" align="left">Fixed lifecycle, with few post-market changes.</td>
<td valign="top" align="left">Continuous lifecycle, requiring retraining, updates, and adaptation to new contexts.</td>
</tr> <tr>
<td valign="top" align="left">Validation metrics</td>
<td valign="top" align="left">Efficacy and safety measured against standard endpoints in trials.</td>
<td valign="top" align="left">Performance measured by accuracy, precision, recall, and real-world outcome improvements.</td>
</tr> <tr>
<td valign="top" align="left">Monitoring</td>
<td valign="top" align="left">Post-marketing surveillance for adverse effects.</td>
<td valign="top" align="left">Continuous monitoring with feedback loops and performance optimization.</td>
</tr> <tr>
<td valign="top" align="left">Human involvement</td>
<td valign="top" align="left">Limited after initial trials (patient compliance is key).</td>
<td valign="top" align="left">High, with human-in-the-loop design during development, deployment, and monitoring.</td>
</tr></tbody>
</table>
</table-wrap>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Comparison of drug and AI frameworks. The figure shows differences between the frameworks of drug and AI development, with several differences that highlight the need to find for AI a validation and deployment system that is as quick as it is secure. The drug development timetable process is based on Brown et al. (<xref ref-type="bibr" rid="B7">7</xref>).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-11-1514741-g0001.tif"/>
</fig>
<p>The figure shows differences between the frameworks of drug and AI development, with several differences that highlight the need to find for AI a validation and deployment system that is as quick as it is secure.</p></sec>
<sec id="s5">
<title>5 Economic factors should not compromise ethical standards</title>
<p>While the commercialization of AI in healthcare is inevitable, ethical standards must not be compromised for economic gain. Proper governance, including stringent oversight by a mixed medical-engineering regulatory body, will ensure that AI systems adhere to healthcare&#x00027;s core ethical principles. Regulatory frameworks must ensure that <bold>economic motivations should drive innovation but must always be secondary to patient safety</bold>, quality of care, and ethical obligations. This balance will foster trust in AI technologies among both clinicians and patients (<xref ref-type="bibr" rid="B9">9</xref>).</p></sec>
<sec sec-type="conclusions" id="s6">
<title>6 Conclusion</title>
<p>Artificial intelligence (AI) offers immense potential to transform healthcare, from improving patient outcomes to enhancing clinical workflows and driving medical innovation. However, its successful integration into healthcare requires overcoming several key challenges, including gaps in clinician education, the need for ethical governance, and the absence of tailored regulatory frameworks. This paper argues for regulating AI with the same rigor as pharmaceuticals, incorporating validation phases, risk assessments, and detailed documentation, while adapting these processes to AI&#x00027;s fast and iterative development cycles.</p>
<p>A critical first step is educating clinicians about AI. Many healthcare professionals lack a clear understanding of how AI systems work, their limitations, and the ethical and legal issues they raise. Comprehensive training programs are needed to build this knowledge. Such education should focus not only on technical aspects but also on teaching clinicians how to assess AI tools for safety, effectiveness, and ethical implications. This would empower clinicians to confidently use AI in their practice, ensuring that its benefits are fully realized while safeguarding patient trust.</p>
<p>Equally important is the development of regulatory frameworks that match AI&#x00027;s unique characteristics. Unlike pharmaceuticals, which follow a linear path from development to deployment, AI evolves continuously. Regulations must therefore balance rapid innovation with robust oversight, ensuring AI systems are safe, effective, and free from bias. This approach requires collaboration between healthcare professionals, technology developers, and regulators to create guidelines that are both practical and ethical.</p>
<p>Economic pressures should not overshadow the ethical responsibilities involved in AI integration. While commercialization drives innovation, patient safety and quality of care must always come first. By fostering partnerships between industry and healthcare that prioritize ethical principles, we can build trust in AI technologies and their use in clinical practice.</p>
<p>To address these challenges, the following steps should guide AI integration:</p>
<list list-type="order">
<list-item><p>Establish clear and practical processes for validating and monitoring AI systems, drawing inspiration from pharmaceutical regulation but tailoring these to AI&#x00027;s specific needs.</p></list-item>
<list-item><p>Develop accessible training programs for clinicians, focusing on building their confidence and competence in using AI tools.</p></list-item>
<list-item><p>Support pilot projects and real-world case studies to demonstrate how AI can be safely and effectively used in different healthcare settings.</p></list-item>
</list>
<p>By taking this structured and adaptive approach, we can bridge the current gaps in AI integration. This will ensure that AI evolves responsibly, supporting healthcare providers and benefiting patients while maintaining the highest ethical standards. Ultimately, this balanced strategy will enable AI to fulfill its promise of transforming healthcare in a way that is safe, effective, and equitable (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B11">11</xref>).</p></sec>
</body>
<back>
<sec sec-type="author-contributions" id="s7">
<title>Author contributions</title>
<p>AP: Conceptualization, Formal analysis, Supervision, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. FB: Methodology, Writing &#x02013; review &#x00026; editing. MB: Conceptualization, Writing &#x02013; review &#x00026; editing. UT: Resources, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<sec sec-type="funding-information" id="s8">
<title>Funding</title>
<p>The author(s) declare that no financial support was received for the research, authorship, and/or publication of this article.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s9">
<title>Generative AI statement</title>
<p>The author(s) declare that no Gen AI was used in the creation of this manuscript.</p></sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn id="fn0001"><p><sup>1</sup><ext-link ext-link-type="uri" xlink:href="https://eur-lex.europa.eu/eli/reg/2017/746/oj">https://eur-lex.europa.eu/eli/reg/2017/746/oj</ext-link></p></fn>
<fn id="fn0002"><p><sup>2</sup><ext-link ext-link-type="uri" xlink:href="https://www.isaca.org/resources/isaca-journal/issues/2023/volume-2/the-potential-impact-of-the-european-commissions-proposed-ai-act-on-smes">https://www.isaca.org/resources/isaca-journal/issues/2023/volume-2/the-potential-impact-of-the-european-commissions-proposed-ai-act-on-smes</ext-link></p></fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rajpurkar</surname> <given-names>P</given-names></name> <name><surname>Chen</surname> <given-names>E</given-names></name> <name><surname>Banerjee</surname> <given-names>O</given-names></name> <name><surname>Topol</surname> <given-names>EJ</given-names></name></person-group>. <article-title>AI in health and medicine</article-title>. <source>Nat Med.</source> (<year>2022</year>) <volume>28</volume>:<fpage>31</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1038/s41591-021-01614-0</pub-id><pub-id pub-id-type="pmid">35058619</pub-id></citation></ref>
<ref id="B2">
<label>2.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Busch</surname> <given-names>F</given-names></name> <name><surname>Kather</surname> <given-names>JN</given-names></name> <name><surname>Johner</surname> <given-names>C</given-names></name> <name><surname>Moser</surname> <given-names>M</given-names></name> <name><surname>Truhn</surname> <given-names>D</given-names></name> <name><surname>Adams</surname> <given-names>LC</given-names></name> <etal/></person-group>. <article-title>Navigating the European Union Artificial Intelligence Act for Healthcare</article-title>. <source>npj Digit Med.</source> (<year>2024</year>) <volume>7</volume>:<fpage>210</fpage>. <pub-id pub-id-type="doi">10.1038/s41746-024-01213-6</pub-id><pub-id pub-id-type="pmid">39134637</pub-id></citation></ref>
<ref id="B3">
<label>3.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gilbert</surname> <given-names>S</given-names></name></person-group>. <article-title>The EU passes the AI Act and its implications for digital medicine are unclear</article-title>. <source>NPJ Digit Med.</source> (<year>2024</year>) <volume>7</volume>:<fpage>135</fpage>. <pub-id pub-id-type="doi">10.1038/s41746-024-01116-6</pub-id><pub-id pub-id-type="pmid">38778162</pub-id></citation></ref>
<ref id="B4">
<label>4.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chetty</surname> <given-names>S</given-names></name> <name><surname>Swe-Han</surname> <given-names>KS</given-names></name> <name><surname>Mahabeer</surname> <given-names>Y</given-names></name> <name><surname>Pillay</surname> <given-names>A</given-names></name> <name><surname>Essack</surname> <given-names>SY</given-names></name></person-group>. <article-title>Interprofessional education in antimicrobial stewardship, a collaborative effort</article-title>. <source>JAC Antimicrob Resist.</source> (<year>2024</year>) <volume>6</volume>:<fpage>dlae054</fpage>. <pub-id pub-id-type="doi">10.1093/jacamr/dlae054</pub-id><pub-id pub-id-type="pmid">38562216</pub-id></citation></ref>
<ref id="B5">
<label>5.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mennella</surname> <given-names>C</given-names></name> <name><surname>Maniscalco</surname> <given-names>U</given-names></name> <name><surname>De Pietro</surname> <given-names>G</given-names></name> <name><surname>Esposito</surname> <given-names>M</given-names></name></person-group>. <article-title>Ethical and regulatory challenges of AI technologies in healthcare: a narrative review</article-title>. <source>Heliyon.</source> (<year>2024</year>) <volume>10</volume>:<fpage>e26297</fpage>. <pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e26297</pub-id><pub-id pub-id-type="pmid">38384518</pub-id></citation></ref>
<ref id="B6">
<label>6.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Palaniappan</surname> <given-names>K</given-names></name> <name><surname>Lin</surname> <given-names>EYT</given-names></name> <name><surname>Vogel</surname> <given-names>S</given-names></name></person-group>. <article-title>Global regulatory frameworks for the use of artificial intelligence (AI) in the healthcare services sector</article-title>. <source>Healthcare.</source> (<year>2024</year>) <volume>12</volume>:<fpage>562</fpage>. <pub-id pub-id-type="doi">10.3390/healthcare12050562</pub-id><pub-id pub-id-type="pmid">38470673</pub-id></citation></ref>
<ref id="B7">
<label>7.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brown</surname> <given-names>DG</given-names></name> <name><surname>Wobst</surname> <given-names>HJ</given-names></name> <name><surname>Kapoor</surname> <given-names>A</given-names></name> <name><surname>Kenna</surname> <given-names>LA</given-names></name> <name><surname>Southall</surname> <given-names>N</given-names></name></person-group>. <article-title>Clinical development times for innovative drugs</article-title>. <source>Nat Rev Drug Discov.</source> (<year>2022</year>) <volume>21</volume>:<fpage>793</fpage>&#x02013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1038/d41573-021-00190-9</pub-id><pub-id pub-id-type="pmid">34759309</pub-id></citation></ref>
<ref id="B8">
<label>8.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ben Charif</surname> <given-names>A</given-names></name> <name><surname>Zomahoun</surname> <given-names>HTV</given-names></name> <name><surname>Gogovor</surname> <given-names>A</given-names></name> <name><surname>Abdoulaye Samri</surname> <given-names>M</given-names></name> <name><surname>Massougbodji</surname> <given-names>J</given-names></name> <name><surname>Wolfenden</surname> <given-names>L</given-names></name> <etal/></person-group>. <article-title>Tools for assessing the scalability of innovations in health: a systematic review</article-title>. <source>Health Res Policy Sys.</source> (<year>2022</year>) <volume>20</volume>:<fpage>34</fpage>. <pub-id pub-id-type="doi">10.1186/s12961-022-00830-5</pub-id><pub-id pub-id-type="pmid">35331260</pub-id></citation></ref>
<ref id="B9">
<label>9.</label>
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dankwa-Mullan</surname> <given-names>I</given-names></name></person-group>. <article-title>Health equity and ethical considerations in using artificial intelligence in public health and medicine</article-title>. <source>Prev Chronic Dis.</source> (<year>2024</year>) <volume>21</volume>:<fpage>240245</fpage>. <pub-id pub-id-type="doi">10.5888/pcd21.240245</pub-id><pub-id pub-id-type="pmid">39173183</pub-id></citation></ref>
<ref id="B10">
<label>10.</label>
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Cheatham</surname> <given-names>B</given-names></name> <name><surname>Javanmardian</surname> <given-names>K</given-names></name> <name><surname>Samandari</surname> <given-names>H</given-names></name></person-group>. <article-title>Confronting the risks of artificial intelligence</article-title>. <source>McKinsey</source> (<year>2019</year>). Available at: <ext-link ext-link-type="uri" xlink:href="https://perma.cc/T2CX-HYZF">https://perma.cc/T2CX-HYZF</ext-link></citation>
</ref>
<ref id="B11">
<label>11.</label>
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Brundage</surname> <given-names>M</given-names></name> <name><surname>Avin</surname> <given-names>S</given-names></name> <name><surname>Clark</surname> <given-names>J</given-names></name> <name><surname>Toner</surname> <given-names>H</given-names></name> <name><surname>Eckersley</surname> <given-names>P</given-names></name> <name><surname>Garfinkel</surname> <given-names>B</given-names></name> <etal/></person-group>. <source>The Malicious Use of Artificial Intelligence</source>. Future of Humanity Institute (<year>2018</year>). Available at: <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/ftp/arxiv/papers/1802/1802.07228.pdf">https://arxiv.org/ftp/arxiv/papers/1802/1802.07228.pdf</ext-link> (accessed October 1, 2024).</citation>
</ref>
</ref-list>
</back>
</article>