<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="brief-report" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Artif. Intell.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Artificial Intelligence</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Artif. Intell.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2624-8212</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/frai.2026.1744544</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Perspective</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>The augmented physician: AI and the future of clinical cognition</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Bouabida</surname>
<given-names>Khayreddine</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1363137"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chaves</surname>
<given-names>Breitner Gomes</given-names>
</name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2232133"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Anane</surname>
<given-names>Enoch</given-names>
</name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3391986"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>UConn Health</institution>, <city>Farmington</city>, <state>CT</state>, <country country="US">United States</country></aff>
<aff id="aff2"><label>2</label><institution>Connecticut Children's Medical Center</institution>, <city>Hartford</city>, <state>CT</state>, <country country="US">United States</country></aff>
<aff id="aff3"><label>3</label><institution>Research Center of the University of Montreal Hospital Centre</institution>, <city>Montreal</city>, <state>QC</state>, <country country="CA">Canada</country></aff>
<aff id="aff4"><label>4</label><institution>Department of Community Health Sciences, Universit&#x00E9; de Sherbrooke</institution>, <city>Sherbrooke</city>, <state>QC</state>, <country country="CA">Canada</country></aff>
<aff id="aff5"><label>5</label><institution>UMass Chan Medical School</institution>, <city>Worcester</city>, <state>MA</state>, <country country="US">United States</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Khayreddine Bouabida, <email xlink:href="mailto:khayreddine.bouabida@umontreal.ca">khayreddine.bouabida@umontreal.ca</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-23">
<day>23</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>9</volume>
<elocation-id>1744544</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>29</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>12</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Bouabida, Chaves and Anane.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Bouabida, Chaves and Anane</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-23">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Medicine stands at a cognitive tipping point as the volume of biomedical information expands faster than clinicians can realistically monitor, synthesize, and apply new evidence in routine practice. Once a marker of scientific progress, this acceleration now challenges the foundations of clinical expertise, patient safety, and medical education. This Perspective examines the widening gap between evidence generation and evidence implementation, arguing that artificial intelligence should not replace clinicians but serve as a cognitive partner. Properly designed and ethically governed systems can assist clinicians by organizing and contextualizing large bodies of information, enabling greater focus on clinical judgment, empathy, and human connection. When integrated thoughtfully, artificial intelligence has the potential to strengthen patient engagement, reduce administrative burden, and support shared human and machine cognition in care delivery. Sustaining clinical excellence in an era of accelerating information growth will depend on embracing artificial intelligence as a collaborative tool and redefining how physicians learn, think, and care. The future of medicine will remain profoundly human, precisely because it is intelligently augmented.</p>
</abstract>
<kwd-group>
<kwd>artificial intelligence (AI)</kwd>
<kwd>clinical cognition</kwd>
<kwd>clinical decision support</kwd>
<kwd>medical education</kwd>
<kwd>patient-centered care</kwd>
<kwd>physician burnout</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="0"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="19"/>
<page-count count="6"/>
<word-count count="4971"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Medicine and Public Health</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<title>Introduction</title>
<p>This article is a reflective Perspective on one of medicine&#x2019;s most underestimated challenges: the rapid expansion of biomedical information and its cognitive implications for clinicians. As the volume of research publications, clinical data, and digital health outputs grows, physicians are increasingly required to navigate an information environment that no individual can realistically monitor or synthesize alone. The traditional model of physician expertise, rooted in memorization, manual synthesis, and lifelong recall, has reached its practical limits within this evolving ecosystem (<xref ref-type="bibr" rid="ref4">Densen, 2011</xref>; <xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref10">MacIntyre et al., 2023</xref>).</p>
<p>Historically, the growth of medical knowledge was sufficiently gradual to allow clinicians to remain current through conventional educational pathways. In 1950, the body of indexed medical information was estimated to double approximately every 50&#x202F;years. By 1980, this interval had shortened to about seven years, and by 2010 to roughly 3.5&#x202F;years. Contemporary estimates suggest that the volume of biomedical information and published literature now expands even faster and is often described as doubling within months (<xref ref-type="bibr" rid="ref4">Densen, 2011</xref>; <xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>; <xref ref-type="bibr" rid="ref11">Mir et al., 2023</xref>). Importantly, this acceleration reflects the proliferation of data, publications, and subspecialty evidence rather than the rapid replacement of foundational clinical principles, many of which remain stable over decades of practice.</p>
<p>Even so, the consequences for clinical cognition are substantial. Clinicians must continually evaluate new evidence, guideline updates, subgroup analyses, and context-specific findings layered onto existing knowledge. This expanding informational periphery contributes to a widening gap between evidence generation and evidence implementation, challenging clinicians&#x2019; ability to deliver consistently evidence-informed care while preserving the relational and ethical dimensions central to medicine (<xref ref-type="bibr" rid="ref4">Densen, 2011</xref>; <xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>; <xref ref-type="bibr" rid="ref11">Mir et al., 2023</xref>).</p>
<p>This Perspective explores how artificial intelligence should be understood not as a replacement for physicians, but as a cognitive partner within this increasingly complex information landscape. Rather than automating judgment or supplanting expertise, artificial intelligence can assist clinicians in organizing, contextualizing, and translating large volumes of biomedical information into clinically relevant insights. Framed as an instrument of cognitive augmentation rather than epistemic authority, artificial intelligence emerges as a necessary component for preserving clinical excellence, patient safety, and ethical decision making in modern medical practice (<xref ref-type="bibr" rid="ref4">Densen, 2011</xref>; <xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>; <xref ref-type="bibr" rid="ref11">Mir et al., 2023</xref>).</p>
</sec>
<sec id="sec2">
<title>The unequal burden of information overload</title>
<p>The cognitive burden imposed by the contemporary biomedical information environment is not distributed evenly across medical specialties. Certain fields face disproportionate exposure to information overload because of the intensity, breadth, and time sensitivity of their clinical responsibilities. Emergency medicine, primary care, and high-complexity specialties illustrate how information overload translates directly into clinician distress and patient safety risk.</p>
<p>Emergency medicine physicians are particularly vulnerable. A multisite National Health Service study across four hospital trusts in Northern England found that most emergency physicians perceived information overload as a serious and worsening problem. Contributors included expectations of constant availability, excessive email volume, and dense multidisciplinary communication. Reported consequences included guideline fatigue, prolonged working hours, increased stress, and impaired decision making (<xref ref-type="bibr" rid="ref15">Sbaffi et al., 2020</xref>). These findings align with a meta-analysis showing that approximately 40% of emergency physicians experience high levels of emotional exhaustion and depersonalization, with emergency medicine physicians more susceptible to burnout than physicians in many other specialties (<xref ref-type="bibr" rid="ref18">Zhang et al., 2020</xref>).</p>
<p>Primary care physicians face a different but equally overwhelming burden driven more by scope than acuity. A qualitative study published in <italic>JAMA Internal Medicine</italic> described primary care work as insurmountable, overwhelming, and undoable. Physicians reported that specialists routinely defer medication refills, prior authorizations, test results, and diagnostic follow-up to them, creating a &#x201C;giant funnel&#x201D; effect that shifts responsibility for data management and coordination to primary care (<xref ref-type="bibr" rid="ref1">Agarwal et al., 2020</xref>). One physician noted that clinical work increasingly involves managing data, numbers, and processes rather than caring for people. National data from <italic>JAMA Network Open</italic> show that primary care physicians had the highest burnout rates among all health care occupations between 2018 and 2023, ranging from 46.2 to 57.6% (<xref ref-type="bibr" rid="ref12">Mohr et al., 2025</xref>).</p>
<p>Specialty-specific analyses reveal additional vulnerabilities. A national study of 3,588 United States resident physicians found significantly higher burnout rates in urology, neurology, emergency medicine, ophthalmology, and general surgery compared with internal medicine, with relative risks ranging from 1.23 to 1.48 (<xref ref-type="bibr" rid="ref5">Dyrbye et al., 2018</xref>). Burnout also has direct implications for patient safety. A large systematic review and meta-analysis demonstrated that physician burnout is associated with more than double the odds of patient safety incidents and low professionalism, with the strongest association observed in emergency medicine (<xref ref-type="bibr" rid="ref6">Hodkinson et al., 2022</xref>).</p>
<p>Taken together, these findings indicate that information overload is not an abstract cognitive concern, but a specialty-dependent threat to clinician well-being and patient safety. They suggest that artificial intelligence-enabled cognitive support may be most urgently needed in emergency medicine, primary care, and other high-complexity clinical environments where information overload directly compromises decision quality and care delivery.</p>
</sec>
<sec id="sec3">
<title>The collapse of the medical knowledge timeline</title>
<p>The pace at which biomedical information is generated and disseminated has shifted from a manageable evolution to an unprecedented expansion. In 1950, the volume of indexed medical literature was estimated to double roughly every 50&#x202F;years, allowing many physicians to complete a career without facing a fundamentally altered information landscape. By 1980, this interval had shortened to approximately seven years, and by 2010 to about 3.5&#x202F;years. Contemporary estimates suggest that the volume of biomedical publications and digital medical data continues to expand at a rapid pace and is often described as doubling within months (<xref ref-type="bibr" rid="ref4">Densen, 2011</xref>; <xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>; <xref ref-type="bibr" rid="ref11">Mir et al., 2023</xref>).</p>
<p>It is important to distinguish this acceleration in information volume from changes in validated clinical knowledge. The rapid growth of publications does not imply that foundational principles of diagnosis and treatment are overturned at the same rate. Much of this expansion occurs at the periphery of medical practice, including incremental refinements in subspecialty care, studies of variable quality, exploratory findings, and data relevant to narrowly defined patient populations. For many clinicians, the core frameworks guiding common conditions such as hypertension, diabetes, or acute chest pain remain stable over long periods of practice.</p>
<p>Nevertheless, the expansion of the informational environment has profound implications for daily clinical work. A medical student entering training in 2020 will encounter a literature base that grows substantially across medical school and residency, even if core clinical principles remain relatively consistent. Clinicians are expected to evaluate new trials, updated guidelines, subgroup analyses, and emerging safety signals layered onto established knowledge. Traditional mechanisms of knowledge transfer, including textbooks, lectures, and episodic continuing medical education, are poorly suited to this continuous evaluation process. The result is a widening gap between the generation of evidence and its consistent application in practice (<xref ref-type="bibr" rid="ref4">Densen, 2011</xref>; <xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>).</p>
<p>In earlier eras, professional expertise was largely defined by the depth of an individual physician&#x2019;s internalized knowledge. In the current environment, expertise is increasingly defined by the ability to navigate, interpret, and operationalize complex information systems that exceed individual cognitive capacity. The collapse of the medical knowledge timeline reflects not an erosion of clinical fundamentals, but a transformation in how medical knowledge must be accessed, filtered, and applied. This shift signals the need for new tools, new learning models, and new forms of cognitive partnership to sustain high-quality care (<xref ref-type="bibr" rid="ref4">Densen, 2011</xref>; <xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>; <xref ref-type="bibr" rid="ref11">Mir et al., 2023</xref>).</p>
</sec>
<sec id="sec4">
<title>The impossible cognitive burden</title>
<p>Even the most dedicated physician cannot realistically keep pace with the contemporary biomedical information environment. Clinicians manage numerous patient encounters each day, each requiring the application of established clinical principles together with selective integration of new evidence, updated guidelines, and context-specific considerations. At the same time, thousands of biomedical manuscripts are published daily across journals, preprint servers, and clinical trial registries (<xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>). Although much of this literature does not alter routine practice, clinicians are expected to remain alert to findings that may affect patient safety, therapeutic effectiveness, or standards of care.</p>
<p>This imbalance has contributed to a widening knowledge-practice gap that reflects structural difficulty rather than individual incompetence. Classic analyses suggest that validated evidence may take more than a decade to become consistently incorporated into clinical care (<xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>). During this lag, patients may receive care that is concordant with existing standards yet misaligned with the most current evidence, highlighting system-level limitations rather than individual neglect.</p>
<p>The burden of this environment extends beyond clinical accuracy to clinician well-being. Physicians navigating dense documentation requirements, expanding quality metrics, and evolving evidence bases report high levels of cognitive fatigue, burnout, and moral distress, particularly when aware of emerging data they lack the time or tools to evaluate fully. National data and specialty-specific analyses demonstrate that burnout affects a substantial proportion of clinicians, with rates exceeding 40% in several frontline specialties and strong associations with reduced professional engagement and patient safety incidents (<xref ref-type="bibr" rid="ref12">Mohr et al., 2025</xref>; <xref ref-type="bibr" rid="ref5">Dyrbye et al., 2018</xref>; <xref ref-type="bibr" rid="ref6">Hodkinson et al., 2022</xref>). Administrative and regulatory demands compete directly with clinical reasoning and patient interaction, further straining limited cognitive resources.</p>
<p>Expecting unaided human cognition to manage this expanding informational landscape is no longer sustainable. The challenge confronting medicine is not one of insufficient intelligence or motivation, but one of cognitive bandwidth. To protect both patients and clinicians, medical practice must evolve from a model centered on individual memory and manual synthesis to a model supported by systems that assist with evidence filtering, prioritization, and contextualization. Properly designed and ethically governed artificial intelligence offers a scalable means of supporting this transition toward system-assisted cognition (<xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>; <xref ref-type="bibr" rid="ref14">Sahni and Carrus, 2023</xref>; <xref ref-type="bibr" rid="ref3">Dave et al., 2025</xref>; <xref ref-type="bibr" rid="ref9">Lim et al., 2026</xref>).</p>
</sec>
<sec id="sec5">
<title>AI as a cognitive extension and partnership, not a replacement</title>
<p>The solution to medicine&#x2019;s growing cognitive burden cannot rest on increased individual effort alone. It requires systemic augmentation. Artificial intelligence offers such augmentation not as a replacement for physicians, but as an extension of human cognition designed to support clinical reasoning within an increasingly complex information environment.</p>
<p>In this perspective, artificial intelligence is used as an umbrella term encompassing a range of technologies with distinct functions and levels of maturity. These include supervised machine learning models embedded in electronic health records for risk prediction, clinical decision support algorithms, automated documentation and scribing tools, large language models (LLMs) capable of summarizing medical literature, and foundation models trained on multimodal clinical data. While these systems differ substantially in implementation, adoption, and risk profiles, they share a common function: assisting clinicians in organizing, prioritizing, and contextualizing information at a scale that exceeds individual cognitive capacity (<xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>; <xref ref-type="bibr" rid="ref14">Sahni and Carrus, 2023</xref>).</p>
<p>Across these applications, artificial intelligence supports clinical practice by filtering large bodies of literature, identifying relevant guideline updates, flagging safety signals, and synthesizing patient-specific data into interpretable outputs. In doing so, AI functions not as an autonomous decision maker, but as cognitive scaffolding that enhances situational awareness and supports more informed human judgment. This reframes artificial intelligence as a tool for knowledge translation rather than knowledge generation, directly addressing the widening gap between evidence production and bedside implementation (<xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>).</p>
<p>The central question, therefore, is not whether artificial intelligence can replace clinicians, but whether contemporary medicine can function safely without system-assisted cognitive support. In an environment characterized by expanding subspecialty literature, regulatory complexity, and administrative burden, clinicians who rely solely on manual information processing face increasing difficulty maintaining situational awareness across evolving evidence landscapes. This reflects not diminished competence, but an information ecosystem that has outgrown unaided cognitive strategies.</p>
<p>At the same time, artificial intelligence systems operating in isolation are inherently limited. Algorithms lack contextual understanding, moral reasoning, and the capacity for empathy. They are vulnerable to bias introduced through training data and may perform inconsistently across populations and clinical settings (<xref ref-type="bibr" rid="ref10">MacIntyre et al., 2023</xref>; <xref ref-type="bibr" rid="ref14">Sahni and Carrus, 2023</xref>). The most robust model for the future of medicine is therefore a human&#x2013;AI partnership in which machines contribute speed, scale, and pattern recognition, while clinicians provide judgment, ethical oversight, and accountability for final decisions.</p>
<p>Within this partnership, the physician is not diminished but amplified. Artificial intelligence enables clinicians to access relevant information more efficiently, reducing time spent searching for data and increasing time available for interpretation, communication, and shared decision making. In this sense, AI strengthens rather than undermines the therapeutic relationship by allowing clinicians to focus more fully on the human dimensions of care. As described by Topol and others, this convergence represents not the end of the physician&#x2019;s role, but its evolution toward an augmented form of clinical expertise grounded in both technological support and human values (<xref ref-type="bibr" rid="ref4">Densen, 2011</xref>; <xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref10">MacIntyre et al., 2023</xref>; <xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>).</p>
</sec>
<sec id="sec6">
<title>Empirical evidence that AI can reduce cognitive burden and burnout</title>
<p>Beyond conceptual promise, a growing body of empirical evidence demonstrates that artificial intelligence can meaningfully reduce documentation burden, cognitive load, and clinician burnout when implemented as a clinician-supervised tool. Recent systematic reviews and multicenter studies provide early but compelling support for AI-driven workflow augmentation across diverse clinical settings.</p>
<p>A 2025 systematic review and meta-analysis found that artificial intelligence-assisted documentation tools were associated with a moderate reduction in documentation workload and burnout when clinicians reviewed and edited AI-generated drafts, with a standardized mean difference of &#x2212;0.72 (<xref ref-type="bibr" rid="ref19">Zhao et al., 2025</xref>). These findings indicate that AI functions most effectively not as an autonomous system, but as a supportive drafting and organizational aid embedded within existing clinical workflows.</p>
<p>Evidence from real-world implementation further supports this conclusion. In a multicenter quality improvement study involving 263 physicians, the introduction of an ambient artificial intelligence scribe was associated with a significant reduction in burnout from 51.9 to 38.8% after 30&#x202F;days of use. Clinicians also reported improvements in cognitive task load and an average reduction of nearly one hour per day in after-hours documentation (<xref ref-type="bibr" rid="ref13">Olson et al., 2025</xref>). Importantly, these gains were achieved without reducing clinician oversight or responsibility for final documentation.</p>
<p>Additional data from <italic>JAMA Network Open</italic> demonstrate that clinicians using artificial intelligence-powered documentation platforms experienced significantly reduced cognitive demand and documentation effort, alongside perceived improvements in clinical efficiency and patient-centered care (<xref ref-type="bibr" rid="ref16">Stults et al., 2025</xref>). These findings are particularly salient given that physicians now spend more than 50% of their working time interacting with electronic health records, a well-documented contributor to burnout and professional dissatisfaction (<xref ref-type="bibr" rid="ref14">Sahni and Carrus, 2023</xref>).</p>
<p>Broader scoping reviews further reinforce these observations. Across studies examining natural language processing, AI-integrated electronic health records, clinical decision support systems, and generative AI tools, reductions in administrative burden and improvements in job satisfaction were consistently reported, with burnout emerging as the most frequently addressed mental health outcome (<xref ref-type="bibr" rid="ref3">Dave et al., 2025</xref>). In diagnostic specialties, artificial intelligence-assisted interpretation has been shown to substantially reduce interpretation time for abnormal imaging and laboratory findings without compromising diagnostic accuracy (<xref ref-type="bibr" rid="ref9">Lim et al., 2026</xref>).</p>
<p>Critically, these efficiency gains do not appear to trade speed for safety. Generative artificial intelligence-based electronic medical record systems reduce documentation time by approximately 40%, while voice recognition and AI scribing technologies reduce charting time by nearly 30%, resulting in overall administrative burden reductions exceeding 30% (<xref ref-type="bibr" rid="ref9">Lim et al., 2026</xref>). Across studies, these benefits consistently depend on clinician review and oversight, reinforcing the central premise that artificial intelligence functions most effectively as cognitive augmentation rather than autonomous decision making.</p>
</sec>
<sec id="sec7">
<title>AI, patient engagement, and the future of patient-centered care</title>
<p>Concerns are often raised that increasing reliance on artificial intelligence may distance clinicians from patients or erode the relational foundations of care. However, when integrated thoughtfully, artificial intelligence has the potential to strengthen patient engagement and reinforce patient centered care by reshaping how clinical time and attention are allocated (<xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref15">Sbaffi et al., 2020</xref>; <xref ref-type="bibr" rid="ref18">Zhang et al., 2020</xref>).</p>
<p>Evidence from telehealth, digital health platforms, and clinical decision support systems suggests that these tools can improve access to care, particularly for patients facing geographic, mobility, or scheduling barriers. By streamlining documentation, automating routine administrative tasks, and supporting rapid information retrieval, artificial intelligence can reduce nonclinical workload and allow clinicians to devote greater attention to direct communication with patients, families, and caregivers. This redistribution of effort creates space for shared decision making, education, and emotional presence, dimensions of care that are frequently constrained in contemporary practice (<xref ref-type="bibr" rid="ref2">Balas and Boren, 2000</xref>; <xref ref-type="bibr" rid="ref17">Topol, 2019</xref>; <xref ref-type="bibr" rid="ref14">Sahni and Carrus, 2023</xref>).</p>
<p>Artificial intelligence also enables patients to take a more active role in their own care. Remote monitoring systems, personalized digital feedback tools, and algorithm supported care pathways can provide patients with timely and understandable information about their health status and treatment options. When governed appropriately, these tools enhance patient understanding, self management, and engagement without supplanting the clinician&#x2019;s role. Rather than reinforcing hierarchical models of care, they support collaborative relationships in which information and responsibility flow in both directions.</p>
<p>These changes affect not only physicians but also nurses and other health professionals whose workflows are deeply intertwined with documentation, coordination, and patient communication. AI enabled tools that reduce repetitive charting and information retrieval can free nursing and interprofessional teams to spend more time on direct patient care, education, and advocacy, thereby strengthening team based, patient centered practice.</p>
<p>Crucially, the impact of artificial intelligence on patient engagement depends on how it is implemented. Systems designed to support transparency, explanation, and dialogue can deepen trust and understanding, while poorly designed tools risk confusion or detachment. When guided by ethical oversight and clinician leadership, artificial intelligence represents not a threat to patient centered care, but an opportunity to strengthen it by improving accessibility, continuity, and the quality of clinician patient interaction (<xref ref-type="bibr" rid="ref15">Sbaffi et al., 2020</xref>; <xref ref-type="bibr" rid="ref18">Zhang et al., 2020</xref>).</p>
</sec>
<sec id="sec8">
<title>Ethical and educational imperatives</title>
<p>The integration of artificial intelligence into medicine introduces both significant opportunity and substantial responsibility. While algorithmic systems can assist clinicians by processing information at scale, they also reflect the biases, gaps, and structural inequities present in the data on which they are trained (<xref ref-type="bibr" rid="ref10">MacIntyre et al., 2023</xref>). For this reason, artificial intelligence must be developed and implemented under rigorous standards of transparency, accountability, and equity. Clinical responsibility cannot be delegated to machines. Physicians remain accountable for interpreting algorithm-supported insights within clinical, ethical, and social contexts.</p>
<p>Ethical implementation alone, however, is insufficient. Medicine must also confront the reality that prevailing educational models are increasingly misaligned with the contemporary information environment. Training structures that emphasize memorization and recall were developed for an era in which knowledge evolved gradually and predictably. They are poorly suited to a clinical landscape characterized by continuous evidence generation, heterogeneous data quality, and rapid guideline evolution (<xref ref-type="bibr" rid="ref4">Densen, 2011</xref>; <xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref10">MacIntyre et al., 2023</xref>). Preparing physicians for this environment requires a shift from knowledge accumulation toward skills in evidence appraisal, contextual reasoning, and information stewardship. This reframing places lifelong learning at the center of professional identity, with clinicians expected to continually reassess evidence, update practices, and adapt to new information throughout their careers.</p>
<p>This shift demands a revised educational paradigm that prioritizes data literacy, critical thinking, and familiarity with artificial intelligence&#x2013;enabled tools as an extension of digital competence. AI literacy builds on foundational digital skills by enabling clinicians to understand how algorithms are trained, how bias can emerge, and how outputs should be interpreted rather than accepted uncritically. Importantly, artificial intelligence literacy should not aim to turn clinicians into engineers, but to equip them with sufficient understanding to evaluate system limitations, recognize failure modes, and maintain clinical autonomy (<xref ref-type="bibr" rid="ref7">Kelly et al., 2019</xref>; <xref ref-type="bibr" rid="ref10">MacIntyre et al., 2023</xref>; <xref ref-type="bibr" rid="ref14">Sahni and Carrus, 2023</xref>).</p>
<p>A related ethical challenge is the risk of automation bias. When algorithm-generated recommendations are delivered with speed and apparent authority, clinicians may defer judgment prematurely or fail to apply necessary skepticism. Preventing this requires systems designed to encourage reflection rather than passive acceptance, including explainable outputs, uncertainty signaling, and workflows that preserve deliberate human oversight. Education must reinforce these habits to ensure that artificial intelligence augments rather than erodes diagnostic reasoning.</p>
<p>Finally, equity must remain central to artificial intelligence deployment. High-resource institutions may rapidly benefit from advanced systems, while lower-resource settings risk exclusion due to infrastructure limitations. If artificial intelligence becomes a determinant of care quality, unequal access could widen existing health disparities. Ensuring that these technologies function as equalizers rather than stratifiers will require investment in inclusive datasets, open standards, public governance, and models designed to perform across diverse populations rather than exclusively in data-rich environments (<xref ref-type="bibr" rid="ref10">MacIntyre et al., 2023</xref>; <xref ref-type="bibr" rid="ref14">Sahni and Carrus, 2023</xref>). In support of this need for caution, a recent systematic review identified 19 major barriers to artificial intelligence implementation across technical, stakeholder, and societal domains, including limited explainability, workflow integration challenges, liability concerns, and uncertainty regarding cost-effectiveness, underscoring that ethical adoption requires more than technical capability alone (<xref ref-type="bibr" rid="ref8">Li et al., 2023</xref>).</p>
</sec>
<sec sec-type="conclusions" id="sec9">
<title>Conclusion</title>
<p>Medicine has reached a cognitive inflection point. The rapid expansion of biomedical information, once a hallmark of scientific progress, now challenges the limits of unaided human cognition. The central issue facing clinicians is no longer simply what they know, but how they access, evaluate, and apply relevant evidence within an increasingly complex information environment.</p>
<p>Artificial intelligence offers not an escape from this challenge, but a navigational aid through it. When used as a cognitive partner, artificial intelligence can support clinicians in translating large and evolving bodies of information into contextually relevant insights while preserving human judgment, ethical reasoning, and relational care. Importantly, advances in telehealth and digital health demonstrate that thoughtfully implemented artificial intelligence can also strengthen patient engagement and patient-centered care by restoring time and attention to communication, shared decision making, and trust.</p>
<p>Failing to engage with this partnership risks leaving clinicians increasingly constrained by systems that evolve faster than individual cognitive strategies can adapt. The defining question for modern medicine is therefore not whether artificial intelligence will influence healthcare, but whether clinicians will actively shape its integration or passively respond to it. The most ethical and sustainable future lies not in replacement, but in collaboration &#x2014; one in which machines extend the reach of human cognition and physicians remain responsible for meaning, values, and care.</p>
<p>The cognitive edge confronting medicine is not a threat to professional identity, but an opportunity to redefine it. By embracing artificial intelligence as a supportive tool rather than an epistemic authority, medicine can remain both scientifically rigorous and deeply human. The future of medicine will remain human, precisely because it is intelligently augmented.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec10">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material; further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="sec11">
<title>Author contributions</title>
<p>KB: Conceptualization, Formal analysis, Validation, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. BC: Conceptualization, Validation, Writing &#x2013; review &#x0026; editing. EA: Conceptualization, Validation, Writing &#x2013; original draft.</p>
</sec>
<sec sec-type="COI-statement" id="sec12">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec13">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec14">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Agarwal</surname><given-names>S. D.</given-names></name> <name><surname>Pabo</surname><given-names>E.</given-names></name> <name><surname>Rozenblum</surname><given-names>R.</given-names></name> <name><surname>Sherritt</surname><given-names>K. M.</given-names></name></person-group> (<year>2020</year>). <article-title>Professional dissonance and burnout in primary care: a qualitative study</article-title>. <source>JAMA Intern. Med.</source> <volume>180</volume>, <fpage>395</fpage>&#x2013;<lpage>401</lpage>. doi: <pub-id pub-id-type="doi">10.1001/jamainternmed.2019.6326</pub-id>, <pub-id pub-id-type="pmid">31904796</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Balas</surname><given-names>E. A.</given-names></name> <name><surname>Boren</surname><given-names>S. A.</given-names></name></person-group> (<year>2000</year>). <article-title>Managing clinical knowledge for health care improvement</article-title>. <source>Yearb. Med. Inform.</source> <volume>1</volume>, <fpage>65</fpage>&#x2013;<lpage>70</lpage>.</mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dave</surname><given-names>B.</given-names></name> <name><surname>Martin</surname><given-names>P.</given-names></name> <name><surname>David</surname><given-names>S. S.</given-names></name> <name><surname>Kumar</surname><given-names>S.</given-names></name> <name><surname>Chakraborty</surname><given-names>T.</given-names></name></person-group> (<year>2025</year>). <article-title>Enhancing healthcare worker mental health via artificial intelligence-driven work process improvements: a scoping review</article-title>. <source>Int. J. Med. Inform.</source> <volume>205</volume>:<fpage>106122</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2025.106122</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Densen</surname><given-names>P.</given-names></name></person-group> (<year>2011</year>). <article-title>Challenges and opportunities facing medical education</article-title>. <source>Trans. Am. Clin. Climatol. Assoc.</source> <volume>122</volume>, <fpage>48</fpage>&#x2013;<lpage>58</lpage>.</mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dyrbye</surname><given-names>L. N.</given-names></name> <name><surname>Burke</surname><given-names>S. E.</given-names></name> <name><surname>Hardeman</surname><given-names>R. R.</given-names></name> <name><surname>Herrin</surname><given-names>J.</given-names></name> <name><surname>Wittlin</surname><given-names>N. M.</given-names></name> <name><surname>Yeazel</surname><given-names>M.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Association of clinical specialty with symptoms of burnout and career choice regret among US resident physicians</article-title>. <source>JAMA</source> <volume>320</volume>, <fpage>1114</fpage>&#x2013;<lpage>1130</lpage>. doi: <pub-id pub-id-type="doi">10.1001/jama.2018.12615</pub-id>, <pub-id pub-id-type="pmid">30422299</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hodkinson</surname><given-names>A.</given-names></name> <name><surname>Zhou</surname><given-names>A.</given-names></name> <name><surname>Johnson</surname><given-names>J.</given-names></name> <name><surname>Geraghty</surname><given-names>K.</given-names></name> <name><surname>Riley</surname><given-names>R.</given-names></name> <name><surname>Panagopoulou</surname><given-names>E.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Associations of physician burnout with career engagement and quality of patient care: a systematic review and meta-analysis</article-title>. <source>BMJ</source> <volume>378</volume>:<fpage>e070442</fpage>. doi: <pub-id pub-id-type="doi">10.1136/bmj-2022-070442</pub-id>, <pub-id pub-id-type="pmid">36104064</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kelly</surname><given-names>C. J.</given-names></name> <name><surname>Karthikesalingam</surname><given-names>A.</given-names></name> <name><surname>Suleyman</surname><given-names>M.</given-names></name> <name><surname>Corrado</surname><given-names>G.</given-names></name> <name><surname>King</surname><given-names>D.</given-names></name></person-group> (<year>2019</year>). <article-title>Key challenges for delivering clinical impact with artificial intelligence</article-title>. <source>BMC Med.</source> <volume>17</volume>:<fpage>195</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12916-019-1426-2</pub-id>, <pub-id pub-id-type="pmid">31665002</pub-id></mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>L. T.</given-names></name> <name><surname>Haley</surname><given-names>C. L.</given-names></name> <name><surname>Boyd</surname><given-names>A. K.</given-names></name> <name><surname>Bernstam</surname><given-names>E. V.</given-names></name></person-group> (<year>2023</year>). <article-title>Technical/algorithm, stakeholder, and society (TASS) barriers to the application of artificial intelligence in medicine: a systematic review</article-title>. <source>J. Biomed. Inform.</source> <volume>147</volume>:<fpage>104531</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jbi.2023.104531</pub-id>, <pub-id pub-id-type="pmid">37884177</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lim</surname><given-names>J. Y.</given-names></name> <name><surname>Kim</surname><given-names>K. H.</given-names></name> <name><surname>Mun</surname><given-names>S. K.</given-names></name></person-group> (<year>2026</year>). <article-title>How does medical artificial intelligence revolutionize physician productivity?</article-title> <source>Yonsei Med. J.</source> <volume>67</volume>, <fpage>1</fpage>&#x2013;<lpage>8</lpage>. doi: <pub-id pub-id-type="doi">10.3349/ymj.2025.0389</pub-id>, <pub-id pub-id-type="pmid">41431407</pub-id></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>MacIntyre</surname><given-names>M. R.</given-names></name> <name><surname>Cockerill</surname><given-names>R. G.</given-names></name> <name><surname>Mirza</surname><given-names>O. F.</given-names></name> <name><surname>Appel</surname><given-names>J. M.</given-names></name></person-group> (<year>2023</year>). <article-title>Ethical considerations for the use of artificial intelligence in medical decision-making capacity assessments</article-title>. <source>Psychiatry Res.</source> <volume>328</volume>:<fpage>115466</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.psychres.2023.115466</pub-id>, <pub-id pub-id-type="pmid">37717548</pub-id></mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mir</surname><given-names>M. M.</given-names></name> <name><surname>Mir</surname><given-names>G. M.</given-names></name> <name><surname>Raina</surname><given-names>N. T.</given-names></name> <name><surname>Mir</surname><given-names>S. M.</given-names></name> <name><surname>Miskeen</surname><given-names>E.</given-names></name> <name><surname>Alharthi</surname><given-names>M. H.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Application of artificial intelligence in medical education: current scenario and future perspectives</article-title>. <source>J. Adv. Med. Educ. Prof.</source> <volume>11</volume>, <fpage>133</fpage>&#x2013;<lpage>140</lpage>. doi: <pub-id pub-id-type="doi">10.30476/JAMP.2023.98655.1803</pub-id>, <pub-id pub-id-type="pmid">37469385</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mohr</surname><given-names>D. C.</given-names></name> <name><surname>Elnahal</surname><given-names>S.</given-names></name> <name><surname>Marks</surname><given-names>M. L.</given-names></name> <name><surname>Derickson</surname><given-names>R.</given-names></name> <name><surname>Osatuke</surname><given-names>K.</given-names></name></person-group> (<year>2025</year>). <article-title>Burnout trends among US health care workers</article-title>. <source>JAMA Netw. Open</source> <volume>8</volume>:<fpage>e255954</fpage>. doi: <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2025.5954</pub-id>, <pub-id pub-id-type="pmid">40257797</pub-id></mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Olson</surname><given-names>K. D.</given-names></name> <name><surname>Meeker</surname><given-names>D.</given-names></name> <name><surname>Troup</surname><given-names>M.</given-names></name> <name><surname>Barker</surname><given-names>T. D.</given-names></name> <name><surname>Nguyen</surname><given-names>V. H.</given-names></name> <name><surname>Manders</surname><given-names>J. B.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Use of ambient AI scribes to reduce administrative burden and professional burnout</article-title>. <source>JAMA Netw. Open</source> <volume>8</volume>:<fpage>e2534976</fpage>. doi: <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2025.34976</pub-id>, <pub-id pub-id-type="pmid">41037268</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sahni</surname><given-names>N. R.</given-names></name> <name><surname>Carrus</surname><given-names>B.</given-names></name></person-group> (<year>2023</year>). <article-title>Artificial intelligence in US health care delivery</article-title>. <source>N. Engl. J. Med.</source> <volume>389</volume>, <fpage>348</fpage>&#x2013;<lpage>358</lpage>. doi: <pub-id pub-id-type="doi">10.1056/NEJMra2204673</pub-id>, <pub-id pub-id-type="pmid">37494486</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sbaffi</surname><given-names>L.</given-names></name> <name><surname>Walton</surname><given-names>J.</given-names></name> <name><surname>Blenkinsopp</surname><given-names>J.</given-names></name> <name><surname>Walton</surname><given-names>G.</given-names></name></person-group> (<year>2020</year>). <article-title>Information overload in emergency medicine physicians: a multisite case study exploring causes, impact, and solutions in four North England NHS trusts</article-title>. <source>J. Med. Internet Res.</source> <volume>22</volume>:<fpage>e19126</fpage>. doi: <pub-id pub-id-type="doi">10.2196/19126</pub-id>, <pub-id pub-id-type="pmid">32716313</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stults</surname><given-names>C. D.</given-names></name> <name><surname>Deng</surname><given-names>S.</given-names></name> <name><surname>Martinez</surname><given-names>M. C.</given-names></name> <name><surname>Wilcox</surname><given-names>J.</given-names></name> <name><surname>Szwerinski</surname><given-names>N.</given-names></name> <name><surname>Chen</surname><given-names>K. H.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Evaluation of an ambient artificial intelligence documentation platform for clinicians</article-title>. <source>JAMA Netw. Open</source> <volume>8</volume>:<fpage>e258614</fpage>. doi: <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2025.8614</pub-id>, <pub-id pub-id-type="pmid">40314951</pub-id></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Topol</surname><given-names>E. J.</given-names></name></person-group> (<year>2019</year>). <article-title>High-performance medicine: the convergence of human and artificial intelligence</article-title>. <source>Nat. Med.</source> <volume>25</volume>, <fpage>44</fpage>&#x2013;<lpage>56</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41591-018-0300-7</pub-id>, <pub-id pub-id-type="pmid">30617339</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>Q.</given-names></name> <name><surname>Mu</surname><given-names>M. C.</given-names></name> <name><surname>He</surname><given-names>Y.</given-names></name> <name><surname>Cai</surname><given-names>Z. L.</given-names></name> <name><surname>Li</surname><given-names>Z. C.</given-names></name></person-group> (<year>2020</year>). <article-title>Burnout in emergency medicine physicians: a meta-analysis and systematic review</article-title>. <source>Medicine (Baltimore)</source> <volume>99</volume>:<fpage>e21462</fpage>. doi: <pub-id pub-id-type="doi">10.1097/MD.0000000000021462</pub-id>, <pub-id pub-id-type="pmid">32769876</pub-id></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname><given-names>J.</given-names></name> <name><surname>Liu</surname><given-names>H.</given-names></name> <name><surname>Chen</surname><given-names>Y.</given-names></name> <name><surname>Song</surname><given-names>F.</given-names></name></person-group> (<year>2025</year>). <article-title>Application of artificial intelligence tools and clinical documentation burden: a systematic review and meta-analysis</article-title>. <source>BMC Med. Inform. Decis. Mak.</source> <volume>26</volume>:<fpage>29</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12911-025-03324-w</pub-id>, <pub-id pub-id-type="pmid">41444884</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2378614/overview">Tom Karl Schaal</ext-link>, West Saxon University of Applied Sciences of Zwickau, Germany</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2500165/overview">Tim Tischendorf</ext-link>, West Saxon University of Applied Sciences of Zwickau, Germany</p>
</fn>
</fn-group>
</back>
</article>