<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="brief-report" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychiatry</journal-id>
<journal-title-group>
<journal-title>Frontiers in Psychiatry</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychiatry</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-0640</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyt.2026.1729175</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Perspective</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>The augmented clinician as a framework for human-AI collaboration in mental healthcare</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Ruan</surname><given-names>Qian-Nan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1389816/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Hu</surname><given-names>Shuang-Qian</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>ShangGuan</surname><given-names>Zhi-Hui</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zhou</surname><given-names>Sun-Meng</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Wenzhou Seventh People&#x2019;s Hospital</institution>, <city>Wenzhou</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>Wenzhou Center for Disease Control and Prevention</institution>, <city>Wenzhou</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff3"><label>3</label><institution>The Affiliated Kangning Hospital of Wenzhou Medical University, Zhejiang Provincial Clinical Research Center for Mental Health</institution>, <city>Wenzhou</city>,&#xa0;<country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Sun-Meng Zhou, <email xlink:href="mailto:405461331@qq.com">405461331@qq.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-02">
<day>02</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1729175</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>29</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>28</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Ruan, Hu, ShangGuan and Zhou.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Ruan, Hu, ShangGuan and Zhou</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-02">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>The global mental health system faces an unprecedented crisis of access, with demand for care far outstripping the supply of trained professionals. Artificial Intelligence (AI) has emerged with immense promise to bridge this gap through scalable and accessible solutions. However, its rapid and often unregulated deployment introduces significant ethical perils, including the dehumanization of care, the perpetuation of societal biases, and the risk of clinical harm. This perspective argues against the pursuit of autonomous AI therapists and instead advocates for the Augmented Clinician model. This framework positions AI as a sophisticated and transparent supportive tool that enhances, rather than replaces, human clinicians. By delegating data-intensive and administrative tasks to AI, clinicians can dedicate more time to the irreplaceable human elements of therapy such as empathy, nuanced judgment, and fostering the therapeutic alliance. We propose that this collaborative human-AI synergy is the most effective and ethically sound path to harness technology&#x2019;s power while ensuring mental healthcare remains fundamentally human-centered.</p>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>augmented clinician</kwd>
<kwd>ethics</kwd>
<kwd>human-AI collaboration</kwd>
<kwd>mental health</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the Science and Technology Plan Project of Wenzhou (Grant No. Y20210112).</funding-statement>
</funding-group>
<counts>
<fig-count count="0"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="24"/>
<page-count count="4"/>
<word-count count="2011"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Digital Mental Health</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>The 21st-century mental healthcare landscape presents a contrast: while public awareness has increased, the global care infrastructure faces significant systemic challenges (<xref ref-type="bibr" rid="B1">1</xref>). Estimates indicate that over one billion people live with a mental health disorder, yet a substantial treatment gap persists, leaving a majority of individuals without adequate support (<xref ref-type="bibr" rid="B2">2</xref>). This systemic deficiency, attributed to chronic underinvestment and workforce shortages, necessitates innovation in care delivery models.</p>
<p>Artificial Intelligence (AI) has entered this landscape as a promising solution, offering scalability, 24/7 availability, and cost-effectiveness that traditional models cannot match (<xref ref-type="bibr" rid="B3">3</xref>). The potential to deliver evidence-based support via smartphones represents a paradigm shift. This promise, however, is shadowed by significant risk. A burgeoning market of direct-to-consumer AI applications is rapidly filling the care vacuum, often with minimal clinical validation or regulatory oversight (<xref ref-type="bibr" rid="B4">4</xref>). This dynamic threatens to establish a de facto standard of care dictated by market forces rather than evidence-based principles.</p>
<p>The central question is not whether to use technology, but how to integrate it safely, effectively, and equitably. This article presents a perspective that the most viable path forward is not automation but augmentation. We argue for the Augmented Clinician model, a framework where AI functions as a subordinate partner to the human professional (<xref ref-type="bibr" rid="B5">5</xref>). This approach seeks to leverage AI&#x2019;s computational strengths to re-humanize care, freeing clinicians to focus on the essential therapeutic relationship that remains the most reliable predictor of positive outcomes (<xref ref-type="bibr" rid="B6">6</xref>).</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Current advances and emerging risks in mental health AI</title>
<p>The application of AI in mental healthcare is no longer theoretical; it is an active and rapidly evolving field. In diagnostics and prognostics, AI offers new levels of objectivity (<xref ref-type="bibr" rid="B7">7</xref>). Machine learning models can synthesize vast, heterogeneous datasets from neurophysiological signals, clinical notes, and even social media to identify subtle patterns of illness (<xref ref-type="bibr" rid="B8">8</xref>). A transformative innovation is digital phenotyping, which uses passive sensor data from personal smartphones to create an objective, real-world measure of a patient&#x2019;s functioning (<xref ref-type="bibr" rid="B9">9</xref>). This can reveal behavioral changes that signal the onset of a manic or depressive episode, enabling proactive intervention. Predictive models show remarkable accuracy in identifying at-risk youth using simple questionnaire data, providing clinicians with a powerful tool for preventive care (<xref ref-type="bibr" rid="B10">10</xref>).</p>
<p>The most visible advance is the therapeutic chatbot. Platforms like Wysa and Woebot deliver structured interventions based on Cognitive Behavioral Therapy (CBT) and have demonstrated efficacy in reducing symptoms of depression and anxiety in multiple randomized controlled trials (<xref ref-type="bibr" rid="B11">11</xref>). Users often value the anonymity and constant availability of these tools. This has led to the phenomenon of users forming a &#x201c;digital therapeutic bond&#x201d; with chatbots, with research indicating that this bond can be comparable to that measured in human psychotherapy (<xref ref-type="bibr" rid="B12">12</xref>).</p>
<p>This finding, however, must be interpreted with caution. This bond is unidirectional, reflecting a profound human need for connection that can be projected onto a non-sentient algorithm. To conflate this simulated relationship with the co-constructed, reciprocal nature of a true human therapeutic alliance is a category error. Relying on this fragile bond fosters dependency on a system that cannot truly understand or challenge the user, creating significant long-term psychological risks. The recent emergence of powerful Large Language Models (LLMs) like GPT-4 has amplified this concern. Millions are turning to these highly conversational AIs for mental health support in a regulatory vacuum, creating uncontrolled risks. Recent technical evaluations indicate that these models can systematically violate ethical standards in mental health practice, including inappropriate responses to crisis situations (<xref ref-type="bibr" rid="B13">13</xref>).</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Preserving the human core amid algorithmic perils</title>
<p>Before integrating any technology, we must define what is non-negotiable. Decades of research confirm that the quality of the therapeutic alliance is the single most robust predictor of successful treatment outcomes across all modalities (<xref ref-type="bibr" rid="B14">14</xref>). This alliance is built on collaboration, trust, and a shared understanding of goals. It is a dynamic process co-constructed between two human beings.</p>
<p>The medium for this alliance is empathy. Human empathy is a complex process involving both affective empathy (the ability to feel with another) and cognitive empathy (the ability to understand another&#x2019;s perspective) (<xref ref-type="bibr" rid="B15">15</xref>). AI, as a computational system, is fundamentally incapable of affective empathy. It has no biological substrate for emotion (<xref ref-type="bibr" rid="B16">16</xref>). At its best, AI can perform a sophisticated simulation of cognitive empathy, analyzing user input to generate a statistically appropriate response. Crucially, recent advancements in LLMs have made this simulation increasingly indistinguishable from human interaction to the lay observer. This high-fidelity mimicry heightens the risk of deception, as patients may feel deeply understood by a system that, in reality, understands nothing. This makes the genuine, shared interpersonal experience at the core of the therapeutic bond irreplaceable.</p>
<p>Beyond these relational limitations, AI models carry inherent technical risks. Many powerful deep learning models are &#x201c;black boxes&#x201d; whose internal decision-making is opaque even to their creators (<xref ref-type="bibr" rid="B17">17</xref>). In a clinical setting, this opacity is a critical failure. It erodes trust, prevents the debugging of errors, and creates a crisis of accountability when a harmful recommendation is made. A recommendation that cannot be understood and defended by a clinician is not clinically valid. Therefore, explainability is not a feature to be traded for accuracy; it is a prerequisite for safe deployment.</p>
<p>Perhaps the most insidious risk is algorithmic bias. AI models learn from data, and if that data reflects existing societal inequities, the AI will learn, reproduce, and scale those same biases under a veneer of technological objectivity (<xref ref-type="bibr" rid="B18">18</xref>). For instance, a widely used healthcare algorithm was found to systematically underestimate the health needs of Black patients because it used historical spending as a proxy for need, reflecting systemic inequities in access to care. In mental health, this risk is acute. Models trained on unrepresentative data or biased clinical records threaten to deepen health disparities for marginalized communities (<xref ref-type="bibr" rid="B19">19</xref>).</p>
</sec>
<sec id="s4">
<label>4</label>
<title>The augmented clinician model</title>
<p>Given that the human elements of therapy are irreplaceable and that autonomous AI carries profound risks, the clearest path forward is human-AI collaboration. We propose the Augmented Clinician model as a guiding philosophy. This framework reframes AI from a replacement to a powerful instrument that enhances the clinician&#x2019;s cognitive and administrative capacities. The core principle is that by delegating appropriate tasks to the machine, the clinician is freed to focus on the uniquely human aspects of care.</p>
<p>This model is defined by four key tenets. First, the human-in-the-loop is a non-negotiable default. The human clinician must retain ultimate authority, accountability, and responsibility for all clinical decisions (<xref ref-type="bibr" rid="B20">20</xref>). Second is task delegation based on complementary strengths. Computationally intensive work like data analysis and pattern recognition is assigned to the AI, while relationship-intensive work requiring empathy and ethical deliberation is reserved for the human. Third, the tools must be transparent and explainable. Clinicians must be able to scrutinize the AI&#x2019;s reasoning to evaluate its validity and identify potential bias. Fourth, all technology use must occur within the context of the co-constructed therapeutic alliance, with the patient&#x2019;s full informed consent.</p>
<p>To illustrate, consider a patient presenting with depression. An AI decision support system could synthesize the patient&#x2019;s electronic health record, lab results, and intake questionnaires. It would cross-reference this data with clinical literature to generate a concise, explainable brief for the clinician, highlighting potential comorbidities, risk factors, and evidence-based treatment options. The clinician, having reviewed this brief, can then devote the entire session to building rapport and exploring the patient&#x2019;s subjective experience. However, for this workflow to be viable, it is imperative to address the risk of cognitive overload. The interface must be designed with user-centered principles to seamlessly integrate into clinical workflows, ensuring that verifying AI outputs does not become a burden that detracts from patient care. While AI functions as a &#x201c;cognitive exoskeleton&#x201d; that ensures comprehensive data integration, the final clinical formulation remains a product of human judgment and shared decision-making with the patient. This model uses AI not to automate care, but to re-humanize it (<xref ref-type="table" rid="T1"><bold>Table 1</bold></xref>).</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Key ethical risks and mitigation strategies within the augmented clinician model.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Ethical principle and key risk</th>
<th valign="middle" align="left">Proposed mitigation strategy</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Transparency and informed consent<break/>Patients are unaware of AI&#x2019;s role, leading to a loss of autonomy and trust</td>
<td valign="middle" align="left">Mandate explicit disclosure of AI use in consent forms. Clinicians must explain the tool&#x2019;s purpose, limitations, and the patient&#x2019;s right to opt-out</td>
</tr>
<tr>
<td valign="middle" align="left">Bias and equity<break/>AI models trained on unrepresentative data amplify health disparities for marginalized groups</td>
<td valign="middle" align="left">Require fairness audits on AI tools. Use Explainable AI to identify biased reasoning. Mandate clinician oversight to contextualize AI output for individual patients</td>
</tr>
<tr>
<td valign="middle" align="left">Human oversight and accountability<break/>Over-reliance on &#x201c;black box&#x201d; AI abrogates professional responsibility and makes it impossible to assign liability for errors</td>
<td valign="middle" align="left">Legislate a &#x201c;human-in-the-loop&#x201d; requirement for all high-risk clinical decisions. Prohibit autonomous therapeutic AI. The clinician retains ultimate accountability</td>
</tr>
<tr>
<td valign="middle" align="left">Privacy and security<break/>Sensitive mental health data is breached or used unethically for commercial purposes</td>
<td valign="middle" align="left">Enforce strict compliance with data protection laws like HIPAA. Use privacy-preserving techniques. Mandate clear data use policies</td>
</tr>
<tr>
<td valign="middle" align="left">Safety and efficacy<break/>Unvalidated AI tools provide harmful or inaccurate advice, particularly in crisis situations</td>
<td valign="middle" align="left">Establish rigorous validation standards for AI medical devices (e.g., via FDA/MHRA). Prohibit AI use for autonomous crisis response. AI alerts must be routed to a human professional</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s5" sec-type="discussion">
<label>5</label>
<title>Discussion and future directions</title>
<p>Translating the Augmented Clinician model from concept to practice requires a concerted, multi-stakeholder effort. The rapid pace of technological development is far outstripping the capacity of our regulatory and educational systems. A responsible transition depends on building robust governance, transforming professional training, and strategically redirecting research.</p>
<p>First, we need a coherent governance framework. This should be built on international ethical principles like those from the WHO and employ a risk-based approach to regulation, as mandated by the EU Artificial Intelligence Act (Regulation (EU) 2024/1689). Under this framework, AI systems used in healthcare are often classified as &#x201c;high-risk,&#x201d; requiring stringent validation and human oversight (Article 14) (<xref ref-type="bibr" rid="B21">21</xref>). National and state-level legislation, such as the Illinois Wellness and Oversight for Psychological Resources Act (Public Act 104-0054), which explicitly prohibits AI from making independent therapeutic decisions or engaging in &#x201c;therapeutic communication&#x201d; without licensed professional oversight, is crucial for codifying the human-in-the-loop principle into law (<xref ref-type="bibr" rid="B22">22</xref>).</p>
<p>Second, we must address the significant competency gap among healthcare professionals by embedding AI literacy into professional education. Medical, psychology, and nursing curricula must include foundational training on the principles, capabilities, and limitations of AI in clinical contexts (<xref ref-type="bibr" rid="B23">23</xref>). Crucially, this training must cultivate a mindset of healthy skepticism and critical appraisal. Unlike standard diagnostic lab tests, AI outputs are probabilistic suggestions rather than objective facts. Clinicians must be retrained to verify rather than blindly trust these algorithmic recommendations. This is not about teaching clinicians to code but empowering them to be informed and critical users of these new tools.</p>
<p>Finally, the scientific research agenda must shift from a competitive to a collaborative framework. For too long, the focus has been on proving AI can perform a task in isolation, often pitting human against machine. Future research should prioritize understanding and optimizing human-AI collaboration (<xref ref-type="bibr" rid="B24">24</xref>). Key questions include how to design intuitive interfaces that support clinical reasoning, how AI tools can be used to strengthen the human therapeutic alliance, and what best practices are for co-designing equitable tools with underserved communities.</p>
<p>In conclusion, the global mental health crisis demands bold solutions, and AI offers tools of undeniable power. The unguided pursuit of automation, however, threatens to replace the genuine human connection at the heart of healing with a fragile simulation. The Augmented Clinician model offers a pragmatic and ethically grounded alternative. It provides a roadmap to harness the power of AI not to simulate humanity, but to empower it. By leveraging technology to support and unburden our human clinicians, we can ensure that as our tools become more intelligent, our care becomes more profoundly human.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="author-contributions">
<title>Author contributions</title>
<p>Q-NR: Writing &#x2013; original draft. S-QH: Writing &#x2013; review &amp; editing. Z-HS: Writing &#x2013; review &amp; editing. S-MZ: Writing &#x2013; review &amp; editing, Resources.</p></sec>
<sec id="s8" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s9" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript. Generative AI was used solely for the purpose of language editing and polishing, which included improving grammar, syntax, and clarity to enhance the manuscript&#x2019;s readability. The authors meticulously reviewed and revised all AI-generated suggestions and retain full responsibility for the intellectual content, arguments, and final wording of the article.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Heinz</surname> <given-names>A</given-names></name>
<name><surname>Liu</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>Challenges and chances for mental health care in the 21st century</article-title>. <source>World Psychiatry</source>. (<year>2022</year>) <volume>21</volume>:<page-range>423&#x2013;4</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/wps.21006</pub-id>, PMID: <pub-id pub-id-type="pmid">36073712</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<label>2</label>
<mixed-citation publication-type="book">
<person-group person-group-type="author"><collab>World Health Organization</collab>
</person-group>. <source>World mental health report: transforming mental health for all</source>. <publisher-loc>Geneva</publisher-loc>: 
<publisher-name>World Health Organization</publisher-name> (<year>2022</year>).
</mixed-citation>
</ref>
<ref id="B3">
<label>3</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Nyakhar</surname> <given-names>S</given-names></name>
<name><surname>Wang</surname> <given-names>H</given-names></name>
</person-group>. 
<article-title>Effectiveness of artificial intelligence chatbots on mental health and well-being in college students: a rapid systematic review</article-title>. <source>Front Psychiatry</source>. (<year>2025</year>) <volume>16</volume>:<elocation-id>1621768</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpsyt.2025.1621768</pub-id>, PMID: <pub-id pub-id-type="pmid">41195352</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<label>4</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Warraich</surname> <given-names>HJ</given-names></name>
<name><surname>Tazbaz</surname> <given-names>T</given-names></name>
<name><surname>Califf</surname> <given-names>RM</given-names></name>
</person-group>. 
<article-title>FDA perspective on the regulation of artificial intelligence in health care and biomedicine</article-title>. <source>JAMA</source>. (<year>2025</year>) <volume>333</volume>:<page-range>241&#x2013;7</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1001/jama.2024.21451</pub-id>, PMID: <pub-id pub-id-type="pmid">39405330</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<label>5</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Faulconbridge</surname> <given-names>J</given-names></name>
<name><surname>Sarwar</surname> <given-names>A</given-names></name>
<name><surname>Spring</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>How professionals adapt to artificial intelligence: the role of intertwined boundary work</article-title>. <source>J Manag Stud</source>. (<year>2025</year>) <volume>62</volume>:<fpage>1991</fpage>&#x2013;<lpage>2024</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/joms.12936</pub-id>, PMID: <pub-id pub-id-type="pmid">41738386</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<label>6</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Fl&#xfc;ckiger</surname> <given-names>C</given-names></name>
<name><surname>Del Re</surname> <given-names>AC</given-names></name>
<name><surname>Wampold</surname> <given-names>BE</given-names></name>
<name><surname>Horvath</surname> <given-names>AO</given-names></name>
</person-group>. 
<article-title>The alliance in adult psychotherapy: a meta-analytic synthesis</article-title>. <source>Psychotherapy</source>. (<year>2018</year>) <volume>55</volume>:<page-range>316&#x2013;40</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1037/pst0000172</pub-id>, PMID: <pub-id pub-id-type="pmid">29792475</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<label>7</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cruz-Gonzalez</surname> <given-names>P</given-names></name>
<name><surname>He</surname> <given-names>AWJ</given-names></name>
<name><surname>Lam</surname> <given-names>EP</given-names></name>
<name><surname>Ng</surname> <given-names>IMC</given-names></name>
<name><surname>Li</surname> <given-names>MW</given-names></name>
<name><surname>Hou</surname> <given-names>R</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence in mental health care: a systematic review of diagnosis, monitoring, and intervention applications</article-title>. <source>Psychol Med</source>. (<year>2025</year>) <volume>55</volume>:<fpage>e18</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1017/S0033291724003295</pub-id>, PMID: <pub-id pub-id-type="pmid">39911020</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<label>8</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Xu</surname> <given-names>X</given-names></name>
<name><surname>Li</surname> <given-names>J</given-names></name>
<name><surname>Zhu</surname> <given-names>Z</given-names></name>
<name><surname>Zhao</surname> <given-names>L</given-names></name>
<name><surname>Wang</surname> <given-names>H</given-names></name>
<name><surname>Song</surname> <given-names>C</given-names></name>
<etal/>
</person-group>. 
<article-title>A comprehensive review on synergy of multi-modal data and AI technologies in medical diagnosis</article-title>. <source>Bioengineering</source>. (<year>2024</year>) <volume>11</volume>:<elocation-id>219</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/bioengineering11030219</pub-id>, PMID: <pub-id pub-id-type="pmid">38534493</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<label>9</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Onnela</surname> <given-names>JP</given-names></name>
</person-group>. 
<article-title>Opportunities and challenges in the collection and analysis of digital phenotyping data</article-title>. <source>Neuropsychopharmacology</source>. (<year>2021</year>) <volume>46</volume>:<fpage>45</fpage>&#x2013;<lpage>54</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41386-020-0771-3</pub-id>, PMID: <pub-id pub-id-type="pmid">32679583</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<label>10</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hill</surname> <given-names>ED</given-names></name>
<name><surname>Kashyap</surname> <given-names>P</given-names></name>
<name><surname>Raffanello</surname> <given-names>E</given-names></name>
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Moffitt</surname> <given-names>TE</given-names></name>
<name><surname>Caspi</surname> <given-names>A</given-names></name>
<etal/>
</person-group>. 
<article-title>Prediction of mental health risk in adolescents</article-title>. <source>Nat Med</source>. (<year>2025</year>) <volume>31</volume>:<page-range>1840&#x2013;6</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41591-025-03560-7</pub-id>, PMID: <pub-id pub-id-type="pmid">40044931</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<label>11</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Farzan</surname> <given-names>M</given-names></name>
<name><surname>Ebrahimi</surname> <given-names>H</given-names></name>
<name><surname>Pourali</surname> <given-names>M</given-names></name>
<name><surname>Sabeti</surname> <given-names>F</given-names></name>
</person-group>. 
<article-title>Artificial intelligence-powered cognitive behavioral therapy chatbots: a systematic review</article-title>. <source>Iran J Psychiatry</source>. (<year>2025</year>) <volume>20</volume>:<page-range>102&#x2013;10</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.18502/ijps.v20i1.17395</pub-id>, PMID: <pub-id pub-id-type="pmid">40093525</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<label>12</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Darcy</surname> <given-names>A</given-names></name>
<name><surname>Daniels</surname> <given-names>J</given-names></name>
<name><surname>Salinger</surname> <given-names>D</given-names></name>
<name><surname>Wicks</surname> <given-names>P</given-names></name>
<name><surname>Robinson</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>Evidence of human-level bonds established with a digital conversational agent: cross-sectional, retrospective observational study</article-title>. <source>JMIR Form Res</source>. (<year>2021</year>) <volume>5</volume>:<fpage>e27868</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.2196/27868</pub-id>, PMID: <pub-id pub-id-type="pmid">33973854</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<label>13</label>
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Iftikhar</surname> <given-names>Z</given-names></name>
<name><surname>Xiao</surname> <given-names>A</given-names></name>
<name><surname>Ransom</surname> <given-names>S</given-names></name>
<name><surname>Huang</surname> <given-names>J</given-names></name>
<name><surname>Suresh</surname> <given-names>H</given-names></name>
</person-group>. 
<article-title>How LLM counselors violate ethical standards in mental health practice: a practitioner-informed framework</article-title>. <source>Proc AAAI/ACM Conf AI Ethics Soc</source>. (<year>2025</year>) <volume>8</volume>:<page-range>1311&#x2013;23</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1609/aies.v8i2.36632</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<label>14</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Aafjes-van Doorn</surname> <given-names>K</given-names></name>
<name><surname>Spina</surname> <given-names>DS</given-names></name>
<name><surname>Horne</surname> <given-names>SJ</given-names></name>
<name><surname>Bekes</surname> <given-names>V</given-names></name>
</person-group>. 
<article-title>The association between quality of therapeutic alliance and treatment outcomes in teletherapy: a systematic review and meta-analysis</article-title>. <source>Clin Psychol Rev</source>. (<year>2024</year>) <volume>110</volume>:<elocation-id>102430</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cpr.2024.102430</pub-id>, PMID: <pub-id pub-id-type="pmid">38636207</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<label>15</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Guthridge</surname> <given-names>M</given-names></name>
<name><surname>Giummarra</surname> <given-names>MJ</given-names></name>
</person-group>. 
<article-title>The taxonomy of empathy: a meta-definition and the nine dimensions of the empathic system</article-title>. <source>J Humanist Psychol</source>. (<year>2025</year>) <volume>65</volume>:<page-range>1386&#x2013;403</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/00221678211018015</pub-id>, PMID: <pub-id pub-id-type="pmid">41732152</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<label>16</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Montemayor</surname> <given-names>C</given-names></name>
<name><surname>Halpern</surname> <given-names>J</given-names></name>
<name><surname>Fairweather</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>In-principle obstacles for empathic AI: why we can&#x2019;t replace human empathy in healthcare</article-title>. <source>AI Soc</source>. (<year>2022</year>) <volume>37</volume>:<page-range>1353&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00146-021-01230-z</pub-id>, PMID: <pub-id pub-id-type="pmid">34054228</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<label>17</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hassija</surname> <given-names>V</given-names></name>
<name><surname>Chamola</surname> <given-names>V</given-names></name>
<name><surname>Mahapatra</surname> <given-names>A</given-names></name>
<name><surname>Singal</surname> <given-names>A</given-names></name>
<name><surname>Goel</surname> <given-names>D</given-names></name>
<name><surname>Huang</surname> <given-names>K</given-names></name>
<etal/>
</person-group>. 
<article-title>Interpreting black-box models: a review on explainable artificial intelligence</article-title>. <source>Cognit Comput</source>. (<year>2024</year>) <volume>16</volume>:<fpage>45</fpage>&#x2013;<lpage>74</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s12559-023-10179-8</pub-id>, PMID: <pub-id pub-id-type="pmid">41737715</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<label>18</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Obermeyer</surname> <given-names>Z</given-names></name>
<name><surname>Powers</surname> <given-names>B</given-names></name>
<name><surname>Vogeli</surname> <given-names>C</given-names></name>
<name><surname>Mullainathan</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title>. <source>Science</source>. (<year>2019</year>) <volume>366</volume>:<page-range>447&#x2013;53</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id>, PMID: <pub-id pub-id-type="pmid">31649194</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<label>19</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Norori</surname> <given-names>N</given-names></name>
<name><surname>Hu</surname> <given-names>Q</given-names></name>
<name><surname>Aellen</surname> <given-names>FM</given-names></name>
<name><surname>Faraci</surname> <given-names>FD</given-names></name>
<name><surname>Tzovara</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>Addressing bias in big data and AI for health care: a call for open science</article-title>. <source>Patterns</source>. (<year>2021</year>) <volume>2</volume>:<elocation-id>100347</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.patter.2021.100347</pub-id>, PMID: <pub-id pub-id-type="pmid">34693373</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<label>20</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Smith</surname> <given-names>H</given-names></name>
<name><surname>Birchley</surname> <given-names>G</given-names></name>
<name><surname>Ives</surname> <given-names>J</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in clinical decision-making: rethinking personal moral responsibility</article-title>. <source>Bioethics</source>. (<year>2024</year>) <volume>38</volume>:<fpage>78</fpage>&#x2013;<lpage>86</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/bioe.13222</pub-id>, PMID: <pub-id pub-id-type="pmid">37724044</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<label>21</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author"><collab>European Parliament, Council of the European Union</collab>
</person-group>. <article-title>Regulation (EU) 2024/1689 (Artificial Intelligence Act)</article-title>. <source>OJ L</source>. (<year>2024</year>) 2024/1689, <page-range>1&#x2013;144</page-range>. Available online at: <uri xlink:href="https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX:32024R1689">https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX:32024R1689</uri> (Accessed <date-in-citation content-type="access-date">November 2, 2025</date-in-citation>).
</mixed-citation>
</ref>
<ref id="B22">
<label>22</label>
<mixed-citation publication-type="web">
<person-group person-group-type="author"><collab>Illinois General Assembly</collab>
</person-group>. <source>Wellness and oversight for psychological resources act. Public act 104-0054 (HB 1806)</source> (<year>2025</year>). Available online at: <uri xlink:href="https://www.ilga.gov/legislation/publicacts/fulltext.asp?Name=104-0054">https://www.ilga.gov/legislation/publicacts/fulltext.asp?Name=104-0054</uri> (Accessed <date-in-citation content-type="access-date">November 2, 2025</date-in-citation>).
</mixed-citation>
</ref>
<ref id="B23">
<label>23</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Alowais</surname> <given-names>SA</given-names></name>
<name><surname>Alghamdi</surname> <given-names>SS</given-names></name>
<name><surname>Alsuhebany</surname> <given-names>N</given-names></name>
<name><surname>Alqahtani</surname> <given-names>T</given-names></name>
<name><surname>Alshaya</surname> <given-names>AI</given-names></name>
<name><surname>Almohareb</surname> <given-names>SN</given-names></name>
<etal/>
</person-group>. 
<article-title>Revolutionizing healthcare: the role of artificial intelligence in clinical practice</article-title>. <source>BMC Med Educ</source>. (<year>2023</year>) <volume>23</volume>:<fpage>689</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12909-023-04698-z</pub-id>, PMID: <pub-id pub-id-type="pmid">37740191</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<label>24</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ali</surname> <given-names>I</given-names></name>
<name><surname>Nguyen</surname> <given-names>K</given-names></name>
<name><surname>Ali</surname> <given-names>AM</given-names></name>
<name><surname>Cui</surname> <given-names>T</given-names></name>
</person-group>. 
<article-title>Human&#x2013;AI collaboration in knowledge ecosystems: a multidisciplinary review, integrative framework and future directions</article-title>. <source>J Knowl Manage</source>. (<year>2025</year>), <fpage>1</fpage>&#x2013;<lpage>22</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1108/JKM-03-2025-0431</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/516828">Xiaoqian Liu</ext-link>, Chinese Academy of Sciences (CAS), China</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1775625">Youjuan Hong</ext-link>, Fujian Medical University, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3142644">Mohammad Hossein Salemi</ext-link>, University of Tehran, Iran</p></fn>
</fn-group>
</back>
</article>