<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Psychology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-1078</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyg.2025.1730902</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Who is more trustworthy? The influence mechanism of AI vs. human doctor triage on user trust: testing the mediating effect of psychological distance, and a multiple moderating effects analysis</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Chen</surname>
<given-names>Jinghao</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2076139"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wang</surname>
<given-names>Huayang</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Qiu</surname>
<given-names>Xiaoyu</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3251277"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>School of Public Policy and Management, Guangxi University</institution>, <city>Nanning</city>, <country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>Regional Social Governance Innovation Research Center, Guangxi University</institution>, <city>Nanning</city>, <country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Xiaoyu Qiu, <email xlink:href="mailto:qiuxiaoyu@st.gxu.edu.cn">qiuxiaoyu@st.gxu.edu.cn</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-12">
<day>12</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>16</volume>
<elocation-id>1730902</elocation-id>
<history>
<date date-type="received">
<day>23</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>23</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>23</day>
<month>12</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Chen, Wang and Qiu.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Chen, Wang and Qiu</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-12">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Purpose/significance</title>
<p>To optimize smart healthcare services and advance the sustainable deployment of AI in medical triage, this study investigates differences in user trust between AI and human medical triage doctors and the underlying psychological mechanisms.</p>
</sec>
<sec>
<title>Methods/procedures</title>
<p>Four online experiments were conducted using a between-group design to systematically manipulate the medical triage doctors (AI vs. human), degree of anthropomorphism (high vs. low), task sensitivity (high vs. low), and AI technology adoption level (high vs. low). Participants were recruited online to view medical triage engagement screenshots, and respond to measures assessing perceived psychological distance, anthropomorphism, task sensitivity, AI technology adoption level, and user trust. Process macros were used to test the mediation and moderation effects.</p>
</sec>
<sec>
<title>Results/conclusions</title>
<p>The study found that (1) participants placed greater trust in human than in AI medical triage doctors; (2) psychological distance played a partial mediating role; (3) a high degree of anthropomorphism effectively reduced the psychological distance between participants and AI medical triage doctors; (4) in low task sensitivity scenarios, there was no significant difference in psychological distance from high-anthropomorphism AI and human medical triage doctors, and both were perceived as closer than low-anthropomorphism AI medical triage doctors. In high task sensitivity scenarios, psychological distance was closest for human medical triage doctors, followed by high-anthropomorphism AI medical triage doctors, and farthest for low-anthropomorphism AI medical triage doctors; and (5) high AI technology adoption level diminished the trust disparity between AI and human medical triage doctors; however, participants still exhibited a higher level of trust in human medical triage doctors. These results emphasize the importance of considering psychological distance in AI healthcare trust research, revealing the task reliance of anthropomorphism. The study also develops a comprehensive trust model that incorporates various moderating influences.</p>
</sec>
</abstract>
<kwd-group>
<kwd>AI technology adoption level</kwd>
<kwd>anthropomorphism</kwd>
<kwd>medical triage</kwd>
<kwd>psychological distance</kwd>
<kwd>task sensitivity</kwd>
<kwd>user trust</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the National Social Science Fund of China (Grant no. 22&#x0026;ZD326).</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="9"/>
<equation-count count="0"/>
<ref-count count="88"/>
<page-count count="18"/>
<word-count count="13802"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Personality and Social Psychology</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>A new wave of technological innovation, led by Artificial Intelligence (AI), is reshaping today&#x2019;s healthcare practices. In particular, large language models (LLMs) such as ChatGPT are catalyzing rapid advances in AI healthcare (<xref ref-type="bibr" rid="ref12">Chen et al., 2024</xref>; <xref ref-type="bibr" rid="ref50">Lu et al., 2023</xref>). AI-utilization is now present throughout the entire healthcare continuum, from smart hospital administration and drug-targeting identification to early disease screening (<xref ref-type="bibr" rid="ref36">Jiang et al., 2017</xref>; <xref ref-type="bibr" rid="ref86">Yu et al., 2018</xref>; <xref ref-type="bibr" rid="ref38">Keskinbora, 2019</xref>; <xref ref-type="bibr" rid="ref4">Attia et al., 2019</xref>), operating with unmatched breadth and depth. The profound integration of AI and healthcare has not only accelerated the progression of medical technology, it has also emerged as a central concern in general society. With regards to human-AI collaboration, calibrating users&#x2019; reliance so that they neither over-rely on nor categorically distrust AI systems, but instead achieve appropriate reliance, has become a central topic in human factors and automation-trust research (<xref ref-type="bibr" rid="ref43">Lee and See, 2004</xref>), offering an important lens through which to understand trust in AI systems in healthcare.</p>
<p>Increasing demand for healthcare services presents significant problems to the traditional &#x201C;patient-to-department&#x201D; arrangement. Patients frequently register for inappropriate departments due to a lack of medical knowledge, or suffer long wait times due to misunderstanding hospital protocols or departmental practitioners&#x2019; lack of specialist knowledge, leading to ineffective care and unsatisfactory experiences. Medical personnel, overwhelmed by heightened service demands, find it challenging to deliver individualized treatment, leading to intensified doctor-patient conflicts (<xref ref-type="bibr" rid="ref19">Cui and Liu, 2020</xref>; <xref ref-type="bibr" rid="ref75">Tyler et al., 2024</xref>). To address these issues, the National Health Commission of the People&#x2019;s Republic of China released a policy guideline in November 2024 titled Reference Guidelines for Artificial Intelligence Application Scenarios in the Health Care Industry, which explicitly designated &#x201C;AI&#x202F;+&#x202F;triage&#x201D; as a priority area of development (<xref ref-type="bibr" rid="ref59">National Health Commission of the People&#x2019;s Republic of China, 2024</xref>). As a result, AI medical triage doctors have been increasingly proposed and developed (<xref ref-type="bibr" rid="ref36">Jiang et al., 2017</xref>). Through close integration of AI with clinical workflows, doctors are able to utilize big data, natural language processing, and deep learning to automate the collection of patient histories, generate preliminary diagnostic suggestions, and direct patients to appropriate care pathways, thereby easing clinicians&#x2019; workload (<xref ref-type="bibr" rid="ref75">Tyler et al., 2024</xref>; <xref ref-type="bibr" rid="ref70">Steerling et al., 2025</xref>). 
This has enabled shifts from &#x201C;patients seeking departments&#x201D; to &#x201C;departments seeking patients,&#x201D; and from &#x201C;blind registration&#x201D; to &#x201C;precision services,&#x201D; to ultimately deliver a far more streamlined, patient-centered care experience (<xref ref-type="bibr" rid="ref41">Leachman and Merlino, 2017</xref>). Nevertheless, despite the significant benefits AI medical triage doctors can provide, their functions as primary healthcare doctors continue to be under public scrutiny and judgment. Studies suggest that the intricate and multifaceted characteristics of medical situations as well as the &#x201C;black-box&#x201D; nature and uncertainty of AI algorithms lead to widespread skepticism among users regarding AI medical triage doctors, affecting its advancement and implementation within healthcare practices (<xref ref-type="bibr" rid="ref19">Cui and Liu, 2020</xref>; <xref ref-type="bibr" rid="ref30">Hamet and Tremblay, 2017</xref>). Research suggests that integrating Theory of Mind (ToM) and Metacognition (MC) into AI processes can significantly enhance the social and self-regulatory capabilities of intelligent systems. Specifically, when interacting with humans and other agents, ToM improves the social cognition and interaction quality of AI, while MC helps the AI monitor and correct its own behavior, thereby reducing errors and increasing autonomy (<xref ref-type="bibr" rid="ref6">Bamicha and Drigas, 2024</xref>). However, even when AI medical triage doctors perform as well as or better than human medical triage doctors on objective diagnostic tasks, patients may still reject medical decisions made by AI algorithms because of concerns about moral responsibility and a perceived lack of warmth or human touch, a phenomenon that has been described as &#x201C;resistance to medical AI&#x201D; (<xref ref-type="bibr" rid="ref49">Longoni et al., 2019</xref>). 
Patients&#x2019; attitudes toward AI medical triage doctors are therefore not determined solely by accuracy or convenience, but are also strongly shaped by affective experience and ethical concerns.</p>
<p>Despite its technical power, then, AI integration in healthcare should aim to be a cognitive amplifier and assistive tool for medical professionals rather than as a full replacement for human medical triage doctors. In populations with neurodevelopmental disorders and other complex conditions, for example, AI tools can provide valuable support in assessment and communication, but they should remain under the supervision of human medical triage doctors&#x2019; professional judgment (<xref ref-type="bibr" rid="ref55">Moraiti and Drigas, 2023</xref>). Furthermore, in the development and deployment of medical AI, it is crucial to emphasize adherence to ethical principles concerning data protection, safeguards against algorithmic bias, transparency and explainability, and clear accountability mechanisms, prioritizing the assistive and augmentative uses of AI rather than its replacement possibilities. Such practices are key to fostering more positive user attitudes and understandings regarding the use of intelligent systems in healthcare (<xref ref-type="bibr" rid="ref69">Solanki et al., 2023</xref>; <xref ref-type="bibr" rid="ref60">Palaniappan et al., 2024</xref>).</p>
<p>Consequently, it is of considerable practical significance to explore the trust disparity between AI and human medical triage doctors, along with its contributing elements, for the advancement of AI technology adoption within medical triage practices, and to enable the sustainable development of AI medical triage doctors. Therefore, this study concentrates on the following fundamental inquiries:</p>
<list list-type="simple">
<list-item>
<p>(1) In medical triage services, can users&#x2019; trust in AI medical triage doctors equal their trust in traditional human medical triage doctors? In other words, who do patients find to be more trustworthy: AI or human medical triage doctors?</p>
</list-item>
<list-item>
<p>(2) What impact do medical triage doctors have on user trust, and what psychological mechanisms might explain trust disparities?</p>
</list-item>
<list-item>
<p>(3) Are there particular task-related environmental elements or individual user traits that can influence the establishment of this trust relationship?</p>
</list-item>
</list>
</sec>
<sec id="sec2">
<label>2</label>
<title>Literature review</title>
<p>AI adoption in medicine involves the use of complex algorithms and methods, such as machine learning, to analyze medical data and support clinical decision-making. By thoroughly analyzing vast amounts of medical data, including imaging and electronic health records, AI seeks to replicate the cognitive capabilities of human medical triage doctors to provide cognitive aids and decision-making support for various tasks, including disease diagnosis, treatment planning, and patient health management (<xref ref-type="bibr" rid="ref64">Rajpurkar et al., 2022</xref>; <xref ref-type="bibr" rid="ref54">Moor et al., 2023</xref>). Due to advances in digital health technologies, AI is no longer confined to analyzing static medical records. By integrating electronic health records, medical imaging, wearable sensors, and remote monitoring data, AI extends clinical assessment from a single encounter to continuous monitoring, helping doctors identify risks earlier and more accurately, and allowing for the development of individualized treatment plans. In this process, machine learning methods such as neural networks, support vector machines, decision trees, and rule-based classifiers are widely used to build disease prediction and classification models (<xref ref-type="bibr" rid="ref7">Bamicha et al., 2024</xref>; <xref ref-type="bibr" rid="ref8">Bamicha et al., 2025</xref>), substantially improving the speed and accuracy of diagnosis and clinical decision-making.</p>
<p>Scholars worldwide have been exploring the driving role of big data in healthcare, conducting extensive work on core areas such as multimodal data fusion and knowledge graph construction. This body of research consistently shows that AI holds substantial promises for basic medical research as well as disease diagnosis and treatment; on certain well-defined tasks, its diagnostic accuracy and efficiency have even surpassed those of human medical triage doctors (<xref ref-type="bibr" rid="ref87">Zhu et al., 2024</xref>; <xref ref-type="bibr" rid="ref18">Contreras and Vehi, 2018</xref>; <xref ref-type="bibr" rid="ref29">Haenssle et al., 2018</xref>; <xref ref-type="bibr" rid="ref51">MacIntyre et al., 2023</xref>; <xref ref-type="bibr" rid="ref71">Ting et al., 2019</xref>). For example, AlphaFold successfully predicted the three-dimensional structures of proteins using deep learning (<xref ref-type="bibr" rid="ref37">Jumper et al., 2021</xref>). In diagnostics, AI medical triage doctors have demonstrated the ability to identify lung nodules, breast cancer (<xref ref-type="bibr" rid="ref18">Contreras and Vehi, 2018</xref>), and various skin diseases (<xref ref-type="bibr" rid="ref29">Haenssle et al., 2018</xref>). Furthermore, IBM&#x2019;s Watson not only aids in diagnosing heart conditions (<xref ref-type="bibr" rid="ref34">Hutson, 2017</xref>), but has also shown higher accuracy than human medical triage doctors in over a thousand cancer diagnoses (<xref ref-type="bibr" rid="ref48">Lohr, 2016</xref>). Driven by this strong technological foundation, AI medical triage is rapidly emerging as a key application of AI in the healthcare service process. Initially, AI medical triage doctors functioned primarily as a public-facing, self-service tool for consultation and triage. 
Based predominantly on rules and machine learning, its goal was to provide patients with triage recommendations on whether to seek medical attention, and where to do so (<xref ref-type="bibr" rid="ref5">Baker et al., 2020</xref>). Later, AI medical triage doctors manifested as physical robots providing initial consultations, appointment scheduling, information retrieval, and hospital navigation services, effectively mitigating conventional healthcare challenges such as appointment booking difficulties, appointment inaccuracies, and prolonged wait times. With the widespread adoption of mobile internet, triage services swiftly transitioned to online platforms, such as official accounts and mini-programs. Applications like Tencent Miying and Babylon Health transcended temporal and spatial limitations, allowing patients to participate in remote initial consultations and pre-consultation services at any time, from any location. By processing textual or vocal descriptions from patients, intelligent AI medical triage doctors employed backend algorithms to deliver accurate triage advice as well as offering relevant disease-related information. Subsequently, AI medical triage doctors progressed into intelligent wearable devices, transitioning from the passive &#x201C;question-and-answer&#x201D; format to proactive &#x201C;health management&#x201D; (<xref ref-type="bibr" rid="ref21">Dias and Paulo, 2018</xref>; <xref ref-type="bibr" rid="ref81">Xie et al., 2021</xref>). Current AI medical triage doctors have evolved to an advanced stage, characterized by the integration of cutting-edge technologies such as natural language processing and medical LLMs which allow for in-depth analysis of multi-source, heterogeneous data across the entire clinical workflow. 
Unlike traditional question-answer systems which were reliant on fixed rules or knowledge graphs, the latest generation of AI medical triage doctors, driven by LLMs, are able to handle complex, open-ended questions with superior flexibility, generating logical and coherent in-depth responses, ultimately redefining the quality and dynamics of patient&#x2013;provider interactions (<xref ref-type="bibr" rid="ref83">Xu et al., 2024</xref>; <xref ref-type="bibr" rid="ref66">Shaheen, 2021</xref>). In summary, AI medical triage doctors have transformed from a singular technological application into a holistic, multi-modal intelligent solution encompassing the full healthcare continuum.</p>
<p>Despite notable progress in the technology, applications, and functions of AI medical triage doctors, several gaps remain. First, existing research lacks a systematic account of trust mechanisms. Most studies have focused on diagnostic accuracy, efficiency, and system performance, or on the general user acceptance of AI in healthcare (<xref ref-type="bibr" rid="ref5">Baker et al., 2020</xref>; <xref ref-type="bibr" rid="ref35">Ilicki, 2022</xref>; <xref ref-type="bibr" rid="ref72">Townsend et al., 2023</xref>; <xref ref-type="bibr" rid="ref75">Tyler et al., 2024</xref>), rarely exploring users&#x2019; deeper psychological perceptions &#x2013; especially that of psychological distance &#x2013; to unpack trust differences between AI and human medical triage doctors and the mechanisms underlying them. Second, research contexts and variables have generally been constrained. Numerous studies have been limited to individual variables or particular situations, neglecting to thoroughly investigate the interconnectivity among various components. Much of the existing research has focused on macro, conceptual level and lacked systematic empirical testing in concrete medical triage tasks and settings of how multiple factors might shape users&#x2019; trust in and reliance on AI. Third, inadequate consideration has been given to the individual patient differences. Current studies have rarely explored how individual user characteristics influence their trust in AI medical triage doctors. Debates on the normative use of AI in healthcare from ethical or governance perspectives have increasingly emphasized the need for actionable developer guidelines and regulatory frameworks addressing data protection, safeguards against algorithmic bias, transparency and explainability, and clear accountability mechanisms (<xref ref-type="bibr" rid="ref69">Solanki et al., 2023</xref>; <xref ref-type="bibr" rid="ref60">Palaniappan et al., 2024</xref>). 
However, relatively little is known about how the individual characteristics of users might moderate their trust in AI medical triage.</p>
<p>Therefore, the current study incorporates psychological distance in exploring the effectiveness of AI medical triage doctors, allowing for the development of a comprehensive model that includes both mediating and multiple moderating effects to explain gaps. This study seeks to thoroughly investigate the fundamental question of whether patients find AI or human medical triage doctors to be more reliable, offering focused theoretical and empirical insights for the enhancement and application of AI medical triage doctors going forward.</p>
</sec>
<sec id="sec3">
<label>3</label>
<title>Study hypotheses</title>
<sec id="sec4">
<label>3.1</label>
<title>The effect of medical triage doctors (AI vs. human) on user trust</title>
<p>The adoption of medical services hinges critically on the trust of the patient. The Competence&#x2013;Personality Theory posits that individuals prioritize two essential needs when selecting between human and AI decision-making: functional needs (i.e., objective criteria for effective task execution) and psychological needs (i.e., subjective aspirations for recognition as distinct individuals; <xref ref-type="bibr" rid="ref63">Qin et al., 2025</xref>). In comparison to conventional healthcare models, AI medical triage doctors provide enhanced, more convenient, and more efficient medical services (<xref ref-type="bibr" rid="ref67">Shukur et al., 2024</xref>). Nonetheless, these AI medical triage doctors are devoid of emotion, empathy, and human warmth, hindering their ability to deliver individualized guidance with a human touch. Their decision-making processes are characterized by a deficiency in transparency and interpretability, and they are incapable of delivering appropriate psychological support or communication akin to that of human medical triage doctors (<xref ref-type="bibr" rid="ref67">Shukur et al., 2024</xref>; <xref ref-type="bibr" rid="ref31">Harada et al., 2020</xref>; <xref ref-type="bibr" rid="ref11">Chan, 2023</xref>). Numerous studies have demonstrated that patients prefer healthcare services performed by human medical triage doctors (<xref ref-type="bibr" rid="ref49">Longoni et al., 2019</xref>). Human medical triage doctors are able to render more empathetic and adaptable decisions informed by experience, intuition, ethical considerations, and patient requirements (<xref ref-type="bibr" rid="ref67">Shukur et al., 2024</xref>). 
Even when AI medical triage doctors&#x2019; responses achieve or exceed the accuracy of human medical triage doctors, AI medical triage doctors remain fundamentally auxiliary tools under the supervision of human medical triage doctors (<xref ref-type="bibr" rid="ref76">Von Eschenbach, 2021</xref>) and act as a complement to, rather than a replacement of, human medical triage doctors (<xref ref-type="bibr" rid="ref55">Moraiti and Drigas, 2023</xref>). Patients routinely exhibit heightened trust in human medical triage doctors (<xref ref-type="bibr" rid="ref65">Riedl et al., 2024</xref>). Consequently, the following hypothesis was proposed:</p>
<disp-quote>
<p><italic>Hypothesis 1 (H1)</italic>: In contrast to the AI medical triage doctor, the human medical triage doctor is more likely to engender user trust.</p>
</disp-quote>
</sec>
<sec id="sec5">
<label>3.2</label>
<title>The mediating effect of psychological distance</title>
<p>Psychological distance denotes the subjective experience of proximity regarding time, space, and other dimensions that humans associate with people, events, and things in their cognition, emotions, and behavior (<xref ref-type="bibr" rid="ref74">Trope and Liberman, 2010</xref>). Construal Level Theory (<xref ref-type="bibr" rid="ref46">Liberman and Trope, 2008</xref>) posits that subjective perceived distance significantly impacts cognitive information processing, as well as individuals&#x2019; judgments, evaluations, predictions, choices (<xref ref-type="bibr" rid="ref8001">Park and Park, 2016</xref>; <xref ref-type="bibr" rid="ref73">Trope and Liberman, 2003</xref>), and behaviors (<xref ref-type="bibr" rid="ref74">Trope and Liberman, 2010</xref>). Comprehensive research findings indicate a strong correlation between psychological distance and trust, whereby diminishing psychological distance markedly increases trust (<xref ref-type="bibr" rid="ref77">Wang et al., 2008</xref>; <xref ref-type="bibr" rid="ref9">Bandara et al., 2021</xref>; <xref ref-type="bibr" rid="ref61">Park et al., 2024</xref>).</p>
<p>Human medical triage doctors, having intrinsically human attributes such as social presence, empathy, and emotional resonance, are better equipped to bridge the psychological gap between them and their patients (<xref ref-type="bibr" rid="ref2">Ahn et al., 2021</xref>; <xref ref-type="bibr" rid="ref15">Chin et al., 2024</xref>). Conversely, AI medical triage doctors have been shown to demonstrate increased psychological detachment from patients owing to constraints in their emotional empathy and intricate communication abilities (<xref ref-type="bibr" rid="ref33">Hsieh and Lee, 2024</xref>; <xref ref-type="bibr" rid="ref82">Xiong et al., 2024</xref>). Therefore, the following hypothesis was proposed:</p>
<disp-quote>
<p><italic>Hypothesis 2 (H2)</italic>: Psychological distance plays a positive mediating role in the influence of medical triage doctors (both AI and human) on user trust.</p>
</disp-quote>
</sec>
<sec id="sec6">
<label>3.3</label>
<title>The moderating effect of anthropomorphism</title>
<p>Anthropomorphism is the assignment of human-like traits, motives, intents, or feelings to actual or fictional non-human entities (<xref ref-type="bibr" rid="ref23">Epley et al., 2007</xref>; <xref ref-type="bibr" rid="ref22">Epley et al., 2008</xref>). Current studies have demonstrated that anthropomorphism designs, such as human-like figures or mascots, can markedly diminish psychological distance between users and non-human intelligent agents (<xref ref-type="bibr" rid="ref3">Ali et al., 2021</xref>; <xref ref-type="bibr" rid="ref16">Chung and Han, 2022</xref>; <xref ref-type="bibr" rid="ref28">Guido and Peluso, 2015</xref>; <xref ref-type="bibr" rid="ref44">Li and Sung, 2021</xref>). In 1970, Japanese robotics expert Masahiro Mori introduced the Uncanny Valley effect (<xref ref-type="bibr" rid="ref56">Mori et al., 2012</xref>), which posits that within a specific range, human attraction toward robots escalates with the increase in their anthropomorphism characteristics, but when an entity becomes almost human yet still imperfect, people may experience feelings of eeriness, discomfort, or dislike, causing affinity (and potentially trust) to drop sharply&#x2014;forming the &#x201C;valley&#x201D; (<xref ref-type="bibr" rid="ref56">Mori et al., 2012</xref>). When AI attains a specific degree of anthropomorphism, before triggering the Uncanny Valley effect, its proficiency at mimicking human interaction, offering emotional support, and exhibiting adaptive cognition is markedly enhanced (<xref ref-type="bibr" rid="ref80">Waytz et al., 2010</xref>). At lower levels of anthropomorphism, the non-human characteristics of AI become increasingly apparent, prompting users to favor trust and rapport with actual human medical triage doctors. 
Provided anthropomorphism remains below the uncanny-valley threshold, the ability of AI to mimic human dialogue and provide affective support helps bridge the psychological distance between it and its users, and through this aligned perceived proximity, its human-like interface can replace certain functions of physicians (<xref ref-type="bibr" rid="ref84">Yang et al., 2023</xref>). Therefore, the following hypotheses were proposed:</p>
<disp-quote>
<p><italic>Hypothesis 3 (H3)</italic>: The degree of anthropomorphism influences the effect of the AI medical triage doctor on users&#x2019; perceived psychological distance.</p>
<p><italic>Hypothesis 3a (H3a)</italic>: When anthropomorphism of AI is minimal, the human medical triage doctor is able to establish a closer psychological distance with users than the AI medical triage doctor.</p>
<p><italic>Hypothesis 3b (H3b)</italic>: When the degree of anthropomorphism is high, no substantial disparity is seen in users&#x2019; perceived psychological distance with AI and human medical triage doctors.</p>
</disp-quote>
</sec>
<sec id="sec7">
<label>3.4</label>
<title>The further moderating effect of task sensitivity</title>
<p>Task sensitivity denotes the extent to which individuals recognize privacy hazards, societal pressures, and possible adverse outcomes while discussing particular health concerns (<xref ref-type="bibr" rid="ref45">Liang et al., 2018</xref>). Privacy Computing Theory posits that internet or computer users evaluate potential advantages against potential dangers prior to revealing personal information. As perceived threats escalate, users&#x2019; psychological demand for security and trust intensifies (<xref ref-type="bibr" rid="ref39">Kim et al., 2019</xref>; <xref ref-type="bibr" rid="ref79">Wang et al., 2024</xref>).</p>
<p>In low task sensitivity scenarios (e.g., consultations for common, non-sensitive illnesses), users perceive lower risks in revealing information. In such scenarios, both human and high-anthropomorphism AI medical triage doctors are capable of delivering human-like encounters and are able to cultivate a sense of psychological closeness in the user. In contrast, low-anthropomorphism AI medical triage doctors, which are devoid of human traits, foster increased psychological distance from users. In high task sensitivity scenarios (e.g., consultations involving extremely sensitive or private issues or severe health conditions), users&#x2019; expectations of their service-providing doctor&#x2019;s professionalism, empathy, and privacy safeguards increase significantly. Currently, human medical triage doctors utilize their distinctive interpersonal warmth and intuitive talents to foster a sense of security and trust that is difficult for AI medical triage doctors to emulate, allowing the human doctors to create a closer psychological connection with the patient. Even a high-anthropomorphism AI medical triage doctor may evoke user apprehension due to its non-human characteristics, increasing the psychological distance experienced by the patient, compared to the psychological distance they perceive with a human medical triage doctor. The robotic characteristics of low-anthropomorphism AI exacerbate this disadvantage, resulting in even more psychological distance. As such, the following hypothesis was proposed:</p>
<disp-quote>
<p><italic>Hypothesis 4 (H4)</italic>: The moderating effect of anthropomorphism is further moderated by task sensitivity.</p>
<p><italic>Hypothesis 4a (H4a)</italic>: In low task sensitivity scenarios, users experience no significant difference in perceived psychological distance from either the human or the high-anthropomorphism AI medical triage doctors. Furthermore, users&#x2019; perceived psychological distance from both doctors is much less than that with the low-anthropomorphism AI medical triage doctor.</p>
<p><italic>Hypothesis 4b (H4b)</italic>: In high task sensitivity scenarios, the human medical triage doctor will be regarded as having the closest psychological distance to users, followed by the high-anthropomorphism AI medical triage doctor, while users perceive the furthest psychological distance from the low-anthropomorphism AI medical triage doctor.</p>
</disp-quote>
</sec>
<sec id="sec8">
<label>3.5</label>
<title>The moderating effect of users&#x2019; level of AI adoption</title>
<p>AI technology adoption level refers to users&#x2019; approval of both the functional and emotional dimensions of AI &#x2013; affecting their willingness to utilize AI tools &#x2013; which are fundamentally rooted in one&#x2019;s pre-existing attitudes and subjective views of new technologies. This concept has been examined in-depth through the Technology Acceptance Model (TAM; <xref ref-type="bibr" rid="ref58">Na et al., 2022</xref>) as well as the Unified Theory of Acceptance and Use of Technology (UTAUT; <xref ref-type="bibr" rid="ref57">Na et al., 2023</xref>). In the context of AI technology&#x2019;s growing incorporation into healthcare services, users&#x2019; previous opinions and acceptance levels toward AI technology may influence their trust in both AI and human medical triage doctors.</p>
<p>Individuals exhibiting an elevated level of adoption of AI technology demonstrate increased receptiveness toward novel technologies, enhanced awareness of AI&#x2019;s potential in healthcare, and greater understanding of the benefits of AI in improving efficiency, objectivity, and continuous service (<xref ref-type="bibr" rid="ref24">Esfandiari et al., 2024</xref>). As a result, individuals with a high adoption level of AI may be more inclined to trust AI medical triage doctors, and may regard AI medical triage doctors to be comparable to human medical triage doctors in specific respects. Conversely, people with a lower level of AI adoption are more prone to exhibit heightened suspicion and resistance toward the reliability of AI medical triage doctors, as well as of claims of them demonstrating an ability for &#x201C;human touch&#x201D; (<xref ref-type="bibr" rid="ref14">Chen et al., 2022</xref>). Overall, users&#x2019; confidence in human medical triage doctors markedly surpasses their confidence in AI medical triage doctors. Therefore, the following hypotheses were proposed:</p>
<disp-quote>
<p><italic>Hypothesis 5 (H5)</italic>: A user&#x2019;s level of adoption of AI technology moderates the impact of the medical triage doctor on user trust.</p>
</disp-quote>
<disp-quote>
<p><italic>Hypothesis 5a (H5a)</italic>: Among users with a high level of AI technology adoption, no substantial disparity exists between the AI and human medical triage doctors in terms of user trust.</p>
<p><italic>Hypothesis 5b (H5b)</italic>: Among users who have a low level of adoption of AI technology, the human medical triage doctor engenders increased trust in users than the AI medical triage doctors.</p>
</disp-quote>
<p>The study framework is presented in <xref ref-type="fig" rid="fig1">Figure 1</xref>.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Research framework. &#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001.</p>
</caption>
<graphic xlink:href="fpsyg-16-1730902-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart depicting relationships between several variables. Medical triage doctors (AI vs. Human) influence psychological distance, linked to user trust. Anthropomorphism and task sensitivity affect psychological distance through H3 and H4. AI technology adoption level influences psychological distance and user trust via H1 and H5.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec id="sec9">
<label>4</label>
<title>Study design</title>
<sec id="sec10">
<label>4.1</label>
<title>Experimental design</title>
<p>This study examined the influence of medical triage doctors (AI vs. human) on user trust and the mechanisms involved, utilizing four separate experiments. Study 1 employed a single-factor, two-level (medical triage doctor: AI vs. human) between-subjects experimental design to investigate the effect of the doctor on user trust and the mediating effect of psychological distance. Study 2 employed a 2 (medical triage doctor: AI vs. human)&#x202F;&#x00D7;&#x202F;2 (anthropomorphism: high vs. low) between-subjects design. This experiment incorporated anthropomorphism as a moderator to assess its impact on the relationship between the medical triage doctor and user psychological distance. Study 3 employed a 2 (medical triage doctor: AI vs. human)&#x202F;&#x00D7;&#x202F;2 (anthropomorphism: high vs. low)&#x202F;&#x00D7;&#x202F;2 (task sensitivity: high vs. low) between-subjects experimental framework. Task sensitivity was incorporated as an additional moderating variable to investigate the interplay between the medical triage doctors and the degree of anthropomorphism attributed to the AI medical triage doctor across various task sensitivity conditions. Study 4 employed a 2 (medical triage doctor: AI vs. human)&#x202F;&#x00D7;&#x202F;2 (AI technology acceptance: high vs. low) between-subjects experimental design to investigate the moderating effect of users&#x2019; AI technology acceptance on the impact of the triage medical doctor on user trust.</p>
</sec>
<sec id="sec11">
<label>4.2</label>
<title>Manipulation design of medical triage doctors, anthropomorphism, and task sensitivity</title>
<p>To help participants accurately distinguish between the two types of medical triage doctors, the manipulation of doctor identity was based on the research conducted by <xref ref-type="bibr" rid="ref78">Wang et al. (2021)</xref>. In Study 1, the human medical triage doctor was depicted by a still image of an actual human doctor (with a neutral facial expression), supplemented by the doctor&#x2019;s name and textual introductions written in the first person. The AI medical triage doctor was depicted by a robotic-looking customer service avatar. With the exception of the avatar image and name, all elements of the human and AI medical triage doctors were the same (see <xref rid="SM1" ref-type="supplementary-material">Appendix 1</xref> for details). &#x201C;Headache&#x201D; was chosen as the experimental scenario for two reasons. First, as a high-frequency complaint presented in frontline consultations, it was similar to everyday experiences, which would enable participants to empathize with the scenario and immerse themselves more quickly, thus reducing comprehension burden and social-desirability bias (<xref ref-type="bibr" rid="ref53">Minen et al., 2020</xref>). Second, the symptom satisfies the ethical requirement of minimal risk and, under controlled conditions, is well-suited to examining how differences in triage providers elicit changes in trust and psychological distance (<xref ref-type="bibr" rid="ref47">Lin et al., 2021</xref>).</p>
<p>In Study 2, the introductory interface for the high-anthropomorphism AI medical triage doctor was the same used for the human medical triage doctor in Study 1, except that the AI medical triage doctor in Study 2 was explicitly identified as such. The introductory interface for the low-anthropomorphism AI medical triage doctor was presented visually with a human-like face, but no first-person pronouns nor names were used in the written introduction (see <xref rid="SM1" ref-type="supplementary-material">Appendix 2</xref> for details).</p>
<p>In Study 3, the medical triage doctor presentation was the same as in Study 2, but the interaction content differed based on low vs. high task sensitivity. The modulation of high and low task sensitivity in Study 3 was formulated by adapting and developing content following the classifications of <xref ref-type="bibr" rid="ref62">Pescosolido (2015)</xref> and <xref ref-type="bibr" rid="ref40">Kokolakis (2017)</xref>. Four medical triage task scenarios with low task sensitivity were developed, as well as four with high task sensitivity. A sample of 31 participants (13 males) participated in a straightforward voting process to identify the most representative scenarios, which were also considered to be the most appropriate scenarios to use for each level. The low task sensitivity scenario which garnered the largest number of votes (17 votes, 54.84%) was as follows: A patient presents with a sore throat, slight cough, and nasal congestion, seeking a diagnosis and self-care recommendations. The high task sensitivity scenario which garnered the most votes (18 votes, 58.06%) was as follows: A patient presents with a month-long low-grade fever, nausea, vomiting, substantial weight loss (approximately 10 pounds), recurrent sore throat, and swollen lymph nodes, raising suspicion of a human immunodeficiency virus (HIV) infection.</p>
<p>In Study 4, the manipulations for the medical triage doctor and the interaction content were identical to those in Study 1 (see <xref rid="SM1" ref-type="supplementary-material">Appendix 3</xref> for details).</p>
</sec>
<sec id="sec12">
<label>4.3</label>
<title>Questionnaire design and pre-experimentation</title>
<p>The scales used in this study were adapted from established scales published in various international journals, modified appropriately to fit the research context while ensuring content validity. The specific measurement items are presented in <xref ref-type="table" rid="tab1">Table 1</xref>.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Variable measurement scales.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Measured variable</th>
<th align="left" valign="top">Measurement Items</th>
<th align="left" valign="top">References</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top" rowspan="2">Visual presentation of medical triage doctor</td>
<td align="left" valign="top">To what extent do you believe that this doctor is a human medical triage doctor?</td>
<td align="left" valign="top" rowspan="2">(<xref ref-type="bibr" rid="ref85">Youn and Jin, 2021</xref>)</td>
</tr>
<tr>
<td align="left" valign="top">To what extent do you believe that this doctor is an AI medical triage doctor?</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="5">Psychological distance</td>
<td align="left" valign="top">I feel very close to this medical triage doctor during our interaction.</td>
<td align="left" valign="top" rowspan="5">(<xref ref-type="bibr" rid="ref44">Li and Sung, 2021</xref>; <xref ref-type="bibr" rid="ref13">Chen and Li, 2018</xref>)</td>
</tr>
<tr>
<td align="left" valign="top">I feel no sense of distance when interacting with this medical triage doctor.</td>
</tr>
<tr>
<td align="left" valign="top">My interaction with this medical triage doctor is smooth and comfortable.</td>
</tr>
<tr>
<td align="left" valign="top">This medical triage doctor is able to understand my true needs and feelings.</td>
</tr>
<tr>
<td align="left" valign="top">This medical triage doctor can empathize with my situation.</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="5">Anthropomorphism</td>
<td align="left" valign="top">This medical triage doctor looks like an actual human medical triage doctor.</td>
<td align="left" valign="top" rowspan="5">(<xref ref-type="bibr" rid="ref10">Bartneck et al., 2009</xref>; <xref ref-type="bibr" rid="ref80">Waytz et al., 2010</xref>; <xref ref-type="bibr" rid="ref78">Wang et al., 2021</xref>)</td>
</tr>
<tr>
<td align="left" valign="top">This medical triage doctor seems to have their own thoughts and judgments.</td>
</tr>
<tr>
<td align="left" valign="top">This medical triage doctor seems able to understand my needs and responds accordingly.</td>
</tr>
<tr>
<td align="left" valign="top">This medical triage doctor seems able to understand my mood and feelings.</td>
</tr>
<tr>
<td align="left" valign="top">This medical triage doctor seems to genuinely want to help me solve my problem.</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="4">Task sensitivity</td>
<td align="left" valign="top">This type of medical scenario is sensitive and not suitable for public discussion.</td>
<td align="left" valign="top" rowspan="4">(<xref ref-type="bibr" rid="ref68">Smith et al., 1996</xref>; <xref ref-type="bibr" rid="ref52">Malhotra et al., 2004</xref>)</td>
</tr>
<tr>
<td align="left" valign="top">This type of medical scenario is generally considered to be personal and private.</td>
</tr>
<tr>
<td align="left" valign="top">Consulting about this type of medical scenario makes me feel anxious or worried.</td>
</tr>
<tr>
<td align="left" valign="top">I worry that others will have a negative opinion of me for consulting about this type of medical scenario.</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="5">AI technology adoption level</td>
<td align="left" valign="top">I believe that AI technology will improve my life.</td>
<td align="left" valign="top" rowspan="5">(<xref ref-type="bibr" rid="ref20">Davis, 1989</xref>; <xref ref-type="bibr" rid="ref27">Grassini, 2023</xref>)</td>
</tr>
<tr>
<td align="left" valign="top">I believe that AI technology can improve my learning and work efficiency.</td>
</tr>
<tr>
<td align="left" valign="top">I believe that AI technology is reliable and safe.</td>
</tr>
<tr>
<td align="left" valign="top">I believe that AI technology is beneficial to humanity.</td>
</tr>
<tr>
<td align="left" valign="top">I believe that I will use AI technology in the future.</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="4">User trust</td>
<td align="left" valign="top">This medical triage doctor is honest.</td>
<td align="left" valign="top" rowspan="4">(<xref ref-type="bibr" rid="ref25">Everard and Galletta, 2005</xref>; <xref ref-type="bibr" rid="ref42">Lee and Choi, 2017</xref>)</td>
</tr>
<tr>
<td align="left" valign="top">This medical triage doctor is trustworthy.</td>
</tr>
<tr>
<td align="left" valign="top">This medical triage doctor has the professional knowledge and ability to solve my problem.</td>
</tr>
<tr>
<td align="left" valign="top">The information, services, and advice provided by this medical triage doctor are credible.</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The independent variable, medical triage doctors, was assessed using a two-item scale derived from <xref ref-type="bibr" rid="ref85">Youn and Jin (2021)</xref>. The two items evaluated the manipulation check for the doctor type: &#x201C;To what extent do you believe that this doctor is a human medical triage doctor?&#x201D; and &#x201C;To what extent do you believe that this doctor is an AI medical triage doctor?&#x201D; Both items were rated using a seven-point Likert scale (1&#x202F;=&#x202F;strongly disagree, 7&#x202F;=&#x202F;strongly agree).</p>
<p>The mediating variable of psychological distance was assessed using a five-item scale derived from the research of <xref ref-type="bibr" rid="ref44">Li and Sung (2021)</xref> and <xref ref-type="bibr" rid="ref13">Chen and Li (2018)</xref>. An example item is: &#x201C;I feel very close to this medical triage doctor during our interaction.&#x201D; Higher scores on this scale signified a diminished perceived psychological gap between the user and the medical triage doctor.</p>
<p>The moderating variable, anthropomorphism, was assessed using a five-item scale developed from the research of <xref ref-type="bibr" rid="ref10">Bartneck et al. (2009)</xref>, <xref ref-type="bibr" rid="ref80">Waytz et al. (2010)</xref>, and <xref ref-type="bibr" rid="ref78">Wang et al. (2021)</xref>. An example item is: &#x201C;This medical triage doctor looks like an actual human medical triage doctor.&#x201D; Higher scores indicated an increased level of perceived anthropomorphism. The task sensitivity scale had four items, and was based on the studies conducted by <xref ref-type="bibr" rid="ref68">Smith et al. (1996)</xref> and <xref ref-type="bibr" rid="ref52">Malhotra et al. (2004)</xref>. An example item is: &#x201C;This type of medical scenario is sensitive and not suitable for public discussion.&#x201D; Higher scores signified an increased degree of task sensitivity. The five-item AI technology adoption level scale was developed based on the research of <xref ref-type="bibr" rid="ref20">Davis (1989)</xref> and <xref ref-type="bibr" rid="ref27">Grassini (2023)</xref>. An example item is: &#x201C;I believe that AI technology will improve my life.&#x201D; Higher ratings signified an increased degree of user adoption of AI technology.</p>
<p>The dependent variable, user trust, was assessed using a four-item scale derived from the research of <xref ref-type="bibr" rid="ref25">Everard and Galletta (2005)</xref> and <xref ref-type="bibr" rid="ref42">Lee and Choi (2017)</xref>. An example item is: &#x201C;This medical triage doctor is honest.&#x201D; Higher scores on the scale signified an increased level of user trust in the medical triage doctor.</p>
<p>Several measures were also incorporated in the study as control variables and manipulation assessments. Participants&#x2019; previous acceptance of new technology items was assessed using the question, &#x201C;How would you rate your acceptance of innovative technology products (e.g., AI, smart devices)?&#x201D; The item was rated using a seven-point scale (1&#x202F;=&#x202F;extremely reluctant, 7&#x202F;=&#x202F;extremely willing to try new technologies). Participants&#x2019; prior level of trust in AI medical products was assessed using the question, &#x201C;Do you already trust a range of AI medical products or services, such as AI medical triage doctors?&#x201D; The item was rated using a seven-point scale (1&#x202F;=&#x202F;highly distrustful, 7&#x202F;=&#x202F;strongly trusting). Scenario participation was assessed using a single item derived from <xref ref-type="bibr" rid="ref78">Wang et al. (2021)</xref>: &#x201C;Can you envision yourself as the patient in the scenario?&#x201D; (1&#x202F;=&#x202F;not at all, 7&#x202F;=&#x202F;completely). This strategy aimed to mitigate potential interference in the results due to insufficient involvement with the experimental circumstances.</p>
<p>Before the formal experiments began, three pre-experiments were executed. The objective of the initial pre-examination was to ascertain whether participants could accurately differentiate between the two medical triage doctors manipulated in Study 1. We recruited 49 participants (25 males) through the Credamo survey platform. Participants were randomly assigned to one of two groups, and presented with the image and introduction of the medical triage doctor. They were then asked to evaluate the doctor after viewing the full presentation. An independent samples <italic>t</italic>-test indicated that, under the human medical triage doctor condition, participants&#x2019; evaluation of the AI medical triage doctor was considerably lower than that of the human medical triage doctor (M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;4.83, SD&#x202F;=&#x202F;0.92 vs. M<sub>AI medical triage doctor</sub>&#x202F;=&#x202F;2.96, SD&#x202F;=&#x202F;1.21; <italic>t</italic>&#x202F;=&#x202F;&#x2212;6.099, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). The elevated recognition score demonstrated that the participants distinctly identified the target as being a human medical triage doctor. Similarly, in the AI medical triage doctor condition, the rating for the AI medical triage doctor was significantly higher than the rating for the human medical triage doctor (M<sub>AI medical triage doctor</sub>&#x202F;=&#x202F;5.08, SD&#x202F;=&#x202F;1.26 vs. M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;3.54, SD&#x202F;=&#x202F;1.47; <italic>t</italic>&#x202F;=&#x202F;3.926, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), indicating that the participants correctly identified the medical triage doctor as being AI. Thus, the experimental materials satisfactorily fulfilled the manipulation check criteria, thereby supporting the commencement of the official experiment.</p>
<p>The objective of the second pre-experiment was to ascertain participants&#x2019; ability to accurately differentiate between the high- and low-anthropomorphism AI medical triage doctors. The methodology followed that used in Study 1. A total of 73 participants (37 males) assessed the level of the medical triage doctor&#x2019;s anthropomorphism (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.916). An independent samples <italic>t</italic>-test indicated that the high-anthropomorphism AI medical triage doctor received a substantially higher rating for anthropomorphism as compared to the low-anthropomorphism AI medical triage doctor (M <sub>High-anthropomorphism</sub>&#x202F;=&#x202F;5.35, SD&#x202F;=&#x202F;1.08 vs. M <sub>Low-anthropomorphism</sub>&#x202F;=&#x202F;4.66, SD&#x202F;=&#x202F;1.41; <italic>t</italic> (71)&#x202F;=&#x202F;2.356, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.05). This validated the effective manipulation of the AI medical triage doctors&#x2019; level of anthropomorphism.</p>
<p>The objective of the third pre-experiment was to confirm the effective modulation of task sensitivity. Eighty-five participants (47 males) were randomly allocated to either a high or low task-sensitivity scenario, and were asked to evaluate the task sensitivity of the scenario (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.752). The findings indicated that participants&#x2019; reported sensitivity was markedly greater in the high task scenario than in the low task scenario (M <sub>High task sensitivity</sub>&#x202F;=&#x202F;3.70, SD&#x202F;=&#x202F;0.72 vs. M <sub>Low task sensitivity</sub>&#x202F;=&#x202F;3.12, SD&#x202F;=&#x202F;0.75; <italic>t</italic>(83)&#x202F;=&#x202F;3.627, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). This validated the effective manipulation of task sensitivity for Study 3.</p>
</sec>
<sec id="sec13">
<label>4.4</label>
<title>Experimental procedure</title>
<p>The four formal experiments in this study employed a scenario-based design integrating both text and images, and were based on the methodology utilized by <xref ref-type="bibr" rid="ref1">Aaker et al. (2004)</xref>. Participants were randomly allocated to either the AI or human medical triage doctors condition, and the experimental procedure went as follows: (1) Demographic information (e.g., sex, age, education level) was obtained. (2) Prior attitude assessments (e.g., acceptance of innovative technological products) were conducted. (3) The medical triage consultation scenarios were explained and presented visually. (4) Participants engaged with either the AI or human medical triage interaction screenshots, depending on the condition to which they were assigned (see Supplementary material Appendix 3 for details). (5) The relevant questionnaire assessments were completed. The content and complexity of the task remained consistent across all consultation scenarios. The primary experimental manipulation was the medical triage doctor: either AI or human. To determine the quality and validity of the experimental data, a manipulation check item was incorporated into the questionnaires; specifically, in Studies 1 and 4, after participants viewed the triage interaction screenshots, they were asked the question: &#x201C;Based on the consultation you have observed, the medical triage doctor is: (1) AI, or (2) human.&#x201D; In Studies 2 and 3, the question was similar, but the response options were: &#x201C;(1) low-anthropomorphism AI, (2) high-anthropomorphism AI, or (3) human.&#x201D; If any participant answered this question incorrectly their data was subsequently excluded from the study.</p>
</sec>
<sec id="sec14">
<label>4.5</label>
<title>Sample and data collection</title>
<p>An <italic>a priori</italic> power analysis was conducted using G&#x002A;Power 3.1 to determine the minimum required sample size for each study. For all analyses, the significance level (<italic>&#x03B1;</italic>) was set at 0.05, statistical power (1&#x202F;&#x2212;&#x202F;<italic>&#x03B2;</italic>) was set at 0.80, and a medium effect size was assumed. Study 1 aimed to examine group differences and a mediation effect. Based on <xref ref-type="bibr" rid="ref17">Cohen&#x2019;s (2013)</xref> criteria, a medium effect size was set (<italic>d</italic>&#x202F;=&#x202F;0.5). Referencing the authoritative sample size recommendations for bootstrap mediation tests by <xref ref-type="bibr" rid="ref26">Fritz and MacKinnon (2007)</xref>, the calculation indicated that a minimum of 128 participants was required. For the subsequent studies, an effect size of <italic>f</italic>&#x202F;=&#x202F;0.25 was set. Calculations indicated a minimum requirement of 156 participants for Study 2, 210 participants for Study 3, and 128 participants for Study 4.</p>
<p>Participants for this research were recruited online via the Credamo platform data service. Each participant was randomly assigned to one experimental scenario. After removing invalid questionnaires (e.g., those with excessively short completion times or incorrect answers to manipulation check items), a total of 976 valid questionnaires were obtained: Study 1 (<italic>N</italic>&#x202F;=&#x202F;191), Study 2 (<italic>N</italic>&#x202F;=&#x202F;210), Study 3 (<italic>N</italic>&#x202F;=&#x202F;355), and Study 4 (<italic>N</italic>&#x202F;=&#x202F;220). All final sample sizes met the minimum requirements suggested by the a priori power analysis. The demographic data of the sample are shown in <xref ref-type="table" rid="tab2">Table 2</xref>.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Demographic data of the four studies sample.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Category</th>
<th align="left" valign="top">Option</th>
<th align="center" valign="top">Frequency</th>
<th align="center" valign="top">Percentage (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top" rowspan="2">Sex</td>
<td align="left" valign="top">Male</td>
<td align="center" valign="top">483</td>
<td align="center" valign="top">49.49</td>
</tr>
<tr>
<td align="left" valign="top">Female</td>
<td align="center" valign="top">493</td>
<td align="center" valign="top">50.51</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="6">Age</td>
<td align="left" valign="top">Under 18&#x202F;years</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">0.10</td>
</tr>
<tr>
<td align="left" valign="top">18&#x2013;25</td>
<td align="center" valign="top">307</td>
<td align="center" valign="top">31.45</td>
</tr>
<tr>
<td align="left" valign="top">26&#x2013;35</td>
<td align="center" valign="top">375</td>
<td align="center" valign="top">38.42</td>
</tr>
<tr>
<td align="left" valign="top">36&#x2013;45</td>
<td align="center" valign="top">146</td>
<td align="center" valign="top">14.96</td>
</tr>
<tr>
<td align="left" valign="top">46&#x2013;55</td>
<td align="center" valign="top">82</td>
<td align="center" valign="top">8.40</td>
</tr>
<tr>
<td align="left" valign="top">Over 55</td>
<td align="center" valign="top">65</td>
<td align="center" valign="top">6.66</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="5">Education level</td>
<td align="left" valign="top">Middle school or below</td>
<td align="center" valign="top">62</td>
<td align="center" valign="top">6.35</td>
</tr>
<tr>
<td align="left" valign="top">High school/secondary/technical school</td>
<td align="center" valign="top">96</td>
<td align="center" valign="top">9.84</td>
</tr>
<tr>
<td align="left" valign="top">Junior college (associate degree)</td>
<td align="center" valign="top">117</td>
<td align="center" valign="top">11.99</td>
</tr>
<tr>
<td align="left" valign="top">Bachelor&#x2019;s degree</td>
<td align="center" valign="top">514</td>
<td align="center" valign="top">52.66</td>
</tr>
<tr>
<td align="left" valign="top">Postgraduate (master&#x2019;s) and above</td>
<td align="center" valign="top">187</td>
<td align="center" valign="top">19.16</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="7">Occupation</td>
<td align="left" valign="top">Student</td>
<td align="center" valign="top">166</td>
<td align="center" valign="top">17.01</td>
</tr>
<tr>
<td align="left" valign="top">State-owned enterprise</td>
<td align="center" valign="top">145</td>
<td align="center" valign="top">14.86</td>
</tr>
<tr>
<td align="left" valign="top">Public institution</td>
<td align="center" valign="top">117</td>
<td align="center" valign="top">11.99</td>
</tr>
<tr>
<td align="left" valign="top">Civil servant</td>
<td align="center" valign="top">77</td>
<td align="center" valign="top">7.89</td>
</tr>
<tr>
<td align="left" valign="top">Private enterprise</td>
<td align="center" valign="top">352</td>
<td align="center" valign="top">36.07</td>
</tr>
<tr>
<td align="left" valign="top">Foreign-funded enterprise</td>
<td align="center" valign="top">102</td>
<td align="center" valign="top">10.45</td>
</tr>
<tr>
<td align="left" valign="top">Other</td>
<td align="center" valign="top">17</td>
<td align="center" valign="top">1.74</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="sec15">
<label>5</label>
<title>Data analysis</title>
<sec id="sec16">
<label>5.1</label>
<title>Study 1: the mediating effect of psychological distance in user trust toward medical triage doctors</title>
<p>We recruited 191 participants (92 males). Ages were concentrated in the ranges of 18&#x2013;25&#x202F;years (30.37%) and 26&#x2013;35&#x202F;years (51.31%). In the interaction task, participants sought triage assistance for persistent headaches, described as being particularly severe upon waking in the morning (materials available upon request). The psychological distance scale (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.828) and the user trust scale (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.781) demonstrated good internal consistency.</p>
<p><italic>Manipulation check</italic>. Participants were required to successfully distinguish between the AI and human medical triage doctors (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). A one-sample <italic>t</italic>-test against the midpoint (4) of the seven-point scale indicated strong immersion in the scenario (M&#x202F;=&#x202F;6.225, <italic>t</italic>(190)&#x202F;=&#x202F;108.564, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), mitigating concerns that low involvement could bias causal inferences. No between-condition differences were found in receptiveness to novel technologies, trust in AI medical products, or scenario involvement (all <italic>p</italic>&#x202F;&#x003E;&#x202F;0.05), ruling out these individual-difference factors as alternative explanations and supporting the effectiveness of the stimuli.</p>
<p><italic>Main effect test</italic>. An independent samples <italic>t</italic>-test compared user trust across the medical triage doctors. Trust was significantly higher in the human medical triage doctor condition compared to the AI condition (M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;6.09, <italic>SD</italic>&#x202F;=&#x202F;0.57 vs. M<sub>AI medical triage doctor</sub>&#x202F;=&#x202F;5.04, <italic>SD</italic>&#x202F;=&#x202F;0.50, <italic>t</italic>(189)&#x202F;=&#x202F;&#x2212;13.609, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), supporting H1. The effect of the medical triage doctors on user trust is shown in <xref ref-type="fig" rid="fig2">Figure 2</xref>.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Effect of medical triage doctors on user trust. &#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001; <italic>ns</italic>: <italic>p</italic>&#x202F;&#x003E;&#x202F;0.05 (no significant between-group difference).</p>
</caption>
<graphic xlink:href="fpsyg-16-1730902-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart comparing user trust between an AI doctor and a human doctor. The AI doctor has a score of 5.04, shown in red, while the human doctor scores 6.09, shown in green. A significant difference is indicated by three asterisks (&#x002A;&#x002A;&#x002A;).</alt-text>
</graphic>
</fig>
<p>A bootstrapping analysis with 5,000 samples was conducted using the PROCESS macro (<xref ref-type="bibr" rid="ref32">Hayes, 2013</xref>; Model 4) to test the mediating effect of psychological distance on the relationship between the medical triage doctors and user trust. The results (see <xref ref-type="table" rid="tab3">Table 3</xref>) indicated a significant indirect effect, as the 95% confidence interval (CI) for the effect through psychological distance did not include zero (effect&#x202F;=&#x202F;0.649, 95% BootCI&#x202F;=&#x202F;[0.461, 0.855]). Moreover, the total effect of the medical triage doctor on user trust was significant (effect&#x202F;=&#x202F;1.056, <italic>t</italic>&#x202F;=&#x202F;13.609, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). When controlled for psychological distance, the direct effect of the medical triage doctor on user trust remained significant (effect&#x202F;=&#x202F;0.407, <italic>t</italic>&#x202F;=&#x202F;4.947, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). Since both the direct and indirect effects were significant, the results support a partial mediation model. These findings verify H2.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Test results of the mediating effect of psychological distance in Study 1 (<italic>N</italic>&#x202F;=&#x202F;191).</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Category</th>
<th align="center" valign="top" rowspan="2">Effect</th>
<th align="center" valign="top" rowspan="2">SE</th>
<th align="center" valign="top" rowspan="2">
<italic>t</italic>
</th>
<th align="center" valign="top" rowspan="2">
<italic>p</italic>
</th>
<th align="center" valign="top" colspan="2">95% CI</th>
<th align="left" valign="top" rowspan="2">Conclusion</th>
</tr>
<tr>
<th align="center" valign="top">BootLLCI</th>
<th align="center" valign="top">BootULCI</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Indirect effect</td>
<td align="center" valign="top">0.649</td>
<td align="center" valign="top">0.100</td>
<td align="center" valign="top">-</td>
<td align="center" valign="top">CI excludes 0</td>
<td align="center" valign="top">0.461</td>
<td align="center" valign="top">0.855</td>
<td align="left" valign="top" rowspan="3">Partial mediation</td>
</tr>
<tr>
<td align="left" valign="top">Direct effect</td>
<td align="center" valign="top">0.407</td>
<td align="center" valign="top">0.082</td>
<td align="center" valign="top">4.947</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="top">0.244</td>
<td align="center" valign="top">0.569</td>
</tr>
<tr>
<td align="left" valign="top">Total effect</td>
<td align="center" valign="top">1.056</td>
<td align="center" valign="top">0.078</td>
<td align="center" valign="top">13.609</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="top">0.903</td>
<td align="center" valign="top">1.209</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>&#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec17">
<label>5.2</label>
<title>Study 2: the moderating effect of anthropomorphism</title>
<p>We recruited 210 participants (107 males). Ages were concentrated in the ranges of 18&#x2013;25&#x202F;years (32.38%) and 26&#x2013;35&#x202F;years (45.24%). Apart from adding an anthropomorphism manipulation for the AI medical triage doctor, the procedure and design were identical to Study 1. The anthropomorphism scale (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.856) used the pretest items, and the psychological distance (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.887) and user trust (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.907) measures were the same as in Study 1; all exhibited strong internal consistency.</p>
<p><italic>Manipulation check</italic>. Participants were required to successfully distinguish between the high- and low- anthropomorphism AI medical triage doctors and human medical triage doctors (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001) and reported experiencing strong immersion in the scenario (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). No between-condition differences were observed in receptiveness to novel technologies, trust in AI medical products, or scenario involvement (all <italic>p</italic>&#x202F;&#x003E;&#x202F;0.05). An independent samples <italic>t</italic>-test confirmed that perceived anthropomorphism was higher in the high-anthropomorphism AI medical triage doctor condition than in the low-anthropomorphism AI condition (M <sub>High-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;5.60, <italic>SD</italic>&#x202F;=&#x202F;0.60 vs. M <sub>Low-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;4.14, SD&#x202F;=&#x202F;0.59; <italic>t</italic>(138.000)&#x202F;=&#x202F;&#x2212;14.442, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). These results indicated that the anthropomorphism manipulation was successful, as the participants clearly differentiated between the high&#x2013;low-anthropomorphism AI medical triage doctors; thus, the stimuli were effective.</p>
<p><italic>Main effect test</italic>. An independent samples <italic>t</italic>-test showed that user trust was higher in the human medical triage doctor condition than in the AI condition (M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;6.02, SD&#x202F;=&#x202F;0.63 vs. M<sub>AI medical triage doctor</sub>&#x202F;=&#x202F;4.80, SD&#x202F;=&#x202F;1.04; <italic>t</italic>(199.800)&#x202F;=&#x202F;&#x2212;10.577, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), thereby again confirming H1.</p>
<p><italic>Mediation analysis</italic>. The mediation analysis was repeated using the PROCESS macro (Model 4). Results showed that the total effect of the medical triage doctors on user trust was significant (effect&#x202F;=&#x202F;1.223, <italic>t</italic>&#x202F;=&#x202F;9.066, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), and the direct effect also remained significant (effect&#x202F;=&#x202F;0.457, <italic>t</italic>&#x202F;=&#x202F;5.217, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). Furthermore, the bootstrap analysis confirmed a significant indirect effect through psychological distance (effect&#x202F;=&#x202F;0.767, 95% BootCI&#x202F;=&#x202F;[0.557, 0.997]). As detailed in <xref ref-type="table" rid="tab4">Table 4</xref>, these findings once again supported the partial mediating role of psychological distance, further verifying H2.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Test results of the mediating effect of psychological distance in Study 2 (<italic>N</italic>&#x202F;=&#x202F;210).</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Category</th>
<th align="center" valign="top" rowspan="2">Effect</th>
<th align="center" valign="top" rowspan="2">SE</th>
<th align="center" valign="top" rowspan="2">
<italic>t</italic>
</th>
<th align="center" valign="top" rowspan="2">
<italic>p</italic>
</th>
<th align="center" valign="top" colspan="2">95% CI</th>
<th align="left" valign="top" rowspan="2">Conclusion</th>
</tr>
<tr>
<th align="center" valign="top">BootLLCI</th>
<th align="center" valign="top">BootULCI</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Indirect effect</td>
<td align="center" valign="top">0.767</td>
<td align="center" valign="top">0.112</td>
<td align="center" valign="top">-</td>
<td align="center" valign="top">CI excludes 0</td>
<td align="center" valign="top">0.557</td>
<td align="center" valign="top">0.997</td>
<td align="left" valign="top" rowspan="3">Partial mediation</td>
</tr>
<tr>
<td align="left" valign="top">Direct effect</td>
<td align="center" valign="top">0.457</td>
<td align="center" valign="top">0.088</td>
<td align="center" valign="top">5.217</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="top">0.284</td>
<td align="center" valign="top">0.629</td>
</tr>
<tr>
<td align="left" valign="top">Total effect</td>
<td align="center" valign="top">1.223</td>
<td align="center" valign="top">0.135</td>
<td align="center" valign="top">9.066</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="top">0.957</td>
<td align="center" valign="top">1.489</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>&#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001.</p>
</table-wrap-foot>
</table-wrap>
<p>Moderation analysis. Because anthropomorphism was manipulated only for the AI medical triage doctor, three one-way ANOVAs and <italic>post hoc</italic> multiple-comparison tests were conducted to examine whether anthropomorphism moderated the effect of the medical triage doctor on psychological distance. Results (see <xref ref-type="table" rid="tab5">Table 5</xref>) showed a significant main effect of the medical triage doctor [<italic>F</italic>(1, 208)&#x202F;=&#x202F;48.849, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001], a significant main effect of anthropomorphism [<italic>F</italic>(1, 138)&#x202F;=&#x202F;216.908, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001], and a significant effect for the three-level factor [low-anthropomorphism AI vs. high-anthropomorphism AI vs. human medical triage doctors; <italic>F</italic>(2, 207)&#x202F;=&#x202F;148.397, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001]. <italic>Post hoc</italic> tests (see <xref ref-type="table" rid="tab6">Table 6</xref>) indicated that, relative to those in the low-anthropomorphism AI medical triage doctor condition, participants in the human medical triage doctor condition reported smaller psychological distance (M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;5.69, SD&#x202F;=&#x202F;0.72 vs. M<sub>Low-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;3.99, SD&#x202F;=&#x202F;0.66, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), supporting H3a. Reported psychological distance in the high-anthropomorphism AI medical triage doctor condition did not differ from that in the human medical triage doctor condition (M<sub>High-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;5.59, SD&#x202F;=&#x202F;0.63 vs. M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;5.69, SD&#x202F;=&#x202F;0.72, <italic>p</italic>&#x202F;=&#x202F;0.398&#x202F;&#x003E;&#x202F;0.05), supporting H3b. 
In sum, anthropomorphism moderated the effect of the medical triage doctor on psychological distance; H3, H3a, and H3b were all supported. The mean plot for the medical triage doctor&#x202F;&#x00D7;&#x202F;anthropomorphism comparison is shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>.</p>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>ANOVA results of psychological distance for variables tested in Study 2 from medical triage doctors.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Variable</th>
<th align="center" valign="top" colspan="2">Medical triage doctors (M&#x202F;&#x00B1;&#x202F;SD)</th>
<th align="center" valign="top" rowspan="2">
<italic>F</italic>
</th>
<th align="center" valign="top" rowspan="2">
<italic>p</italic>
</th>
</tr>
<tr>
<th align="center" valign="top">AI medical triage doctor<break/>(<italic>n</italic> =&#x202F;140)</th>
<th align="center" valign="top">Human medical triage doctor<break/>(<italic>n</italic> =&#x202F;70)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Psychological distance</td>
<td align="center" valign="middle">4.73&#x202F;&#x00B1;&#x202F;1.03</td>
<td align="center" valign="middle">5.69&#x202F;&#x00B1;&#x202F;0.72</td>
<td align="center" valign="middle">48.849</td>
<td align="center" valign="middle">&#x002A;&#x002A;&#x002A;</td>
</tr>
</tbody>
</table>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Variable</th>
<th align="center" valign="top" colspan="2">Anthropomorphism (M&#x202F;&#x00B1;&#x202F;SD)</th>
<th align="center" valign="top" rowspan="2">
<italic>F</italic>
</th>
<th align="center" valign="top" rowspan="2">
<italic>p</italic>
</th>
</tr>
<tr>
<th align="center" valign="top">Low-anthropomorphism AI medical triage doctor (<italic>n</italic> =&#x202F;75)</th>
<th align="center" valign="top">High-anthropomorphism AI medical triage doctor (<italic>n</italic> =&#x202F;65)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Psychological distance</td>
<td align="center" valign="top">3.99&#x202F;&#x00B1;&#x202F;0.66</td>
<td align="center" valign="top">5.59&#x202F;&#x00B1;&#x202F;0.63</td>
<td align="center" valign="top">216.908</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
</tr>
</tbody>
</table>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Variable</th>
<th align="center" valign="top" colspan="3">Different medical triage doctors (M&#x202F;&#x00B1;&#x202F;SD)</th>
<th align="center" valign="top" rowspan="2">
<italic>F</italic>
</th>
<th align="center" valign="top" rowspan="2">
<italic>p</italic>
</th>
</tr>
<tr>
<th align="center" valign="top">Low-anthropomorphism AI medical triage doctor (<italic>n</italic> =&#x202F;75)</th>
<th align="center" valign="top">High-anthropomorphism AI medical triage doctor (<italic>n</italic> =&#x202F;65)</th>
<th align="center" valign="top">Human medical triage doctor (<italic>n</italic> =&#x202F;70)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Psychological distance</td>
<td align="center" valign="top">3.99&#x202F;&#x00B1;&#x202F;0.66</td>
<td align="center" valign="top">5.59&#x202F;&#x00B1;&#x202F;0.63</td>
<td align="center" valign="top">5.69&#x202F;&#x00B1;&#x202F;0.72</td>
<td align="center" valign="top">148.397</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>&#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001.</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="tab6">
<label>Table 6</label>
<caption>
<p>Post hoc multiple comparison results in Study 2.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Category</th>
<th align="left" valign="top">(I) Group</th>
<th align="left" valign="top">(J) Group</th>
<th align="center" valign="top">(I) M</th>
<th align="center" valign="top">(J) M</th>
<th align="center" valign="top">(I&#x2212;J)</th>
<th align="center" valign="top">SE</th>
<th align="center" valign="top">
<italic>p</italic>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top" rowspan="3">Psychological distance</td>
<td align="left" valign="top">Low-anthropomorphism AI medical triage doctor</td>
<td align="left" valign="top">High-anthropomorphism AI medical triage doctor</td>
<td align="char" valign="top" char=".">3.99</td>
<td align="char" valign="top" char=".">5.59</td>
<td align="char" valign="top" char=".">&#x2212;1.60</td>
<td align="char" valign="top" char=".">0.113</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="top">Low-anthropomorphism AI medical triage doctor</td>
<td align="left" valign="top">Human medical triage doctor</td>
<td align="char" valign="top" char=".">3.99</td>
<td align="char" valign="top" char=".">5.69</td>
<td align="char" valign="top" char=".">&#x2212;1.70</td>
<td align="char" valign="top" char=".">0.111</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="top">High-anthropomorphism AI medical triage doctor</td>
<td align="left" valign="top">Human medical triage doctor</td>
<td align="char" valign="top" char=".">5.59</td>
<td align="char" valign="top" char=".">5.69</td>
<td align="char" valign="top" char=".">&#x2212;0.10</td>
<td align="char" valign="top" char=".">0.115</td>
<td align="char" valign="top" char=".">0.398</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>(I) Name and (J) Name indicate the two groups being compared in each post hoc pairwise comparison. I is the reference group, and J is the comparison group. (I) Mean and (J) Mean are the group means for groups I and J, respectively. Difference (I&#x2212;J) is the mean difference, calculated as (I) Mean&#x2212;(J) Mean. A negative (I&#x2212;J) value means the mean of group I is lower than the mean of group J; a positive (I&#x2212;J) value means the mean of group I is higher than that of group J and &#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001.</p>
</table-wrap-foot>
</table-wrap>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Interaction effect of medical triage doctors and anthropomorphism on psychological distance. &#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001; ns: <italic>p</italic>&#x202F;&#x003E;&#x202F;0.05; no significant between-group difference.</p>
</caption>
<graphic xlink:href="fpsyg-16-1730902-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart illustrating psychological distance across three categories: low anthropomorphism (3.99), high anthropomorphism (5.59), and human doctor (5.69). Statistical significance is denoted as follows: low versus high anthropomorphism and human doctor (&#x002A;&#x002A;&#x002A;), and high anthropomorphism versus human doctor (ns).</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec18">
<label>5.3</label>
<title>Study 3: the higher-order moderating effect of task sensitivity</title>
<p>We recruited 355 participants (172 males). Ages were concentrated in the ranges of 18&#x2013;25&#x202F;years (27.32%) and 26&#x2013;35&#x202F;years (30.42%). Aside from adding manipulations of task sensitivity and anthropomorphism for the AI medical triage doctor, as well as varying the interaction script by task sensitivity, the procedure matched that of Study 1 and Study 2. The anthropomorphism scale (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.905) and task sensitivity scale (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.860) adopted the pretest items, and the measures of psychological distance (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.882) and user trust (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.938) were the same as in Study 1; all showed strong internal consistency.</p>
<p><italic>Manipulation check</italic>. Participants were required to successfully distinguish between high- and low-anthropomorphism AI medical triage doctors and human medical triage doctor (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001) and reported strong immersion in the scenario (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). No between-group differences were observed in receptiveness to novel technologies, trust in AI medical products, or scenario involvement (all <italic>p</italic>&#x202F;&#x003E;&#x202F;0.05), indicating that the stimuli were effective. Independent samples <italic>t</italic>-testing confirmed that perceived anthropomorphism was higher in the high-anthropomorphism AI medical triage doctor condition than in the low-anthropomorphism AI condition (M<sub>High-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;5.66, SD&#x202F;=&#x202F;0.62 vs. M<sub>Low-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;3.09, SD&#x202F;=&#x202F;0.64; <italic>t</italic>(353.000)&#x202F;=&#x202F;&#x2212;38.059, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), and that perceived task sensitivity was higher in the high sensitivity group than in the low sensitivity group (M<sub>High task sensitivity</sub>&#x202F;=&#x202F;4.87, SD&#x202F;=&#x202F;0.83 vs. M<sub>Low task sensitivity</sub>&#x202F;=&#x202F;2.98, SD&#x202F;=&#x202F;0.94; <italic>t</italic>(353.000)&#x202F;=&#x202F;&#x2212;19.188, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). These results indicated that both the anthropomorphism and task sensitivity manipulations were successful.</p>
<p><italic>Moderation analysis</italic>. As <xref ref-type="table" rid="tab7">Table 7</xref> shows, ANOVA revealed a significant main effect of medical triage doctors [low-anthropomorphism AI medical triage doctor vs. high-anthropomorphism AI vs. human medical triage doctor; <italic>F</italic>(2, 349)&#x202F;=&#x202F;378.690, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001]; a significant main effect of task sensitivity [<italic>F</italic>(1, 349)&#x202F;=&#x202F;52.826, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001]; and a significant medical triage doctor&#x202F;&#x00D7;&#x202F;task sensitivity interaction effect [<italic>F</italic>(2, 349)&#x202F;=&#x202F;48.211, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001]. Simple-effects tests showed that under the low task sensitivity condition, psychological distance in the high-anthropomorphism AI medical triage doctor condition did not differ from that in the human medical triage doctor condition (M<sub>High-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;5.38, SD&#x202F;=&#x202F;0.56 vs. M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;5.34, SD&#x202F;=&#x202F;0.57; <italic>p</italic>&#x202F;=&#x202F;1.000&#x202F;&#x003E;&#x202F;0.05). Participants in the low-anthropomorphism AI medical triage doctor condition reported significantly greater psychological distance than those in both the high-anthropomorphism AI medical triage doctor condition (M<sub>Low-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;3.36, SD&#x202F;=&#x202F;0.86; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001) and the human medical triage doctor condition (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), supporting H4a. 
Under high task sensitivity, participants in the human medical triage doctor condition reported significantly smaller distance than those in the high-anthropomorphism AI medical triage doctor condition (M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;5.64, SD&#x202F;=&#x202F;0.56; M<sub>High-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;3.98, SD&#x202F;=&#x202F;0.84; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), and those in the high-anthropomorphism AI medical triage doctor condition reported smaller distance than those in the low-anthropomorphism AI condition (M<sub>Low-anthropomorphism AI medical triage doctor</sub>&#x202F;=&#x202F;2.91, SD&#x202F;=&#x202F;0.53; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), supporting H4b.</p>
<table-wrap position="float" id="tab7">
<label>Table 7</label>
<caption>
<p>Interaction effects of different medical triage doctors and task sensitivity in Study 3.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Source</th>
<th align="center" valign="top">Sum of squares</th>
<th align="center" valign="top">
<italic>df</italic>
</th>
<th align="center" valign="top">Mean square</th>
<th align="center" valign="top">
<italic>F</italic>
</th>
<th align="center" valign="top">
<italic>p</italic>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Intercept</td>
<td align="center" valign="top">6977.799</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">6977.799</td>
<td align="center" valign="top">15616.401</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="top">Low-anthropomorphism AI medical triage doctor vs. high-anthropomorphism AI medical triage doctor vs. human medical triage doctor</td>
<td align="center" valign="top">338.417</td>
<td align="center" valign="top">2</td>
<td align="center" valign="top">169.208</td>
<td align="center" valign="top">378.690</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="top">Task sensitivity</td>
<td align="center" valign="top">23.604</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">23.604</td>
<td align="center" valign="top">52.826</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="top">Different medical triage doctors&#x202F;&#x00D7;&#x202F;task sensitivity</td>
<td align="center" valign="top">43.084</td>
<td align="center" valign="top">2</td>
<td align="center" valign="top">21.542</td>
<td align="center" valign="top">48.211</td>
<td align="center" valign="top">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="middle">Residual</td>
<td align="center" valign="top">155.942</td>
<td align="center" valign="top">349</td>
<td align="center" valign="top">0.447</td>
<td/>
<td/>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.541, &#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001.</p>
</table-wrap-foot>
</table-wrap>
<p>Taken together, anthropomorphism moderated the effect of the medical triage doctor on psychological distance, and this moderating effect was further conditioned by task sensitivity; thus H4, H4a, and H4b were all supported. The mean comparison for the medical triage doctor&#x202F;&#x00D7;&#x202F;task sensitivity interaction is displayed in <xref ref-type="fig" rid="fig4">Figure 4</xref>.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Interaction effect of medical triage doctors, anthropomorphism, and task sensitivity on psychological distance. &#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001.</p>
</caption>
<graphic xlink:href="fpsyg-16-1730902-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart comparing psychological distance across low and high task sensitivity for low anthropomorphism, high anthropomorphism, and human doctor. Psychological distance is higher in high task sensitivity for all categories, with significant differences indicated by asterisks.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec19">
<label>5.4</label>
<title>Study 4: the moderating effect of level of AI technology adoption</title>
<p>We recruited 220 participants (112 males). Ages were concentrated in the ranges of 18&#x2013;25&#x202F;years (38.18%) and 26&#x2013;35&#x202F;years (33.64%). The procedure matched that of Study 1, except for the addition of measuring participants&#x2019; AI technology adoption level at the outset (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.903), and splitting participants into high vs. low AI technology adoption groups using the sample mean. Measures of psychological distance (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.928) and user trust (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.915) were identical to those in Study 1.</p>
<p><italic>Manipulation check</italic>. Participants were required to successfully distinguish between the AI and human medical triage doctors (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001) and reported strong immersion in the scenario (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). No between-group differences were observed in receptiveness to novel technologies, trust in AI medical products, or scenario involvement (all <italic>p</italic>&#x202F;&#x003E;&#x202F;0.05), indicating that the stimuli were effective. An independent samples <italic>t</italic>-test showed that perceived AI technology adoption level was higher in the high AI technology adoption level group than in the low AI technology adoption level group (M<sub>High AI technology adoption level</sub>&#x202F;=&#x202F;5.98, SD&#x202F;=&#x202F;0.46 vs. M<sub>Low AI technology adoption level</sub>&#x202F;=&#x202F;3.61, SD&#x202F;=&#x202F;0.86; <italic>t</italic>(152.753)&#x202F;=&#x202F;&#x2212;24.950, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), confirming that the AI technology adoption level grouping was successful.</p>
<p><italic>Main effect test</italic>. An independent-samples <italic>t</italic>-test showed that user trust was higher for participants in the human medical triage doctor condition than it was for those in the AI condition (M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;5.49, SD&#x202F;=&#x202F;1.00 vs. M<sub>AI medical triage doctor</sub>&#x202F;=&#x202F;4.62, SD&#x202F;=&#x202F;1.41; <italic>t</italic>(185.535)&#x202F;=&#x202F;&#x2212;5.262, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), thus confirming H1 once again.</p>
<p><italic>Mediation analysis</italic>. The mediation analysis was repeated using the PROCESS macro (Model 4). Results indicated a significant total effect of the medical triage doctor on user trust (effect&#x202F;=&#x202F;0.877, <italic>t</italic>&#x202F;=&#x202F;5.342, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001) as well as a significant direct effect (effect&#x202F;=&#x202F;0.450, <italic>t</italic>&#x202F;=&#x202F;3.672, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). A bootstrap analysis also confirmed a significant indirect effect of the medical triage doctor on user trust through psychological distance (effect&#x202F;=&#x202F;0.427, 95% BootCI&#x202F;=&#x202F;[0.203 to 0.681]), as detailed in <xref ref-type="table" rid="tab8">Table 8</xref>. These results support the partial mediating role of psychological distance, further verifying H2.</p>
<table-wrap position="float" id="tab8">
<label>Table 8</label>
<caption>
<p>Test of the mediating effect of psychological distance in Study 4 (<italic>N</italic>&#x202F;=&#x202F;220).</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Category</th>
<th align="center" valign="top" rowspan="2">Effect</th>
<th align="center" valign="top" rowspan="2">SE</th>
<th align="center" valign="top" rowspan="2">
<italic>t</italic>
</th>
<th align="center" valign="top" rowspan="2">
<italic>p</italic>
</th>
<th align="center" valign="top" colspan="2">95% CI</th>
<th align="left" valign="top" rowspan="2">Conclusion</th>
</tr>
<tr>
<th align="center" valign="top">BootLLCI</th>
<th align="center" valign="top">BootULCI</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Indirect effect</td>
<td align="center" valign="top">0.427</td>
<td align="center" valign="top">0.122</td>
<td align="center" valign="top">-</td>
<td align="center" valign="middle">CI excludes 0</td>
<td align="center" valign="top">0.203</td>
<td align="center" valign="top">0.681</td>
<td align="left" valign="top" rowspan="3">Partial mediation</td>
</tr>
<tr>
<td align="left" valign="top">Direct effect</td>
<td align="center" valign="top">0.450</td>
<td align="center" valign="top">0.123</td>
<td align="center" valign="top">3.672</td>
<td align="center" valign="middle">&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="top">0.208</td>
<td align="center" valign="top">0.691</td>
</tr>
<tr>
<td align="left" valign="top">Total effect</td>
<td align="center" valign="top">0.877</td>
<td align="center" valign="top">0.164</td>
<td align="center" valign="top">5.342</td>
<td align="center" valign="middle">&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="top">0.553</td>
<td align="center" valign="top">1.200</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>&#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001.</p>
</table-wrap-foot>
</table-wrap>
<p><italic>Moderation analysis</italic>. As <xref ref-type="table" rid="tab9">Table 9</xref> shows, ANOVA revealed a significant main effect of medical triage doctor [<italic>F</italic>(1, 216)&#x202F;=&#x202F;98.143, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001], a significant main effect of AI technology adoption level [<italic>F</italic>(1, 216)&#x202F;=&#x202F;391.863, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001], and a significant medical triage doctor&#x202F;&#x00D7;&#x202F;AI technology adoption level interaction [<italic>F</italic>(1, 216)&#x202F;=&#x202F;26.434, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001]. Specifically, among participants with a high level of AI technology adoption, user trust was higher in the human medical triage doctor condition than it was in the AI condition (M<sub>AI medical triage doctor</sub>&#x202F;=&#x202F;5.72, SD&#x202F;=&#x202F;0.34 vs. M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;6.19, SD&#x202F;=&#x202F;0.46, <italic>F</italic>(1, 216)&#x202F;=&#x202F;107.081, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001); thus, H5a was not supported. Among participants with a low level of AI technology adoption, user trust was higher in the human medical triage doctor condition than it was in the AI condition (M<sub>AI medical triage doctor</sub>&#x202F;=&#x202F;3.30, SD&#x202F;=&#x202F;1.02 vs. M<sub>Human medical triage doctor</sub>&#x202F;=&#x202F;4.76, SD&#x202F;=&#x202F;0.89, <italic>F</italic>(1, 216)&#x202F;=&#x202F;12.048, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), supporting H5b. In sum, a higher level of AI technology adoption increased trust in the AI medical triage doctor and narrowed the trust gap with the human medical triage doctor; overall, H5 and H5b were supported, whereas H5a was not. The mean comparison for the medical triage doctor&#x202F;&#x00D7;&#x202F;AI technology adoption level interaction is shown in <xref ref-type="fig" rid="fig5">Figure 5</xref>.</p>
<table-wrap position="float" id="tab9">
<label>Table 9</label>
<caption>
<p>Interaction effects of medical triage doctors and level of AI technology adoption in Study 4.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Source</th>
<th align="center" valign="top">Sum of squares</th>
<th align="center" valign="top">
<italic>df</italic>
</th>
<th align="center" valign="top">Mean square</th>
<th align="center" valign="top">
<italic>F</italic>
</th>
<th align="center" valign="top">
<italic>p</italic>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Intercept</td>
<td align="char" valign="top" char=".">5452.386</td>
<td align="center" valign="top">1</td>
<td align="char" valign="top" char=".">5452.386</td>
<td align="char" valign="top" char=".">10578.416</td>
<td align="center" valign="middle">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="top">Medical triage doctors</td>
<td align="char" valign="top" char=".">50.586</td>
<td align="center" valign="top">1</td>
<td align="char" valign="top" char=".">50.586</td>
<td align="char" valign="top" char=".">98.143</td>
<td align="center" valign="middle">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="top">AI technology adoption level</td>
<td align="char" valign="top" char=".">201.976</td>
<td align="center" valign="top">1</td>
<td align="char" valign="top" char=".">201.976</td>
<td align="char" valign="top" char=".">391.863</td>
<td align="center" valign="middle">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="top">Medical triage doctors&#x202F;&#x00D7;&#x202F;AI technology adoption level</td>
<td align="char" valign="top" char=".">13.625</td>
<td align="center" valign="top">1</td>
<td align="char" valign="top" char=".">13.625</td>
<td align="char" valign="top" char=".">26.434</td>
<td align="center" valign="middle">&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="middle">Residual</td>
<td align="char" valign="top" char=".">111.332</td>
<td align="center" valign="top">216</td>
<td align="char" valign="top" char=".">0.515</td>
<td/>
<td/>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.695, &#x002A;&#x002A;&#x002A; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001.</p>
</table-wrap-foot>
</table-wrap>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Interaction effect of medical triage doctors and AI technology adoption level on user trust.</p>
</caption>
<graphic xlink:href="fpsyg-16-1730902-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart comparing user trust in AI versus human doctors across low and high AI technology adoption levels. Orange bars represent low adoption: AI doctor at 3.30 and human doctor at 4.76. Blue bars represent high adoption: AI doctor at 5.72 and human doctor at 6.19. Asterisks indicate statistical significance.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec sec-type="conclusions" id="sec20">
<label>6</label>
<title>Conclusion</title>
<p>After systematically reviewing the literature on AI healthcare and medical triage, this study identified a mediated pathway by which the medical triage doctor (both AI and human) affects user trust through psychological distance, and empirically evaluated this mechanism using multiple moderators. The core findings are as follows:</p>
<p>First, medical triage doctors (both AI and human) significantly affected user trust, with psychological distance serving as a partial mediator. Relative to the AI medical triage doctors, the human medical triage doctor decreased psychological distance, thereby increasing user trust. Beyond this indirect pathway, the medical triage doctor also had a direct effect, in that the human medical triage doctor elicited higher user trust than an AI medical triage doctor.</p>
<p>Second, higher anthropomorphism shortened psychological distance and increased trust in AI medical triage doctors. When the AI medical triage doctor was perceived as being high in anthropomorphism, participants reported less psychological distance and greater trust. Elevated anthropomorphism thereby mitigates part of the AI medical triage doctor&#x2019;s disadvantage relative to human medical triage doctors, yielding trust levels comparable to human triage.</p>
<p>Third, the effect of anthropomorphism was constrained by context and further influenced by task sensitivity. The moderating effect of anthropomorphism was contingent upon the situation and additionally influenced by task sensitivity. In low task sensitivity scenarios, a high-anthropomorphism AI medical triage doctor diminished psychological distance to a point comparable to that of a human doctor, with both surpassing a low-anthropomorphism AI medical triage doctor. This indicates that for routine, low sensitivity consultations, high-anthropomorphism AI medical triage doctors can effectively replace a human medical triage doctor. Conversely, in high task sensitivity scenarios, the irreplaceability of human medical triage doctors becomes evident: participants experienced the closest psychological distance &#x2013; and consequently the greatest trust &#x2013; with the human medical triage doctor, followed by the high-anthropomorphism AI medical triage doctor, and finally the low-anthropomorphism AI medical triage doctor. In situations characterized by significant privacy concerns or severity, increased anthropomorphism is beneficial; however, users maintain the strongest affinity for and greatest faith in human medical triage doctors in these circumstances.</p>
<p>Fourth, AI technology adoption level moderated group differences, but the human medical triage doctor&#x2019;s advantage persisted. AI technology adoption level was found to moderate the effect of the medical triage doctors (both AI and human) on user trust. Users with a low level of AI technology adoption showed greater trust in the human medical triage doctor group than in the AI medical triage doctor condition. Among users with a high level of AI technology adoption, acceptance of AI increased participants&#x2019; trust in the AI medical triage doctor and narrowed the trust gap with the human medical triage doctor; however, a preference for human medical triage doctors nonetheless remained. In short, the trust advantage of the human medical triage doctor was maintained consistently across all groups. This further suggests that, in the context of medical triage, a general preference for AI technology is not sufficient to fully offset users&#x2019; need for psychological security, nor their concerns about algorithmic accountability and system safety. Even among high AI adopters, trust in AI medical triage doctors may be undermined by the perceived ambiguity of responsibility when clinical decision errors occur, as well as by perceived uncontrollable risks associated with unexpected technical malfunctions. In contrast, human medical triage doctors possess legal and ethical moral agency and can be held clearly accountable for medical outcomes. This perceived traceability of responsibility functions as an indispensable psychological safety safeguard. Moreover, users may view uniquely human capacities &#x2013; such as emotional communication, reassurance, a sense of moral obligation, and the ability to manage sudden, non-routine, or unstructured complex problems &#x2013; as core advantages that AI medical triage doctors cannot yet fully replicate.</p>
</sec>
<sec id="sec21">
<label>7</label>
<title>Implications and future directions</title>
<sec id="sec22">
<label>7.1</label>
<title>Theoretical contributions</title>
<p>First, this study brings psychological distance into the exploration of the AI medical triage context and clarifies its mediating effect in the formation of user trust. Prior research has placed great emphasis on the Technology Acceptance Model and anthropomorphism, with comparatively little attention given to users&#x2019; deeper psychological perceptions. By foregrounding psychological distance as a core mediator, we systematically traced how trust emerges across different types of medical triage doctors (AI vs. human) and provide a complementary framework for understanding trust formation in AI medical triage doctors.</p>
<p>Second, this research unpacks the context dependence of AI anthropomorphism in medical triage. Although many studies have found that anthropomorphism design can reduce psychological distance and thereby increase user trust, our introduction of task sensitivity as a second-stage moderator demonstrates that these benefits are not universal, but rather contingent on the clinical context. Under low sensitivity task conditions, high-anthropomorphism AI medical triage doctors are able to meaningfully shorten users&#x2019; perceived psychological distance and achieve trust levels comparable to human medical triage doctors; under high sensitivity task conditions, the trust gains from anthropomorphism are limited, and users continue to place greater trust in human medical triage doctors. This finding offers actionable guidance for differentiated design of AI medical triage services; specifically, the applicability of anthropomorphism should be evaluated carefully across tasks with different risk and privacy profiles.</p>
<p>Third, this study advances a comprehensive trust model that integrates multiple moderators within a single framework. Moving beyond single-variable accounts, we have incorporated psychological distance, anthropomorphism, task sensitivity, and level of AI technology adoption into a unified model and examined their interplay. The model not only corroborates the core pathways of trust formation in AI medical triage doctors but also reveals the complexity and dynamics of the trust-building process.</p>
</sec>
<sec id="sec23">
<label>7.2</label>
<title>Practical implications</title>
<p>The findings of this research offer meaningful managerial implications for guiding healthy development of smart AI healthcare.</p>
<p>First, implement differentiated, context-aware AI deployment. When introducing AI medical triage doctors, organizations should avoid positioning them as one-to-one substitutes for human care and instead adopt human&#x2013;AI collaborative deployment. For low sensitivity tasks (e.g., appointment scheduling, department navigation), high-anthropomorphism, high-capability AI can be deployed to improve efficiency. For high sensitivity tasks (e.g., privacy-laden issues or serious-condition consultations), organizations should default to using human or AI&#x202F;+&#x202F;human oversight models, with guaranteed human back-up available for when circumstances require it. If AI must be used, authoritative endorsement, transparent risk disclosures, and seamless escalation/hand-off mechanisms should be provided to mitigate potential losses in trust.</p>
<p>Second, incorporate psychological distance as a core design parameter for AI healthcare services. Advancing intelligent transformation requires attention not only to technical functionality and efficiency, but also to the perceived psychological proximity between the technology and its users. AI medical triage doctors should employ inclusive, empathetic, and human-centered design, language, and patterns of interaction. Humanizing cues such as compassionate responses, clear explanations, and timely apologies can narrow the psychological gap between users and AI services, thereby strengthening user trust. Elevating psychological distance to a fundamental design consideration is critical for sustaining the perceived legitimacy and acceptability of AI healthcare services.</p>
<p>Third, establish a trust-centric AI governance and evaluation framework. In assessing AI healthcare services, user trust should be a core dimension, alongside technical performance. Strategies should be tailored to user segments: for individuals with low AI adoption, prioritize an AI&#x202F;+&#x202F;human backup model that preserves clinician oversight to enhance perceived safety and controllability; for users with high AI acceptance, increase AI autonomy while strengthening explainability and transparency to meet high expectations and reinforce user confidence.</p>
</sec>
<sec id="sec24">
<label>7.3</label>
<title>Limitations and future research</title>
<p>This study has several limitations. First, the sample lacks broad representativeness, being concentrated in specific online and student populations that differ from the general public in age, education, and digital literacy; therefore, the findings may not generalize well to wider groups (e.g., older adults, rural residents). Second, there are inherent limitations regarding the experimental manipulation and the ecological validity of the scenario simulations, meaning that the scenario-based experiment is simplified relative to real clinical encounters. Specifically, for the manipulation of anthropomorphism, the high-anthropomorphism AI medical triage doctor was presented using an image of a human doctor while explicitly labeled as an AI doctor. Although the manipulation-check results indicated that participants understood and accepted the doctor as a high-anthropomorphism AI medical triage doctor, this combination of visual appearance and identity labeling may still have introduced perceptions of identity ambiguity or potential deception cues. In addition, although text- and image-based designs can aid causal inference, they cannot fully reproduce the complex emotions and behavioral decisions present in real interactions, where additional uncontrolled factors may also be operating in practice. Third, the measurement approach used in these studies was relatively single-method: core constructs relied on self-report scales, with limited validation from physiological or behavioral data.</p>
<p>To address these limitations, future work should proceed along three directions. (1) Broaden sampling coverage: use stratified sampling across multiple regions and recruitment channels, explicitly including older adults, rural residents, and individuals with lower digital literacy to enhance external validity. Future studies could also conduct cross-age or cross-cultural comparisons to examine whether the trust mechanisms underlying reliance on AI medical triage doctors differ across populations, thereby providing stronger evidence for the construction of more generalizable smart healthcare services. (2) The experimental manipulation should be refined, and the scenario realism improved. To minimize potential confounds in the manipulation of anthropomorphism (e.g., identity ambiguity and deception cues), future research could use high-fidelity computer-generated imagery (CGI) or GAN-based methods to create ultra-realistic &#x201C;digital human&#x201D; medical triage doctors, allowing for a cleaner test of the effects of anthropomorphism appearance. In addition, researchers should move from static text-and-image vignettes toward more dynamic, interactive paradigms &#x2013; such as high-simulation chatbots or immersive VR-based clinical environments &#x2013; to better approximate actual doctor&#x2013;patient dialogue, emotional exchange, and real-time feedback. Such designs can also be paired with longitudinal (repeated-measures) approaches to trace the dynamic evolution of trust, psychological distance, and affective bonding over sustained use of AI medical triage doctors. (3) Multimethod measurement and triangulation should be adopted. 
Self-report data should be complemented by physiological indices (e.g., heart rate variability, electrodermal activity) and objective behavioral data (e.g., eye-tracking trajectories, fixation duration, decision response time, interaction frequency and duration, and actual adherence/behavioral compliance) to build a subjective&#x2013;objective validation framework and capture deeper psychological processes during interactions with human and AI medical triage doctors more precisely.</p>
</sec>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec25">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref rid="SM1" ref-type="supplementary-material">Supplementary material</xref>, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="ethics-statement" id="sec26">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Medical Ethics Committee of Guangxi University. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec sec-type="author-contributions" id="sec27">
<title>Author contributions</title>
<p>JC: Software, Writing &#x2013; review &#x0026; editing, Formal analysis, Resources, Validation, Supervision, Investigation, Conceptualization, Methodology. HW: Writing &#x2013; original draft, Project administration, Data curation, Formal analysis, Validation. XQ: Data curation, Writing &#x2013; original draft, Formal analysis, Investigation, Supervision, Writing &#x2013; review &#x0026; editing, Project administration.</p>
</sec>
<sec sec-type="COI-statement" id="sec28">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec29">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec30">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec31">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/fpsyg.2025.1730902/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/fpsyg.2025.1730902/full#supplementary-material</ext-link>.</p>
<supplementary-material xlink:href="Table_1.DOCX" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Aaker</surname><given-names>J.</given-names></name> <name><surname>Fournier</surname><given-names>S.</given-names></name> <name><surname>Brasel</surname><given-names>S. A.</given-names></name></person-group> (<year>2004</year>). <article-title>When good brands do bad</article-title>. <source>J. Consum. Res.</source> <volume>31</volume>, <fpage>1</fpage>&#x2013;<lpage>16</lpage>. doi: <pub-id pub-id-type="doi">10.1086/383419</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ahn</surname><given-names>J.</given-names></name> <name><surname>Kim</surname><given-names>J.</given-names></name> <name><surname>Sung</surname><given-names>Y.</given-names></name></person-group> (<year>2021</year>). <article-title>AI-powered recommendations: the roles of perceived similarity and psychological distance on persuasion</article-title>. <source>Int. J. Advert.</source> <volume>40</volume>, <fpage>1366</fpage>&#x2013;<lpage>1384</lpage>. doi: <pub-id pub-id-type="doi">10.1080/02650487.2021.1982529</pub-id></mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ali</surname><given-names>F.</given-names></name> <name><surname>Dogan</surname><given-names>S.</given-names></name> <name><surname>Amin</surname><given-names>M.</given-names></name> <name><surname>Ramkissoon</surname><given-names>H.</given-names></name></person-group> (<year>2021</year>). <article-title>Brand anthropomorphism, love and defense: does attitude towards social distancing matter?</article-title> <source>Serv. Ind. J.</source> <volume>41</volume>, <fpage>58</fpage>&#x2013;<lpage>83</lpage>. doi: <pub-id pub-id-type="doi">10.1080/02642069.2020.1867542</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Attia</surname><given-names>Z. I.</given-names></name> <name><surname>Noseworthy</surname><given-names>P. A.</given-names></name> <name><surname>Lopez-Jimenez</surname><given-names>F.</given-names></name> <name><surname>Friedman</surname><given-names>P. A.</given-names></name> <name><surname>Kapa</surname><given-names>S.</given-names></name> <name><surname>Ocoro</surname><given-names>A. E.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>An artificial intelligence-enabled ECG algorithm for the identification of patients with atrial fibrillation during sinus rhythm: a retrospective analysis of outcome prediction</article-title>. <source>Lancet</source> <volume>394</volume>, <fpage>861</fpage>&#x2013;<lpage>867</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0140-6736(19)31721-0</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Baker</surname><given-names>A.</given-names></name> <name><surname>Perov</surname><given-names>Y.</given-names></name> <name><surname>Middleton</surname><given-names>K.</given-names></name> <name><surname>Diaz</surname><given-names>J.</given-names></name> <name><surname>Lee</surname><given-names>J. T. T.</given-names></name> <name><surname>Smith</surname><given-names>R. J. H.</given-names></name></person-group> (<year>2020</year>). <article-title>A comparison of artificial intelligence and human doctors for the purpose of triage and diagnosis</article-title>. <source>Front. Artif. Intell.</source> <volume>3</volume>:<fpage>543405</fpage>. doi: <pub-id pub-id-type="doi">10.3389/frai.2020.543405</pub-id>, <pub-id pub-id-type="pmid">33733203</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bamicha</surname><given-names>V.</given-names></name> <name><surname>Drigas</surname><given-names>A.</given-names></name></person-group> (<year>2024</year>). <article-title>Strengthening AI via ToM and MC dimensions</article-title>. <source>Sci. Electron. Arch.</source> <volume>17</volume>:<fpage>1939</fpage>. doi: <pub-id pub-id-type="doi">10.36560/17320241939</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bamicha</surname><given-names>V.</given-names></name> <name><surname>Pergantis</surname><given-names>P.</given-names></name> <name><surname>Drigas</surname><given-names>A.</given-names></name></person-group> (<year>2024</year>). <article-title>The effect of gut microbiome, neurotransmitters, and digital insights in autism</article-title>. <source>Appl. Microbiol.</source> <volume>4</volume>, <fpage>1677</fpage>&#x2013;<lpage>1701</lpage>. doi: <pub-id pub-id-type="doi">10.3390/applmicrobiol4040114</pub-id></mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bamicha</surname><given-names>V.</given-names></name> <name><surname>Pergantis</surname><given-names>P.</given-names></name> <name><surname>Skianis</surname><given-names>C.</given-names></name> <name><surname>Drigas</surname><given-names>A.</given-names></name></person-group> (<year>2025</year>). <article-title>Computational neuroscience&#x2019;s influence on autism neuro-transmission research: mapping serotonin, dopamine, GABA, and glutamate</article-title>. <source>Biomedicines</source> <volume>13</volume>:<fpage>1420</fpage>. doi: <pub-id pub-id-type="doi">10.3390/biomedicines13061420</pub-id>, <pub-id pub-id-type="pmid">40564138</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bandara</surname><given-names>R. J.</given-names></name> <name><surname>Fernando</surname><given-names>M.</given-names></name> <name><surname>Akter</surname><given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Construing online consumers&#x2019; information privacy decisions: the impact of psychological distance</article-title>. <source>Inf. Manag.</source> <volume>58</volume>:<fpage>103497</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.im.2021.103497</pub-id></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bartneck</surname><given-names>C.</given-names></name> <name><surname>Kuli&#x0107;</surname><given-names>D.</given-names></name> <name><surname>Croft</surname><given-names>E.</given-names></name> <name><surname>Zoghbi</surname><given-names>S.</given-names></name></person-group> (<year>2009</year>). <article-title>Measurement instruments for the anthropomorphism, animacy, likeability, perceived intelligence, and perceived safety of robots</article-title>. <source>Int. J. Soc. Robot.</source> <volume>1</volume>, <fpage>71</fpage>&#x2013;<lpage>81</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12369-008-0001-3</pub-id></mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chan</surname><given-names>B.</given-names></name></person-group> (<year>2023</year>). <article-title>Black-box assisted medical decisions: AI power vs. ethical physician care</article-title>. <source>Med. Health Care Philos.</source> <volume>26</volume>, <fpage>285</fpage>&#x2013;<lpage>292</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11019-023-10153-z</pub-id>, <pub-id pub-id-type="pmid">37273041</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>S. Y.</given-names></name> <name><surname>Kuo</surname><given-names>H. Y.</given-names></name> <name><surname>Chang</surname><given-names>S. H.</given-names></name></person-group> (<year>2024</year>). <article-title>Perceptions of ChatGPT in healthcare: usefulness, trust, and risk</article-title>. <source>Front. Public Health</source> <volume>12</volume>:<fpage>1457131</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpubh.2024.1457131</pub-id>, <pub-id pub-id-type="pmid">39346584</pub-id></mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>H.</given-names></name> <name><surname>Li</surname><given-names>S.</given-names></name></person-group> (<year>2018</year>). <article-title>Measuring the psychological distance between an organization and its member &#x2013;the construction and validation of a new scale</article-title>. <source>Front. Psychol.</source> <volume>8</volume>:<fpage>2296</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2017.02296</pub-id>, <pub-id pub-id-type="pmid">29375427</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>M.</given-names></name> <name><surname>Zhang</surname><given-names>B.</given-names></name> <name><surname>Cai</surname><given-names>Z.</given-names></name> <name><surname>Li</surname><given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Acceptance of clinical artificial intelligence among physicians and medical students: a systematic review with cross-sectional survey</article-title>. <source>Front. Med.</source> <volume>9</volume>:<fpage>990604</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fmed.2022.990604</pub-id>, <pub-id pub-id-type="pmid">36117979</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chin</surname><given-names>C. H.</given-names></name> <name><surname>Wong</surname><given-names>W. P. M.</given-names></name> <name><surname>Cham</surname><given-names>T. H.</given-names></name> <name><surname>Thong</surname><given-names>J. Z.</given-names></name> <name><surname>Ling</surname><given-names>J. P. W.</given-names></name></person-group> (<year>2024</year>). <article-title>Exploring the usage intention of AI-powered devices in smart homes among millennials and zillennials: the moderating role of trust</article-title>. <source>Young Consum.</source> <volume>25</volume>, <fpage>1</fpage>&#x2013;<lpage>27</lpage>. doi: <pub-id pub-id-type="doi">10.1108/yc-05-2023-1752</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chung</surname><given-names>S. I.</given-names></name> <name><surname>Han</surname><given-names>K. H.</given-names></name></person-group> (<year>2022</year>). <article-title>Consumer perception of chatbots and purchase intentions: anthropomorphism and conversational relevance</article-title>. <source>Int. J. Adv. Cult. Technol.</source> <volume>10</volume>, <fpage>211</fpage>&#x2013;<lpage>229</lpage>. doi: <pub-id pub-id-type="doi">10.17703/IJACT.2022.10.1.211</pub-id></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Cohen</surname><given-names>J.</given-names></name></person-group> (<year>2013</year>). <source>Statistical power analysis for the behavioral sciences</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>Routledge</publisher-name> doi: <pub-id pub-id-type="doi">10.4324/9780203771587</pub-id>.</mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Contreras</surname><given-names>I.</given-names></name> <name><surname>Vehi</surname><given-names>J.</given-names></name></person-group> (<year>2018</year>). <article-title>Artificial intelligence for diabetes management and decision support: literature review</article-title>. <source>J. Med. Internet Res.</source> <volume>20</volume>:<fpage>e10775</fpage>. doi: <pub-id pub-id-type="doi">10.2196/10775</pub-id>, <pub-id pub-id-type="pmid">29848472</pub-id></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cui</surname><given-names>H.</given-names></name> <name><surname>Liu</surname><given-names>F.</given-names></name></person-group> (<year>2020</year>). <article-title>Design and implementation of intelligent guidance service robot</article-title>. <source>Comput. Appl. Softw.</source> <volume>37</volume>, <fpage>329</fpage>&#x2013;<lpage>333</lpage>. doi: <pub-id pub-id-type="doi">10.3969/j.issn.1000-386x.2020.07.055</pub-id></mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Davis</surname><given-names>F. D.</given-names></name></person-group> (<year>1989</year>). <article-title>Perceived usefulness, perceived ease of use, and user acceptance of information technology</article-title>. <source>MIS Q.</source> <volume>13</volume>, <fpage>319</fpage>&#x2013;<lpage>340</lpage>. doi: <pub-id pub-id-type="doi">10.2307/249008</pub-id></mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dias</surname><given-names>D.</given-names></name> <name><surname>Cunha</surname><given-names>J. P. S.</given-names></name></person-group> (<year>2018</year>). <article-title>Wearable health devices &#x2013; vital sign monitoring, systems and technologies</article-title>. <source>Sensors</source> <volume>18</volume>:<fpage>2414</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s18082414</pub-id>, <pub-id pub-id-type="pmid">30044415</pub-id></mixed-citation></ref>
<ref id="ref22"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Epley</surname><given-names>N.</given-names></name> <name><surname>Waytz</surname><given-names>A.</given-names></name> <name><surname>Akalis</surname><given-names>S.</given-names></name> <name><surname>Cacioppo</surname><given-names>J. T.</given-names></name></person-group> (<year>2008</year>). <article-title>When we need a human: motivational determinants of anthropomorphism</article-title>. <source>Soc. Cogn.</source> <volume>26</volume>, <fpage>143</fpage>&#x2013;<lpage>155</lpage>. doi: <pub-id pub-id-type="doi">10.1521/soco.2008.26.2.143</pub-id></mixed-citation></ref>
<ref id="ref23"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Epley</surname><given-names>N.</given-names></name> <name><surname>Waytz</surname><given-names>A.</given-names></name> <name><surname>Cacioppo</surname><given-names>J. T.</given-names></name></person-group> (<year>2007</year>). <article-title>On seeing human: a three-factor theory of anthropomorphism</article-title>. <source>Psychol. Rev.</source> <volume>114</volume>, <fpage>864</fpage>&#x2013;<lpage>886</lpage>. doi: <pub-id pub-id-type="doi">10.1037/0033-295x.114.4.864</pub-id>, <pub-id pub-id-type="pmid">17907867</pub-id></mixed-citation></ref>
<ref id="ref24"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Esfandiari</surname><given-names>E.</given-names></name> <name><surname>Kalroozi</surname><given-names>F.</given-names></name> <name><surname>Mehrabi</surname><given-names>N.</given-names></name> <name><surname>Haghani</surname><given-names>H.</given-names></name></person-group> (<year>2024</year>). <article-title>Knowledge and acceptance of artificial intelligence and its applications among the physicians working in military medical centers affiliated with Aja University: a cross-sectional study</article-title>. <source>J. Educ. Health Promot.</source> <volume>13</volume>:<fpage>271</fpage>. doi: <pub-id pub-id-type="doi">10.4103/jehp.jehp_898_23</pub-id>, <pub-id pub-id-type="pmid">39309999</pub-id></mixed-citation></ref>
<ref id="ref25"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Everard</surname><given-names>A.</given-names></name> <name><surname>Galletta</surname><given-names>D. F.</given-names></name></person-group> (<year>2005</year>). <article-title>How presentation flaws affect perceived site quality, trust, and intention to purchase from an online store</article-title>. <source>J. Manag. Inf. Syst.</source> <volume>22</volume>, <fpage>55</fpage>&#x2013;<lpage>95</lpage>. doi: <pub-id pub-id-type="doi">10.2753/mis0742-1222220303</pub-id></mixed-citation></ref>
<ref id="ref26"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fritz</surname><given-names>M. S.</given-names></name> <name><surname>MacKinnon</surname><given-names>D. P.</given-names></name></person-group> (<year>2007</year>). <article-title>Required sample size to detect the mediated effect</article-title>. <source>Psychol. Sci.</source> <volume>18</volume>, <fpage>233</fpage>&#x2013;<lpage>239</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1467-9280.2007.01882.x</pub-id>, <pub-id pub-id-type="pmid">17444920</pub-id></mixed-citation></ref>
<ref id="ref27"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Grassini</surname><given-names>S.</given-names></name></person-group> (<year>2023</year>). <article-title>Development and validation of the AI attitude scale (AIAS-4): a brief measure of general attitude toward artificial intelligence</article-title>. <source>Front. Psychol.</source> <volume>14</volume>:<fpage>1191628</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2023.1191628</pub-id>, <pub-id pub-id-type="pmid">37554139</pub-id></mixed-citation></ref>
<ref id="ref28"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Guido</surname><given-names>G.</given-names></name> <name><surname>Peluso</surname><given-names>A. M.</given-names></name></person-group> (<year>2015</year>). <article-title>Brand anthropomorphism: conceptualization, measurement, and impact on brand personality and loyalty</article-title>. <source>J. Brand Manag.</source> <volume>22</volume>, <fpage>1</fpage>&#x2013;<lpage>19</lpage>. doi: <pub-id pub-id-type="doi">10.1057/bm.2014.40</pub-id></mixed-citation></ref>
<ref id="ref29"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Haenssle</surname><given-names>H. A.</given-names></name> <name><surname>Fink</surname><given-names>C.</given-names></name> <name><surname>Schneiderbauer</surname><given-names>R.</given-names></name> <name><surname>Toberer</surname><given-names>F.</given-names></name> <name><surname>Buhl</surname><given-names>T.</given-names></name> <name><surname>Blum</surname><given-names>A.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Man against machine: diagnostic performance of a deep learning convolutional neural network for dermoscopic melanoma recognition in comparison to 58 dermatologists</article-title>. <source>Ann. Oncol.</source> <volume>29</volume>, <fpage>1836</fpage>&#x2013;<lpage>1842</lpage>. doi: <pub-id pub-id-type="doi">10.1093/annonc/mdy166</pub-id>, <pub-id pub-id-type="pmid">29846502</pub-id></mixed-citation></ref>
<ref id="ref30"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hamet</surname><given-names>P.</given-names></name> <name><surname>Tremblay</surname><given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>Artificial intelligence in medicine</article-title>. <source>Metabolism</source> <volume>69</volume>, <fpage>S36</fpage>&#x2013;<lpage>S40</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.metabol.2017.01.011</pub-id>, <pub-id pub-id-type="pmid">28126242</pub-id></mixed-citation></ref>
<ref id="ref31"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Harada</surname><given-names>T.</given-names></name> <name><surname>Shimizu</surname><given-names>T.</given-names></name> <name><surname>Kaji</surname><given-names>Y.</given-names></name> <name><surname>Takeuchi</surname><given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>A perspective from a case conference on comparing the diagnostic process: human diagnostic thinking vs. artificial intelligence (AI) decision support tools</article-title>. <source>Int. J. Environ. Res. Public Health</source> <volume>17</volume>:<fpage>6110</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ijerph17176110</pub-id>, <pub-id pub-id-type="pmid">32842581</pub-id></mixed-citation></ref>
<ref id="ref32"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hayes</surname><given-names>A. F.</given-names></name></person-group> (<year>2013</year>). <source>Introduction to mediation, moderation, and conditional process analysis: a regression-based approach</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>Guilford Press</publisher-name>.</mixed-citation></ref>
<ref id="ref33"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hsieh</surname><given-names>S. H.</given-names></name> <name><surname>Lee</surname><given-names>C. T.</given-names></name></person-group> (<year>2024</year>). <article-title>The AI humanness: how perceived personality builds trust and continuous usage intention</article-title>. <source>J. Prod. Brand. Manag.</source> <volume>33</volume>, <fpage>618</fpage>&#x2013;<lpage>632</lpage>. doi: <pub-id pub-id-type="doi">10.1108/jpbm-10-2023-4797</pub-id></mixed-citation></ref>
<ref id="ref34"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Hutson</surname><given-names>M.</given-names></name></person-group> (<year>2017</year>). <source>Self-taught artificial intelligence beats doctors at predicting heart attacks</source>. <publisher-name>Science (News)</publisher-name>. Available online at: <ext-link xlink:href="https://www.science.org/content/article/self-taught-artificial-intelligence-beats-doctors-predicting-heart-attacks" ext-link-type="uri">https://www.science.org/content/article/self-taught-artificial-intelligence-beats-doctors-predicting-heart-attacks</ext-link> (Accessed October 18, 2025).</mixed-citation></ref>
<ref id="ref35"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ilicki</surname><given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>Challenges in evaluating the accuracy of AI-containing digital triage systems: a systematic review</article-title>. <source>PLoS One</source> <volume>17</volume>:<fpage>e0279636</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0279636</pub-id>, <pub-id pub-id-type="pmid">36574438</pub-id></mixed-citation></ref>
<ref id="ref36"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname><given-names>F.</given-names></name> <name><surname>Jiang</surname><given-names>Y.</given-names></name> <name><surname>Zhi</surname><given-names>H.</given-names></name> <name><surname>Dong</surname><given-names>Y.</given-names></name> <name><surname>Li</surname><given-names>H.</given-names></name> <name><surname>Ma</surname><given-names>S. F.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Artificial intelligence in healthcare: past, present and future</article-title>. <source>Stroke Vasc. Neurol.</source> <volume>2</volume>, <fpage>230</fpage>&#x2013;<lpage>243</lpage>. doi: <pub-id pub-id-type="doi">10.1136/svn-2017-000101</pub-id>, <pub-id pub-id-type="pmid">29507784</pub-id></mixed-citation></ref>
<ref id="ref37"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jumper</surname><given-names>J.</given-names></name> <name><surname>Evans</surname><given-names>R.</given-names></name> <name><surname>Pritzel</surname><given-names>A.</given-names></name> <name><surname>Green</surname><given-names>T.</given-names></name> <name><surname>Figurnov</surname><given-names>M.</given-names></name> <name><surname>Ronneberger</surname><given-names>O.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Highly accurate protein structure prediction with AlphaFold</article-title>. <source>Nature</source> <volume>596</volume>, <fpage>583</fpage>&#x2013;<lpage>589</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41586-021-03819-2</pub-id>, <pub-id pub-id-type="pmid">34265844</pub-id></mixed-citation></ref>
<ref id="ref38"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Keskinbora</surname><given-names>K. H.</given-names></name></person-group> (<year>2019</year>). <article-title>Medical ethics considerations on artificial intelligence</article-title>. <source>J. Clin. Neurosci.</source> <volume>64</volume>, <fpage>277</fpage>&#x2013;<lpage>282</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jocn.2019.03.001</pub-id>, <pub-id pub-id-type="pmid">30878282</pub-id></mixed-citation></ref>
<ref id="ref39"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname><given-names>D.</given-names></name> <name><surname>Park</surname><given-names>K.</given-names></name> <name><surname>Park</surname><given-names>Y.</given-names></name> <name><surname>Rho</surname><given-names>J. J.</given-names></name></person-group> (<year>2019</year>). <article-title>Willingness to provide personal information: perspective of privacy calculus in IoT services</article-title>. <source>Comput. Hum. Behav.</source> <volume>92</volume>, <fpage>273</fpage>&#x2013;<lpage>281</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2018.11.022</pub-id></mixed-citation></ref>
<ref id="ref40"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kokolakis</surname><given-names>S.</given-names></name></person-group> (<year>2017</year>). <article-title>Privacy attitudes and privacy behaviour: a review of current research on the privacy paradox phenomenon</article-title>. <source>Comput. Secur.</source> <volume>64</volume>, <fpage>122</fpage>&#x2013;<lpage>134</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cose.2015.07.002</pub-id></mixed-citation></ref>
<ref id="ref41"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Leachman</surname><given-names>S. A.</given-names></name> <name><surname>Merlino</surname><given-names>G.</given-names></name></person-group> (<year>2017</year>). <article-title>The final frontier in cancer diagnosis</article-title>. <source>Nature</source> <volume>542</volume>, <fpage>36</fpage>&#x2013;<lpage>38</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nature21492</pub-id>, <pub-id pub-id-type="pmid">28150762</pub-id></mixed-citation></ref>
<ref id="ref42"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>S.</given-names></name> <name><surname>Choi</surname><given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>Enhancing user experience with conversational agent for movie recommendation: effects of self-disclosure and reciprocity</article-title>. <source>Int. J. Hum. Comput. Stud.</source> <volume>103</volume>, <fpage>95</fpage>&#x2013;<lpage>105</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijhcs.2017.02.005</pub-id></mixed-citation></ref>
<ref id="ref43"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>J. D.</given-names></name> <name><surname>See</surname><given-names>K. A.</given-names></name></person-group> (<year>2004</year>). <article-title>Trust in automation: designing for appropriate reliance</article-title>. <source>Hum. Factors</source> <volume>46</volume>, <fpage>50</fpage>&#x2013;<lpage>80</lpage>. doi: <pub-id pub-id-type="doi">10.1518/hfes.46.1.50.30392</pub-id>, <pub-id pub-id-type="pmid">15151155</pub-id></mixed-citation></ref>
<ref id="ref44"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>X.</given-names></name> <name><surname>Sung</surname><given-names>Y.</given-names></name></person-group> (<year>2021</year>). <article-title>Anthropomorphism brings us closer: the mediating role of psychological distance in user&#x2013;AI assistant interactions</article-title>. <source>Comput. Human Behav.</source> <volume>118</volume>:<fpage>106680</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2021.106680</pub-id></mixed-citation></ref>
<ref id="ref45"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liang</surname><given-names>X. D.</given-names></name> <name><surname>Li</surname><given-names>Y. H.</given-names></name> <name><surname>Liu</surname><given-names>F.</given-names></name></person-group> (<year>2018</year>). <article-title>The influence mechanism of privacy policies on consumers&#x2019; willingness to provide information: based on moderating effects of information sensitivity</article-title>. <source>Manag. Rev.</source> <volume>30</volume>, <fpage>97</fpage>&#x2013;<lpage>107</lpage>. doi: <pub-id pub-id-type="doi">10.14120/j.cnki.cn11-5057/f.2018.11.008</pub-id></mixed-citation></ref>
<ref id="ref46"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liberman</surname><given-names>N.</given-names></name> <name><surname>Trope</surname><given-names>Y.</given-names></name></person-group> (<year>2008</year>). <article-title>The psychology of transcending the here and now</article-title>. <source>Science</source> <volume>322</volume>, <fpage>1201</fpage>&#x2013;<lpage>1205</lpage>. doi: <pub-id pub-id-type="doi">10.1126/science.1161958</pub-id>, <pub-id pub-id-type="pmid">19023074</pub-id></mixed-citation></ref>
<ref id="ref47"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lin</surname><given-names>C. C.</given-names></name> <name><surname>Hill</surname><given-names>C. E.</given-names></name> <name><surname>Burke</surname><given-names>J. F.</given-names></name> <name><surname>Callaghan</surname><given-names>B. C.</given-names></name></person-group> (<year>2021</year>). <article-title>Primary care providers perform more neurologic visits than neurologists among Medicare beneficiaries</article-title>. <source>J. Eval. Clin. Pract.</source> <volume>27</volume>, <fpage>223</fpage>&#x2013;<lpage>227</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jep.13439</pub-id>, <pub-id pub-id-type="pmid">32754960</pub-id></mixed-citation></ref>
<ref id="ref48"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Lohr</surname><given-names>S.</given-names></name></person-group> (<year>2016</year>). <source>IBM is counting on its bet on Watson, and paying big money for it</source>. <publisher-name>The New York Times</publisher-name>. Available online at: <ext-link xlink:href="https://www.nytimes.com/2016/10/17/technology/ibm-is-counting-on-its-bet-on-watson-and-paying-big-money-for-it.html" ext-link-type="uri">https://www.nytimes.com/2016/10/17/technology/ibm-is-counting-on-its-bet-on-watson-and-paying-big-money-for-it.html</ext-link> (Accessed October 17, 2025).</mixed-citation></ref>
<ref id="ref49"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Longoni</surname><given-names>C.</given-names></name> <name><surname>Bonezzi</surname><given-names>A.</given-names></name> <name><surname>Morewedge</surname><given-names>C. K.</given-names></name></person-group> (<year>2019</year>). <article-title>Resistance to medical artificial intelligence</article-title>. <source>J. Consum. Res.</source> <volume>46</volume>, <fpage>629</fpage>&#x2013;<lpage>650</lpage>. doi: <pub-id pub-id-type="doi">10.1093/jcr/ucz013</pub-id></mixed-citation></ref>
<ref id="ref50"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname><given-names>W.</given-names></name> <name><surname>Liu</surname><given-names>J. W.</given-names></name> <name><surname>Ma</surname><given-names>Y. Q.</given-names></name> <name><surname>Cheng</surname><given-names>Q. K.</given-names></name></person-group> (<year>2023</year>). <article-title>The influence of large language models represented by ChatGPT on information resources management</article-title>. <source>Doc. Inf. Knowl.</source> <volume>40</volume>, <fpage>6</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.13366/j.dik.2023.02.006</pub-id></mixed-citation></ref>
<ref id="ref51"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>MacIntyre</surname><given-names>C. R.</given-names></name> <name><surname>Chen</surname><given-names>X.</given-names></name> <name><surname>Kunasekaran</surname><given-names>M.</given-names></name> <name><surname>Moa</surname><given-names>A.</given-names></name> <name><surname>Heslop</surname><given-names>D. J.</given-names></name> <name><surname>Chughtai</surname><given-names>A. A.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Artificial intelligence in public health: the potential of epidemic early warning systems</article-title>. <source>J. Int. Med. Res.</source> <volume>51</volume>:<fpage>3000605231159335</fpage>. doi: <pub-id pub-id-type="doi">10.1177/03000605231159335</pub-id></mixed-citation></ref>
<ref id="ref52"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Malhotra</surname><given-names>N. K.</given-names></name> <name><surname>Kim</surname><given-names>S. S.</given-names></name> <name><surname>Agarwal</surname><given-names>J.</given-names></name></person-group> (<year>2004</year>). <article-title>Internet users' information privacy concerns (IUIPC): the construct, the scale, and a causal model</article-title>. <source>Inf. Syst. Res.</source> <volume>15</volume>, <fpage>336</fpage>&#x2013;<lpage>355</lpage>. doi: <pub-id pub-id-type="doi">10.1287/isre.1040.0032</pub-id></mixed-citation></ref>
<ref id="ref53"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Minen</surname><given-names>M. T.</given-names></name> <name><surname>Robbins</surname><given-names>M. S.</given-names></name> <name><surname>Loder</surname><given-names>E.</given-names></name> <name><surname>Nahas</surname><given-names>S.</given-names></name> <name><surname>Gautreaux</surname><given-names>J.</given-names></name> <name><surname>Litin</surname><given-names>S.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Addressing the crisis of diagnosis and management of migraine in primary care: a summary of the American headache society FrontLine primary care advisory board</article-title>. <source>Headache</source> <volume>60</volume>, <fpage>1000</fpage>&#x2013;<lpage>1004</lpage>. doi: <pub-id pub-id-type="doi">10.1111/head.13797</pub-id></mixed-citation></ref>
<ref id="ref54"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Moor</surname><given-names>M.</given-names></name> <name><surname>Banerjee</surname><given-names>O.</given-names></name> <name><surname>Abad</surname><given-names>Z. S. H.</given-names></name> <name><surname>Krumholz</surname><given-names>H. M.</given-names></name> <name><surname>Leskovec</surname><given-names>J.</given-names></name> <name><surname>Topol</surname><given-names>E. J.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Foundation models for generalist medical artificial intelligence</article-title>. <source>Nature</source> <volume>616</volume>, <fpage>259</fpage>&#x2013;<lpage>265</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41586-023-05881-4</pub-id>, <pub-id pub-id-type="pmid">37045921</pub-id></mixed-citation></ref>
<ref id="ref55"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Moraiti</surname><given-names>I.</given-names></name> <name><surname>Drigas</surname><given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>AI tools like ChatGPT for people with neurodevelopmental disorders</article-title>. <source>Int. J. Online Biomed. Eng.</source> <volume>19</volume>, <fpage>20</fpage>&#x2013;<lpage>34</lpage>. doi: <pub-id pub-id-type="doi">10.3991/ijoe.v19i16.44429</pub-id></mixed-citation></ref>
<ref id="ref56"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mori</surname><given-names>M.</given-names></name> <name><surname>MacDorman</surname><given-names>K. F.</given-names></name> <name><surname>Kageki</surname><given-names>N.</given-names></name></person-group> (<year>2012</year>). <article-title>The uncanny valley [from the field]</article-title>. <source>IEEE Robot. Autom. Mag.</source> <volume>19</volume>, <fpage>98</fpage>&#x2013;<lpage>100</lpage>. doi: <pub-id pub-id-type="doi">10.1109/MRA.2012.2192811</pub-id></mixed-citation></ref>
<ref id="ref57"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Na</surname><given-names>S.</given-names></name> <name><surname>Heo</surname><given-names>S.</given-names></name> <name><surname>Choi</surname><given-names>W.</given-names></name> <name><surname>Kim</surname><given-names>H.</given-names></name></person-group> (<year>2023</year>). <article-title>Artificial intelligence (AI)-based technology adoption in the construction industry: a cross national perspective using the technology acceptance model</article-title>. <source>Buildings</source> <volume>13</volume>:<fpage>2518</fpage>. doi: <pub-id pub-id-type="doi">10.3390/buildings13102518</pub-id></mixed-citation></ref>
<ref id="ref58"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Na</surname><given-names>S.</given-names></name> <name><surname>Heo</surname><given-names>S.</given-names></name> <name><surname>Han</surname><given-names>S.</given-names></name> <name><surname>Kim</surname><given-names>H.</given-names></name></person-group> (<year>2022</year>). <article-title>Acceptance model of artificial intelligence (AI)-based technologies in construction firms: applying the technology acceptance model (TAM) in combination with the technology&#x2013;organisation&#x2013;environment (TOE) framework</article-title>. <source>Buildings</source> <volume>12</volume>:<fpage>90</fpage>. doi: <pub-id pub-id-type="doi">10.3390/buildings12020090</pub-id></mixed-citation></ref>
<ref id="ref59"><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll1">National Health Commission of the People&#x2019;s Republic of China</collab></person-group> (<year>2024</year>). <source>Notice of the general office of the National Health Commission on issuing the reference guidelines for artificial intelligence application scenarios in the health care industry</source>. Available online at: <ext-link xlink:href="https://www.nhc.gov.cn/guihuaxxs/c100133/202411/3dee425b8dc34f739d63483c4e5c334c.shtml" ext-link-type="uri">https://www.nhc.gov.cn/guihuaxxs/c100133/202411/3dee425b8dc34f739d63483c4e5c334c.shtml</ext-link> (Accessed December 22, 2025).</mixed-citation></ref>
<ref id="ref60"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Palaniappan</surname><given-names>K.</given-names></name> <name><surname>Lin</surname><given-names>Y. E. T.</given-names></name> <name><surname>Vogel</surname><given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>Global regulatory frameworks for the use of artificial intelligence (AI) in the healthcare services sector</article-title>. <source>Healthcare</source> <volume>12</volume>:<fpage>562</fpage>. doi: <pub-id pub-id-type="doi">10.3390/healthcare12050562</pub-id>, <pub-id pub-id-type="pmid">38470673</pub-id></mixed-citation></ref>
<ref id="ref61"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Park</surname><given-names>G.</given-names></name> <name><surname>Chung</surname><given-names>J.</given-names></name> <name><surname>Lee</surname><given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>Human vs. machine-like representation in chatbot mental health counseling: the serial mediation of psychological distance and trust on compliance intention</article-title>. <source>Curr. Psychol.</source> <volume>43</volume>, <fpage>4352</fpage>&#x2013;<lpage>4363</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12144-023-04653-7</pub-id>, <pub-id pub-id-type="pmid">37359642</pub-id></mixed-citation></ref>
<ref id="ref8001"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Park</surname><given-names>G.</given-names></name> <name><surname>Park</surname><given-names>H. S.</given-names></name></person-group> (<year>2016</year>). <article-title>Corporate social responsibility in Korea: How to communicate global issues to local stakeholders</article-title>. <source>Corp. Soc. Responsib. Environ. Manag.</source> <volume>23</volume>, <fpage>77</fpage>&#x2013;<lpage>87</lpage>. doi: <pub-id pub-id-type="doi">10.1002/csr.1362</pub-id></mixed-citation></ref>
<ref id="ref62"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pescosolido</surname><given-names>B. A.</given-names></name> <name><surname>Martin</surname><given-names>J. K.</given-names></name></person-group> (<year>2015</year>). <article-title>The stigma complex</article-title>. <source>Annu. Rev. Sociol.</source> <volume>41</volume>, <fpage>87</fpage>&#x2013;<lpage>116</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev-soc-071312-145702</pub-id>, <pub-id pub-id-type="pmid">26855471</pub-id></mixed-citation></ref>
<ref id="ref63"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Qin</surname><given-names>X.</given-names></name> <name><surname>Zhou</surname><given-names>X.</given-names></name> <name><surname>Chen</surname><given-names>C.</given-names></name> <name><surname>Wu</surname><given-names>D. Y.</given-names></name> <name><surname>Zhou</surname><given-names>H. S.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>AI aversion or appreciation? A capability&#x2013;personalization framework and a meta-analytic review</article-title>. <source>Psychol. Bull.</source> <volume>151</volume>, <fpage>580</fpage>&#x2013;<lpage>599</lpage>. doi: <pub-id pub-id-type="doi">10.1037/bul0000477</pub-id></mixed-citation></ref>
<ref id="ref64"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rajpurkar</surname><given-names>P.</given-names></name> <name><surname>Chen</surname><given-names>E.</given-names></name> <name><surname>Banerjee</surname><given-names>O.</given-names></name> <name><surname>Topol</surname><given-names>E. J.</given-names></name></person-group> (<year>2022</year>). <article-title>AI in health and medicine</article-title>. <source>Nat. Med.</source> <volume>28</volume>, <fpage>31</fpage>&#x2013;<lpage>38</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41591-021-01614-0</pub-id></mixed-citation></ref>
<ref id="ref65"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Riedl</surname><given-names>R.</given-names></name> <name><surname>Hogeterp</surname><given-names>S. A.</given-names></name> <name><surname>Reuter</surname><given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>Do patients prefer a human doctor, artificial intelligence, or a blend, and is this preference dependent on medical discipline? Empirical evidence and implications for medical practice</article-title>. <source>Front. Psychol.</source> <volume>15</volume>:<fpage>1422177</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2024.1422177</pub-id>, <pub-id pub-id-type="pmid">39188871</pub-id></mixed-citation></ref>
<ref id="ref66"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shaheen</surname><given-names>M. Y.</given-names></name></person-group> (<year>2021</year>). <article-title>Applications of artificial intelligence (AI) in healthcare: a review</article-title>. <source>ScienceOpen</source>. <italic>Preprint</italic>. doi: <pub-id pub-id-type="doi">10.14293/S2199-1006.1.SOR-.PPVRY8K.v1</pub-id></mixed-citation></ref>
<ref id="ref67"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shukur</surname><given-names>B. S.</given-names></name> <name><surname>Abd Ghani</surname><given-names>M. K.</given-names></name> <name><surname>Aboobaider</surname><given-names>B. M.</given-names></name></person-group> (<year>2024</year>). <article-title>Digital physicians: unleashing artificial intelligence in transforming healthcare and exploring the future of modern approaches</article-title>. <source>Mesopotamian J. Artif. Intell. Healthc.</source> <volume>2024</volume>, <fpage>28</fpage>&#x2013;<lpage>34</lpage>. doi: <pub-id pub-id-type="doi">10.58496/MJAIH/2024/005</pub-id></mixed-citation></ref>
<ref id="ref68"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Smith</surname><given-names>H. J.</given-names></name> <name><surname>Milberg</surname><given-names>S. J.</given-names></name> <name><surname>Burke</surname><given-names>S. J.</given-names></name></person-group> (<year>1996</year>). <article-title>Information privacy: measuring individuals' concerns about organizational practices</article-title>. <source>MIS Q.</source> <volume>20</volume>, <fpage>167</fpage>&#x2013;<lpage>196</lpage>. doi: <pub-id pub-id-type="doi">10.2307/249477</pub-id></mixed-citation></ref>
<ref id="ref69"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Solanki</surname><given-names>P.</given-names></name> <name><surname>Grundy</surname><given-names>J.</given-names></name> <name><surname>Hussain</surname><given-names>W.</given-names></name></person-group> (<year>2023</year>). <article-title>Operationalising ethics in artificial intelligence for healthcare: a framework for AI developers</article-title>. <source>AI Ethics</source> <volume>3</volume>, <fpage>223</fpage>&#x2013;<lpage>240</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s43681-022-00195-z</pub-id></mixed-citation></ref>
<ref id="ref70"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Steerling</surname><given-names>E.</given-names></name> <name><surname>Svedberg</surname><given-names>P.</given-names></name> <name><surname>Nilsen</surname><given-names>P.</given-names></name> <name><surname>Siira</surname><given-names>E.</given-names></name> <name><surname>Nygren</surname><given-names>J.</given-names></name></person-group> (<year>2025</year>). <article-title>Influences on trust in the use of AI-based triage&#x2014;an interview study with primary healthcare professionals and patients in Sweden</article-title>. <source>Front. Digit. Health</source> <volume>7</volume>:<fpage>1565080</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fdgth.2025.1565080</pub-id>, <pub-id pub-id-type="pmid">40463579</pub-id></mixed-citation></ref>
<ref id="ref71"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ting</surname><given-names>D. S. W.</given-names></name> <name><surname>Peng</surname><given-names>L.</given-names></name> <name><surname>Varadarajan</surname><given-names>A. V.</given-names></name> <name><surname>Keane</surname><given-names>P. A.</given-names></name> <name><surname>Burlina</surname><given-names>P. M.</given-names></name> <name><surname>Chiang</surname><given-names>M. F.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Deep learning in ophthalmology: the technical and clinical considerations</article-title>. <source>Prog. Retin. Eye Res.</source> <volume>72</volume>:<fpage>100759</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.preteyeres.2019.04.003</pub-id>, <pub-id pub-id-type="pmid">31048019</pub-id></mixed-citation></ref>
<ref id="ref72"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Townsend</surname><given-names>B. A.</given-names></name> <name><surname>Plant</surname><given-names>K. L.</given-names></name> <name><surname>Hodge</surname><given-names>V. J.</given-names></name> <name><surname>Ashaolu</surname><given-names>O.</given-names></name> <name><surname>Calinescu</surname><given-names>R.</given-names></name></person-group> (<year>2023</year>). <article-title>Medical practitioner perspectives on AI in emergency triage</article-title>. <source>Front. Digit. Health</source> <volume>5</volume>:<fpage>1297073</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fdgth.2023.1297073</pub-id>, <pub-id pub-id-type="pmid">38125759</pub-id></mixed-citation></ref>
<ref id="ref73"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Trope</surname><given-names>Y.</given-names></name> <name><surname>Liberman</surname><given-names>N.</given-names></name></person-group> (<year>2003</year>). <article-title>Temporal construal</article-title>. <source>Psychol. Rev.</source> <volume>110</volume>, <fpage>403</fpage>&#x2013;<lpage>421</lpage>. doi: <pub-id pub-id-type="doi">10.1037/0033-295X.110.3.403</pub-id>, <pub-id pub-id-type="pmid">12885109</pub-id></mixed-citation></ref>
<ref id="ref74"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Trope</surname><given-names>Y.</given-names></name> <name><surname>Liberman</surname><given-names>N.</given-names></name></person-group> (<year>2010</year>). <article-title>Construal-level theory of psychological distance</article-title>. <source>Psychol. Rev.</source> <volume>117</volume>, <fpage>440</fpage>&#x2013;<lpage>469</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0018963</pub-id>, <pub-id pub-id-type="pmid">20438233</pub-id></mixed-citation></ref>
<ref id="ref75"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tyler</surname><given-names>S.</given-names></name> <name><surname>Olis</surname><given-names>M.</given-names></name> <name><surname>Aust</surname><given-names>N.</given-names></name> <name><surname>Patel</surname><given-names>L.</given-names></name> <name><surname>Simon</surname><given-names>L.</given-names></name> <name><surname>Triantafyllidis</surname><given-names>C.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Use of artificial intelligence in triage in hospital emergency departments: a scoping review</article-title>. <source>Cureus</source> <volume>16</volume>:<fpage>e59906</fpage>. doi: <pub-id pub-id-type="doi">10.7759/cureus.59906</pub-id>, <pub-id pub-id-type="pmid">38854295</pub-id></mixed-citation></ref>
<ref id="ref76"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Von Eschenbach</surname><given-names>W. J.</given-names></name></person-group> (<year>2021</year>). <article-title>Transparency and the black box problem: why we do not trust AI</article-title>. <source>Philos. Technol.</source> <volume>34</volume>, <fpage>1607</fpage>&#x2013;<lpage>1622</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s13347-021-00477-0</pub-id></mixed-citation></ref>
<ref id="ref77"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>Z.</given-names></name> <name><surname>Walther</surname><given-names>J. B.</given-names></name> <name><surname>Pingree</surname><given-names>S.</given-names></name> <name><surname>Hawkins</surname><given-names>R. P.</given-names></name></person-group> (<year>2008</year>). <article-title>Health information, credibility, homophily, and influence via the internet: web sites versus discussion groups</article-title>. <source>Health Commun.</source> <volume>23</volume>, <fpage>358</fpage>&#x2013;<lpage>368</lpage>. doi: <pub-id pub-id-type="doi">10.1080/10410230802229738</pub-id>, <pub-id pub-id-type="pmid">18702000</pub-id></mixed-citation></ref>
<ref id="ref78"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>H. Z.</given-names></name> <name><surname>Xie</surname><given-names>T.</given-names></name> <name><surname>Zhan</surname><given-names>C. Y.</given-names></name></person-group> (<year>2021</year>). <article-title>When service failed: the detrimental effect of anthropomorphism on intelligent customer service agent avatar &#x2013; disgust as mediation</article-title>. <source>Nankai Bus. Rev.</source> <volume>24</volume>, <fpage>194</fpage>&#x2013;<lpage>206</lpage>. doi: <pub-id pub-id-type="doi">10.3969/j.issn.1008-3448.2021.04.017</pub-id></mixed-citation></ref>
<ref id="ref79"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>Y.</given-names></name> <name><surname>Zhu</surname><given-names>J.</given-names></name> <name><surname>Liu</surname><given-names>R.</given-names></name> <name><surname>Xu</surname><given-names>K.</given-names></name></person-group> (<year>2024</year>). <article-title>Enhancing recommendation acceptance: resolving the personalization&#x2013;privacy paradox in recommender systems: a privacy calculus perspective</article-title>. <source>Int. J. Inf. Manag.</source> <volume>76</volume>:<fpage>102755</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijinfomgt.2024.102755</pub-id></mixed-citation></ref>
<ref id="ref80"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Waytz</surname><given-names>A.</given-names></name> <name><surname>Cacioppo</surname><given-names>J.</given-names></name> <name><surname>Epley</surname><given-names>N.</given-names></name></person-group> (<year>2010</year>). <article-title>Who sees human? The stability and importance of individual differences in anthropomorphism</article-title>. <source>Perspect. Psychol. Sci.</source> <volume>5</volume>, <fpage>219</fpage>&#x2013;<lpage>232</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1745691610369336</pub-id>, <pub-id pub-id-type="pmid">24839457</pub-id></mixed-citation></ref>
<ref id="ref81"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xie</surname><given-names>Y.</given-names></name> <name><surname>Lu</surname><given-names>L.</given-names></name> <name><surname>Gao</surname><given-names>F.</given-names></name> <name><surname>Yuan</surname><given-names>S.</given-names></name> <name><surname>Wang</surname><given-names>X.</given-names></name> <name><surname>Wang</surname><given-names>J.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Integration of artificial intelligence, blockchain, and wearable technology for chronic disease management: a new paradigm in smart healthcare</article-title>. <source>Curr. Med. Sci.</source> <volume>41</volume>, <fpage>1123</fpage>&#x2013;<lpage>1133</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11596-021-2485-0</pub-id></mixed-citation></ref>
<ref id="ref82"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xiong</surname><given-names>Y.</given-names></name> <name><surname>Shi</surname><given-names>Y.</given-names></name> <name><surname>Pu</surname><given-names>Q.</given-names></name> <name><surname>Liu</surname><given-names>Y.</given-names></name></person-group> (<year>2024</year>). <article-title>More trust or more risk? User acceptance of artificial intelligence virtual assistant</article-title>. <source>Hum. Factors Ergon. Manuf. Serv. Ind.</source> <volume>34</volume>, <fpage>190</fpage>&#x2013;<lpage>205</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hfm.21020</pub-id></mixed-citation></ref>
<ref id="ref83"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname><given-names>X.</given-names></name> <name><surname>Li</surname><given-names>J.</given-names></name> <name><surname>Zhu</surname><given-names>Z.</given-names></name> <name><surname>Zhao</surname><given-names>L.</given-names></name> <name><surname>Wang</surname><given-names>H.</given-names></name> <name><surname>Song</surname><given-names>C.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>A comprehensive review on synergy of multi-modal data and AI technologies in medical diagnosis</article-title>. <source>Bioengineering</source> <volume>11</volume>:<fpage>219</fpage>. doi: <pub-id pub-id-type="doi">10.3390/bioengineering11030219</pub-id>, <pub-id pub-id-type="pmid">38534493</pub-id></mixed-citation></ref>
<ref id="ref84"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname><given-names>Z. M.</given-names></name> <name><surname>Wang</surname><given-names>C. F.</given-names></name> <name><surname>Yang</surname><given-names>H. J.</given-names></name></person-group> (<year>2023</year>). <article-title>The impact of AI-based customer services' anthropomorphism on consumers' continuance intention: the mediating role of psychological distance</article-title>. <source>Collect. Essays Financ. Econ.</source> <volume>45</volume>, <fpage>81</fpage>&#x2013;<lpage>90</lpage>. doi: <pub-id pub-id-type="doi">10.13762/j.cnki.cjlc.2023.08.006</pub-id></mixed-citation></ref>
<ref id="ref85"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Youn</surname><given-names>S.</given-names></name> <name><surname>Jin</surname><given-names>S. V.</given-names></name></person-group> (<year>2021</year>). <article-title>In AI we trust? &#x201C;The effects of parasocial interaction and technopian versus luddite ideological views on chatbot-based customer relationship management in the emerging&#x201D; feeling economy</article-title>. <source>Comput. Human Behav.</source> <volume>119</volume>:<fpage>106721</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2021.106721</pub-id></mixed-citation></ref>
<ref id="ref86"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yu</surname><given-names>K. H.</given-names></name> <name><surname>Beam</surname><given-names>A. L.</given-names></name> <name><surname>Kohane</surname><given-names>I. S.</given-names></name></person-group> (<year>2018</year>). <article-title>Artificial intelligence in healthcare</article-title>. <source>Nat. Biomed. Eng.</source> <volume>2</volume>, <fpage>719</fpage>&#x2013;<lpage>731</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41551-018-0305-z</pub-id>, <pub-id pub-id-type="pmid">31015651</pub-id></mixed-citation></ref>
<ref id="ref87"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhu</surname><given-names>Y.</given-names></name> <name><surname>Wu</surname><given-names>T.</given-names></name> <name><surname>Chen</surname><given-names>Y.</given-names></name> <name><surname>Wu</surname><given-names>Y.</given-names></name> <name><surname>Lu</surname><given-names>J.</given-names></name> <name><surname>Wu</surname><given-names>J.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Advances of CT-based artificial intelligence to identify EGFR mutation in non-small cell lung cancer</article-title>. <source>Chin. Comput. Med. Imaging</source> <volume>30</volume>, <fpage>516</fpage>&#x2013;<lpage>520</lpage>. doi: <pub-id pub-id-type="doi">10.3969/j.issn.1006-5742.2024.04.020</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/981727/overview">Athanasios Drigas</ext-link>, National Centre of Scientific Research Demokritos, Greece</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3050093/overview">Titis Thoriquttyas</ext-link>, State University of Malang, Indonesia</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3083911/overview">Viktoriya Galitskaya</ext-link>, National Centre of Scientific Research Demokritos, Greece</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3128856/overview">Victoria Bamicha</ext-link>, National Centre of Scientific Research Demokritos, Greece</p>
</fn>
</fn-group>
</back>
</article>