<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="research-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Digit. Health</journal-id><journal-title-group>
<journal-title>Frontiers in Digital Health</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Digit. Health</abbrev-journal-title></journal-title-group>
<issn pub-type="epub">2673-253X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fdgth.2025.1653168</article-id>
<article-version article-version-type="Corrected Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>A GPT-reinforced social robot for patient communication: a pilot study</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes"><name><surname>van 't Klooster</surname><given-names>Jan-Willem J. R.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1132606/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role></contrib>
<contrib contrib-type="author"><name><surname>Capasso</surname><given-names>Michela</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role></contrib>
<contrib contrib-type="author"><name><surname>van Gorssel</surname><given-names>Daan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role></contrib>
<contrib contrib-type="author"><name><surname>Vrolijk</surname><given-names>Elvis</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Rettagliata</surname><given-names>Giorgio</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3062133/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role></contrib>
<contrib contrib-type="author"><name><surname>Gerritsen</surname><given-names>Demy</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role></contrib>
<contrib contrib-type="author"><name><surname>Hegeman</surname><given-names>Mirjam</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3111637/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Tauro</surname><given-names>Emanuele</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3208517/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Caiani</surname><given-names>Enrico Gianluca</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/339291/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role></contrib>
<contrib contrib-type="author"><name><surname>Vonkeman</surname><given-names>Harald E.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3289761/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role></contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Behavioural Management and Social Sciences, University of Twente</institution>, <city>Enschede</city>, <country>Netherlands</country></aff>
<aff id="aff2"><label>2</label><institution>Dipartimento di Elettronica, Informazione e Bioingegneria, Politecnico di Milano</institution>, <city>Milano</city>, <country>Italy</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Rheumatology and Clinical Immunology, Medisch Spectrum Twente</institution>, <city>Enschede</city>, <country>Netherlands</country></aff>
<aff id="aff4"><label>4</label><institution>IRCCS Istituto Auxologico Italiano, San Luca Hospital</institution>, <city>Milan</city>, <country>Italy</country></aff>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Jan-Willem J. R. van 't Klooster <email xlink:href="mailto:j.vantklooster@utwente.nl">j.vantklooster@utwente.nl</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-27"><day>27</day><month>01</month><year>2026</year></pub-date>
<pub-date publication-format="electronic" date-type="corrected" iso-8601-date="2026-03-03"><day>03</day><month>03</month><year>2026</year></pub-date>
<pub-date publication-format="electronic" date-type="collection"><year>2025</year></pub-date>
<volume>7</volume><elocation-id>1653168</elocation-id>
<history>
<date date-type="received"><day>24</day><month>06</month><year>2025</year></date>
<date date-type="rev-recd"><day>03</day><month>12</month><year>2025</year></date>
<date date-type="accepted"><day>26</day><month>12</month><year>2025</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2026 van 't Klooster, Capasso, van Gorssel, Vrolijk, Rettagliata, Gerritsen, Hegeman, Tauro, Caiani and Vonkeman.</copyright-statement>
<copyright-year>2026</copyright-year><copyright-holder>van 't Klooster, Capasso, van Gorssel, Vrolijk, Rettagliata, Gerritsen, Hegeman, Tauro, Caiani and Vonkeman</copyright-holder><license><ali:license_ref start_date="2026-01-27">https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p></license>
</permissions>
<abstract><sec><title>Problem</title>
<p>Quality healthcare requires effective patient communication. However, lack of personnel and increasing demands on healthcare professionals (HCPs) create a need for innovative solutions that enhance accessibility and delivery of information to patients.</p>
</sec><sec><title>Goal</title>
<p>We propose an innovative method to convey treatment and disease information using an Artificial Intelligence (AI)-driven social robotic physical interface. The aim of this study is to develop and test the feasibility of using a social robot that can convincingly provide health information in patient dialogues within clinical practice, to support patient communication and information exchange.</p>
</sec><sec><title>Methods</title>
<p>This paper sets out the architectural approach of an AI-reinforced social robot connected to whitelisted validated clinical sources using a Generative Pre-training Transformer (GPT)-based Large Language Model (LLM). We describe experimental results in a lab-based pilot feasibility study, and then highlight related results for user experience in clinical practice implementation for an osteoarthritis (OA) use case, in which the robot answers osteoarthritis-related questions. Results were obtained after end-user engagement using the User Experience Questionnaire (UEQ) and semi-structured interviews.</p>
</sec><sec><title>Results</title>
<p>UEQ results were obtained in a lab-based pilot test (<italic>n</italic>&#x2009;&#x003D;&#x2009;20) and with OA patients (<italic>n</italic>&#x2009;&#x003D;&#x2009;21) and healthcare professionals (<italic>n</italic>&#x2009;&#x003D;&#x2009;7). Above average/good attractiveness, perspicuity and stimulation were reported in the pilot test; novelty was excellent, yet dependability and efficiency were reported below average. In the clinical setting, Patient UEQ score resulted in mean 2.13 with values ranging from 1.7 to 2.5, indicating a positive trend in efficiency, inventiveness and acceptability. HCPs UEQ scores reached mean 1.89, with all values above 1 except for excitement of usage, which scored 0.8 (SD 1.3). Semi-structured interviews added in-depth enrichment of the data.</p>
</sec><sec><title>Conclusion</title>
<p>In summary, this paper demonstrates the feasibility of implementing a GPT-reinforced social robot for patient communication in clinical practice.</p>
</sec>
</abstract>
<kwd-group>
<kwd>GPT</kwd>
<kwd>osteoarthritis</kwd>
<kwd>patient communication</kwd>
<kwd>social robot</kwd>
<kwd>UEQ</kwd>
</kwd-group><funding-group><funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement></funding-group><counts>
<fig-count count="6"/>
<table-count count="0"/><equation-count count="0"/><ref-count count="41"/><page-count count="11"/><word-count count="0"/></counts><custom-meta-group><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Health Technology Implementation</meta-value></custom-meta></custom-meta-group>
</article-meta>
</front>
<body><sec id="s1" sec-type="intro"><title>Introduction</title>
<p>Quality healthcare requires effective patient communication (<xref ref-type="bibr" rid="B1">1</xref>). However, lack of personnel and increasing demands on healthcare professionals (HCPs) create a need for innovative solutions that enhance accessibility and delivery of information to patients.</p>
<p>Therefore, we propose an innovative method to convey disease and treatment information. By using an Artificial Intelligence (AI)-driven approach and a social robotic physical interface, we hypothesized that communication could be adapted beyond pre-programmed messages and strategies, personalized and empathized to individual needs beyond screen-based applications, thus providing a multimodal communication experience. The research question addressed is whether it is possible to develop an AI reinforced social robot that answers patients&#x0027; questions in such a way that it could have added value in clinical practice. Therefore, the aim of this study is to develop and evaluate a social robot that can convincingly provide patient information in patient dialogues in clinical practice, to support patient communication and information exchange. To this end, this paper sets out the architectural approach of the developed AI-reinforced social robot, it describes the obtained experimental results in a pilot feasibility study, and it highlights related results of user experience in a clinical practice implementation at a collaborating hospital institution. As a relevant use case, osteoarthritis (OA) was selected: OA represents a patient group with a complex chronic disease, where information and regular guidance are needed.</p>
<sec id="s1a"><title>Osteoarthritis (OA)</title>
<p>OA is a common chronic, progressive and disabling joint disease that results from degeneration of joint cartilage and underlying bone, causing progressive joint pain, stiffness and loss of motion. OA is one of the leading causes of disability in the world, affecting 1 in 7 adults in the Western world. Guidelines indicate that treatment should encompass both pharmacological and non-pharmacological management strategies, such as use of painkillers and lifestyle modification (<xref ref-type="bibr" rid="B2">2</xref>). However, many patients continue to suffer from daily complaints and limitations and therefore have high need for frequent guidance. It is beyond the scope of this article to provide a detailed overview of OA. For more information, the reader is referred to (<xref ref-type="bibr" rid="B2">2</xref>).</p>
<p>In the present study, this disease was chosen because of its high prevalence, chronicity, complexity and high patient guidance needs, leading to frequent hospital visits. This implies that there is a potential and significant gain in disease management, both at the societal and at the patient level, if novel technology could be utilized for increasing patient empowerment.</p>
</sec>
<sec id="s1b"><title>Communication</title>
<p>Appropriate communication on health literacy and therapy adherence is essential for the success of treatment, both from a personal and societal perspective. This is particularly true for OA (<xref ref-type="bibr" rid="B3">3</xref>) but also applies to other medical conditions. In patient communication, it is important to check, maintain and increase health literacy, and promote therapy adherence. These factors play a crucial role in treatment outcome and overall success (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B5">5</xref>).</p>
<p>When healthcare professionals do not have sufficient time to provide explanations (repeatedly) at an appropriate language level, social robotics could offer a scalable solution by engaging patients in accessible conversation and delivering understandable information (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B7">7</xref>).</p>
</sec>
<sec id="s1c"><title>Social robots</title>
<p>Social robots are physically embodied artificial agents designed to interact with users through verbal and nonverbal cues via a social interface. Robots&#x0027; social features, which mimic the behaviour and appearance of a living being, lead users to perceive them as social entities (<xref ref-type="bibr" rid="B8">8</xref>).</p>
<p>Social robots are emerging as promising tools in the ICT landscape, offering the ability to interact naturally with humans, convey basic emotions and assist with communication tasks. By integrating Artificial Intelligence (AI), these robots can further adapt to individual and organisational needs, thus providing personalized support and relevant information.</p>
<p>Practitioners and researchers are increasingly paying attention to the use of social robots in healthcare (<xref ref-type="bibr" rid="B9">9</xref>). This growing interest is primarily due to the potential of social robots to address challenges posed by an ageing population and the rising labour shortages in the healthcare sector (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B31">31</xref>). It is largely attributed to their potential to aid in the social management of health across various dimensions, ranging from assisting with medication schedules (<xref ref-type="bibr" rid="B11">11</xref>) to providing companionship to patients (<xref ref-type="bibr" rid="B12">12</xref>).</p>
<p>To fully realise these benefits, users must be willing to accept both the use of the robot for those tasks and the resulting outcomes. Establishing trust in the robot is essential for achieving this goal (<xref ref-type="bibr" rid="B13">13</xref>). Trust related to technological tools/agents can be defined as &#x201C;<italic>the attitude that an agent will help achieve an individual&#x0027;s goals in a situation characterized by uncertainty and vulnerability</italic>&#x201D; (<xref ref-type="bibr" rid="B14">14</xref>, p. 54). Indeed, trust in the robot is important because it also affects the willingness to accept the robot-provided outcome (<xref ref-type="bibr" rid="B42">42</xref>).</p>
<p>The concept of trust is multifaceted; initial perceptions influence the trustworthiness of the counterpart (<xref ref-type="bibr" rid="B43">43</xref>), but these impressions of the robot may change over time with subsequent interactions (<xref ref-type="bibr" rid="B15">15</xref>). These continuous interactions, in fact, lead to a continuous calibration of the trust placed in the robot itself so to arrive at a balance between expectations about the robot&#x0027;s performance and the robot&#x0027;s actual capabilities (<xref ref-type="bibr" rid="B16">16</xref>).</p>
<p>The specific characteristics of social robots are a novelty within the healthcare field and may influence this trust process differently to those of similar technologies, such as chatbots or virtual agents. For simple tasks, humans tend to trust physically present robots more than virtual agents, due to their enhanced social features, such as the ability to make gestures (<xref ref-type="bibr" rid="B17">17</xref>). Indeed, the physical presence and embodied interaction of robots (such as nodding while listening, eye gaze and other verbal behaviours) could provide more empathetic interactions and consequently elicit higher affective trust (<xref ref-type="bibr" rid="B18">18</xref>). However, when making moral decisions regarding healthcare, people tend to trust human nurses more than robots, even when social robots are perceived as competent. This preference reflects a more positive judgment of human nurses compared to robots (<xref ref-type="bibr" rid="B19">19</xref>). In such cases, it is evident that the social presence of the robot and the context of use could influence the trust towards the robot and acceptance of robot outcomes.</p>
<p>In a socially assistive task like conveying treatment and disease information, listening skills and conversational capabilities are fundamental to developing a relationship and establishing a therapeutic alliance with the patient; in this context, social robots are seen as a potential solution within these tasks (<xref ref-type="bibr" rid="B20">20</xref>). Previous research has shown that social robots were often viewed as more effective than computers and avatars for helping individuals track their dietary behaviours, as people tend to establish stronger relationships with them (<xref ref-type="bibr" rid="B21">21</xref>). Interactions with robots were also perceived more positively than with tablets, with individuals reporting greater trust in robot-delivered health instructions in Mann et al. (<xref ref-type="bibr" rid="B22">22</xref>). Similarly, elderly patients prefer physically present robots over virtual agents as exercise coaches (<xref ref-type="bibr" rid="B23">23</xref>). These previous studies demonstrated that robots&#x0027; physical presence, gestures, and ability to share the patient&#x0027;s environment could provide advantages that 2D systems cannot replicate. Also, in the present study we are particularly interested in patients&#x0027; interest in, and interaction with, a 3D entity.</p>
<p>In conveying treatment and disease information, it is important to consider both the robot&#x0027;s appearance and its ability to communicate effectively with the patient. These factors influence the patient&#x0027;s acceptance of the robot and their willingness to follow its instructions. Indeed, a human-like appearance alone is not a sufficient condition to increase robot acceptance. Robots must look human and act like humans (<xref ref-type="bibr" rid="B24">24</xref>). The robot must convey its competence and warmth to the patient in order to be trusted (<xref ref-type="bibr" rid="B15">15</xref>). Research shows that integrating empathetic statements from robots, such as soliciting patient feedback or expressing understanding, can significantly enhance their perceived trustworthiness. Indeed, patients who perceive robots as empathetic are more inclined to adhere to the recommendations provided by these robotic entities, ultimately improving their satisfaction with the treatment (<xref ref-type="bibr" rid="B25">25</xref>).</p>
<p>The basic idea of these dialogue-based robotic systems is that they talk and listen to end users, while providing a social (face-like) interface together with speech-to-text and text-to-speech capabilities for natural interaction. When communicating, variants like the Furhat robot (<xref ref-type="bibr" rid="B26">26</xref>) place emphasis on its facial looks and on lipsync to promote a natural, speech-based interface. Furthermore, the front camera allows person tracking and basic emotion recognition, to follow the conversational partner and react based on their emotional state.</p>
<p>Furhat is a humanoid robotic head specifically designed for social interactions (<xref ref-type="bibr" rid="B26">26</xref>). The back-projected 3D face engine allows for rendering dynamic facial expressions and lip-syncing, improving the quality of the conversations and introducing all the nonverbal behaviour that might support information exchange with the patient. This is achieved through a beamer that projects face-like animations on the inside of a semi-transparent plastic face mold. The robot contains a phased array microphone, speaker, and servos to operate as its neck and face muscles. Combined, the possibility of controlling gestures, neck movements, and facial expressions made Furhat a suitable choice for our study.</p>
<p>In terms of programming, traditional robots run script-based design time solutions, which allow them to perform pre-purposed tasks, but they can nowadays also be linked to other ICT systems (such as an electronic health record or internet-based sources), or to Large Language Models (LLMs) via an Application Programming Interface (API) (<xref ref-type="bibr" rid="B27">27</xref>). This allows for interactive dialogues without the need for complete preprogramming. In the present study, we investigate this latter approach, evaluating a social robot that provides patient information in patient dialogues, thus supporting patient communication and information exchange. In this way, it is possible to combine the social features of the robot with the conversational capabilities of LLMs. The use of LLMs like ChatGPT to answer patient questions is an emerging field (<xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B29">29</xref>). At present, while physicians generally consider AI responses to be accurate (<xref ref-type="bibr" rid="B29">29</xref>), patients still tend to prefer consulting doctors for treatment recommendations (<xref ref-type="bibr" rid="B28">28</xref>). However, this dynamic may shift with the integration of social presence into LLMs through the use of social robots. To our knowledge, this study is the first to explore the intersection of LLM capabilities with the social features of robots in the context of providing treatment information to OA patients.</p>
</sec>
</sec>
<sec id="s2" sec-type="methods"><title>Materials and methods</title>
<sec id="s2a"><title>Social robot</title>
<p>We used the social robot &#x201C;Furhat&#x201D; (<xref ref-type="bibr" rid="B26">26</xref>) and linked it to a Generative Pre-trained Transformer (GPT) LLM (gpt-3.5) via an API using the Furhat Kotlin-based programming framework. To ensure conversational relevance and content accuracy, and to prevent hallucinations, we limited the scope of the LLM to use only specific dependable medical websites whitelisted by the rheumatology department of the Medisch Spectrum Twente (MST) hospital, Enschede, the Netherlands. These websites contained physician-checked and relevant patient information on treatment options, conditions and disease management of OA. Using direct text search, these whitelisted websites are consulted according to the prompt used. The prompt is added in <xref ref-type="sec" rid="s12">Supplementary Material A</xref>.</p>
<p>The information flow and robot components are shown in <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>. It entails that (in this case) a single patient talks to the social robot, after which a speech-to-text (S2T) process is triggered, relying on cloud-based recognition (Microsoft Azure Speech Services). The resulting text-string is sent via the API to the GPT, which searches for relevant answers within the whitelisted web-based data sources detailed in <xref ref-type="sec" rid="s12">Supplementary Material A</xref>. The returned answer is spoken out by the robot utilizing its text-to-speech (T2S, based on Amazon Polly speech synthesis services) and Furhat lip-sync speech services.</p>
<fig id="F1" position="float"><label>Figure&#x00A0;1</label>
<caption><p>GPT-reinforced social robot architecture with robot main software and hardware components shown in dashed area. Patient talks to social robot. Social robot has built-in gestures, steering its facial expression and face movement. Using its built-in camera and built-in emotion recognition, it can look the patient in the eyes, follow their movement and convey basic emotions. Its built-in speech services rely on its microphone, Amazon Polly (Text-to-Speech) and Microsoft Azure Speech Services (Speech-to-text). These are called via API endpoints and hence depend on internet connection of the robot. Once a question posed by the patient is transcribed, the GPT model is called using the prompt of <xref ref-type="sec" rid="s12">Supplementary Material A</xref> to look up the answer on whitelisted medical websites. It is then sent back to the robot, and then spoken out by the robot using its built-in speaker. Created using <ext-link ext-link-type="uri" xlink:href="https://www.drawio.com/">draw.io</ext-link>, licensed under <ext-link ext-link-type="uri" xlink:href="https://www.apache.org/licenses/LICENSE-2.0">Apache License 2.0</ext-link>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1653168-g001.tif"><alt-text content-type="machine-generated">Diagram illustrating a communication system between a patient and a social robot. The social robot interacts with gestures and emotion mirroring, utilizing speech services, speaker, microphone, built-in camera, and internet connection. It connects to Speech-To-Text and Text-To-Speech APIs and a GPT API.</alt-text>
</graphic>
</fig>
<p>In addition, using a basic emotional state recognition feature running on top of the person-tracking built-in camera feed within the robot, and mirroring the recognized emotion, the robot is able to track the patient&#x0027;s face and be triggered to convey the same basic emotion (anger, disgust, fear, happiness, sadness and surprise) as seen in the patient while talking, listening, or waiting, for better personalization. This behaviour allows for non-deterministic and richer user interactions. The robot could potentially distinguish among multiple users and track the latest speaker, but in this study these features were not utilized.</p>
</sec>
<sec id="s2b"><title>Experiments</title>
<p>First, a lab-based pilot study was performed at the Behavioural, Management and Social sciences (BMS) lab at the University of Twente, Enschede, the Netherlands, to assess the task performance and user experience of utilizing the social robot as a communication device for medical conversational purposes.</p>
<p>Twenty participants were enrolled to enter in a short simulation dialogue with the social robot on medication adherence, after which the User Experience Questionnaire (UEQ, <xref ref-type="bibr" rid="B30">30</xref>) was filled in. The UEQ is an end-user questionnaire to measure user experience quickly in a simple and immediate way, while covering a comprehensive impression. It uses a seven-stage scale to reduce the well-known central tendency bias for such types of items, e.g., attractive&#x2014;unattractive. The scale combines both ergonomic (e.g., goal or task orientation of interface) and hedonic (e.g., design originality, aesthetics of interface) aspects. UEQ is frequently used for the assessment of social robots and, together with <italic>post-hoc</italic> semi-structured interviews, it represents a suitable method for the nature of this study (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B31">31</xref>, <xref ref-type="bibr" rid="B44">44</xref>).</p>
<p>As a second step, feasibility was tested in actual clinical practice.</p>
<p>Patients visiting the outpatient rheumatology department at Medisch Spectrum Twente (MST) hospital in Enschede (The Netherlands) with either a new or an established diagnosis of osteoarthritis (OA), as well as their HCPs, were invited to interact with the social robot and to evaluate their interaction. The interaction was guided by a delineated list of potential conversation topics.</p>
<p>This list of questions was defined together with the treating rheumatologists.</p>
<p>In discussion with the rheumatology department, also the questions and answers (i.e., what the robot can talk about in respect to OA) were assessed and approved. This assessment is further detailed in van Gorssel (<xref ref-type="bibr" rid="B7">7</xref>).</p>
<p>Consent from the hospitals&#x2019; ethics and material committee was obtained (K24-22). The lists of questions, answers, surveys and robot details are provided in the <xref ref-type="sec" rid="s12">Supplementary Material</xref>.</p>
<p>The robot started the conversation with some small talk while introducing itself and even included a small joke (&#x201C;I am a social robot, the best one that you will meet today&#x201D;). Then, the conversation (cf. <xref ref-type="sec" rid="s12">Supplementary Material A</xref>) started as implemented. After interacting with the robot, participants completed the short UEQ (<xref ref-type="bibr" rid="B30">30</xref>) to assess the robot&#x0027;s usability and functionality in a time-effective way. The mean scores for each dimension were interpreted using standard benchmarks from UEQ studies. Scores above &#x002B;1.5 were considered to reflect a positive user experience, while scores closer to 0 indicated a neutral experience. The UEQ was followed by semi-structured interviews to add in-depth opinions, perceptions and qualitative data. A control group was not used, as this is a first study to assess usability, user experiences and usefulness. Nevertheless, UEQ was chosen as a metric that allows benchmarking and comparison between patients and HCPs, and with other interaction technologies.</p>
</sec>
</sec>
<sec id="s3" sec-type="results"><title>Results</title>
<sec id="s3a"><title>Pilot test</title>
<p>In the lab-based pilot study, 10 (50&#x0025;) men and 10 (50&#x0025;) women (age range 20&#x2013;55 years old, both workers and students) from various countries and cultural backgrounds including Canada, Aruba, The Netherlands, Poland, Germany, Russia, Italy, and India, participated. The setting was a 20-minute simulation conversation with the subject, robot and researcher present, after which a User Experience Questionnaire (UEQ) was filled in. The UEQ results are shown in <xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref>. Overall, above average/good attractiveness, perspicuity and stimulation were reported; novelty was excellent, yet dependability and efficiency were reported below average.</p>
<fig id="F2" position="float"><label>Figure&#x00A0;2</label>
<caption><p>User experience questionnaire results with their component breakdown. Black line indicates the median; the traffic-light color coding indicates how the resulting scores (on <italic>y</italic>-axis), from low values (red) to very good values (green), compare to the UEQ benchmark (<xref ref-type="bibr" rid="B30">30</xref>).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1653168-g002.tif"><alt-text content-type="machine-generated">Stacked bar chart showing ratings of six attributes: Attractiveness, Perspicuity, Efficiency, Dependability, Stimulation, and Novelty. Each bar is divided into five sections indicating ratings from bad (red) to excellent (dark green). A black line represents the median for each attribute.</alt-text>
</graphic>
</fig>
<p>Regarding attractiveness, the mean value of 1.50 (1.17; 2.08) resulted above average. Similarly, in the perspicuity scale, the mean score of 1.63 (1.25; 2.31) indicated above-average performance. However, in the efficiency scale, the average score of 1.00 (0.75; 1.81) indicates performance within the average range. Similarly, in the dependability scale the mean score of 1.13 (0.94; 1.56) is within the average range. In the stimulation scale, the mean score of 1.50 (1.0; 2.31) indicates a good result. Finally, in the novelty scale, the average score of 1.75 (0.94; 2.3) represented a good result.</p>
<p><xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref> displays the UEQ item breakdown. It can be noticed how the robot&#x0027;s reaction time and predictability could be improved.</p>
<fig id="F3" position="float"><label>Figure&#x00A0;3</label>
<caption><p>Breakdown plot of the results obtained by the user experience questionnaire item.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1653168-g003.tif"><alt-text content-type="machine-generated">A chart with a semantic differential scale depicting adjectives on both ends, ranging from negative to positive. Colored dots represent six scales: Attractiveness, Perspicuity, Novelty, Stimulation, Dependability, and Efficiency. Each dot's position varies from -3 to 3, indicating the degree of association with the adjectives.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3b"><title>Clinical practice test</title>
<p>Before conducting the clinical practice test, the reaction time issue (reported in the pilot test) was investigated. It was caused by the delay that occurs in sending transcribed speech to ChatGPT&#x0027;s API and waiting for the result; it was addressed by having the robot pronounce an in-between response (i.e., &#x201C;I will think about it.&#x201D;) to bridge the time waiting for the GPT response. Also, wireless (wi-fi) connection was changed to wired connection. In addition, in the clinical test, a faster operating API endpoint as compared to the pilot test was available (gpt-4o), which resolved this issue and did not pose any barriers in patient trust.</p>
<p>The clinical practice test included 21 osteoarthritis (OA) patients and 7 HCPs (<xref ref-type="bibr" rid="B41">41</xref>).</p>
<p>Patients were aged in the range 43&#x2013;77 years with (self-reported) mixed technology experience (from none to &#x201C;a lot&#x201D;) and educational level ranging from primary education to university bachelor degree level. Duration of illness ranged from &#x201C;Newly diagnosed&#x201D; to &#x201C;Over 35 years&#x201D;, and (common) comorbidities included cardiovascular diseases and diabetes.</p>
<p>HCPs were in the range 22&#x2013;63 years old with predominantly &#x201C;a little&#x201D; self-reported technology experience (only two reporting &#x201C;a lot&#x201D;), and in professional capacity of Rheumatologists, rheumatology trainees, nurse practitioners, and medical researchers with professional experience ranging from 0 to 2 to over 6 years.</p>
<p>Patient UEQ mean score (<xref ref-type="bibr" rid="B30">30</xref>) (<xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref>) was 2.13 with values ranging from 1.7 to 2.5, indicating a positive trend in efficiency, inventiveness and acceptability. HCPs UEQ mean score (<xref ref-type="fig" rid="F5">Figure&#x00A0;5</xref>) resulted in 1.89, with all values above 1 except for item 5 (excitement of usage), that scored 0.8 (SD 1.3). In the interviews, patients generally found the Social Robot both acceptable and useful in a clinical setting and appreciated the robot&#x0027;s ability to provide information and respond to their questions, but suggested that the robot&#x0027;s communication style might need to be adjusted for different educational levels. For example, one participant noted &#x201C;<italic>Yes, I think it is suitable for explanation</italic>&#x201D;, and another mentioned &#x201C;<italic>If you look at conversational techniques and checking if people understood the message, improvements to the current version are possible</italic>.&#x201D;</p>
<fig id="F4" position="float"><label>Figure&#x00A0;4</label>
<caption><p>UEQ data overview for patients. Green bars represent mean value per item (between &#x2212;3 and 3) with error bars indicating the standard deviation.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1653168-g004.tif"><alt-text content-type="machine-generated">Bar chart titled "UEQ Mean Scores with Standard Deviation for Patients" showing scores for seven positive characteristics: Supportiveness, Ease of Use, Efficiency, Clarity, Excitement, Interest, Inventiveness, and Leading Edge. Scores range from 1.5 to 3, with varying standard deviations indicated by error bars. Bars are green, and characteristics are on the x-axis, with scores on the y-axis.</alt-text>
</graphic>
</fig>
<fig id="F5" position="float"><label>Figure&#x00A0;5</label>
<caption><p>UEQ data overview for HCPs. Blue bars represent mean value per item (between &#x2212;3 and 3) with error bars indicating the standard deviation.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1653168-g005.tif"><alt-text content-type="machine-generated">Bar chart titled "UEQ Mean Scores with Standard Deviation for HCPs" shows scores for supportiveness, ease of use, efficiency, clarity, excitement, interest, inventiveness, and leading edge. Error bars indicate standard deviation. Scores range from negative three to positive three.</alt-text>
</graphic>
</fig>
<p>HCPs viewed the robot as both acceptable and useful, particularly in areas such as patient education, triage, and routine follow-up, but expressed concerns about the robot&#x0027;s ability to replace human interaction, particularly in contexts where empathy and personalized care are crucial. One HCP remarked in the interview: &#x201C;<italic>Yes, it gave good treatment options and a clear answer</italic>.&#x201D;, but also 2 HCPs noted: &#x201C;<italic>She gives quite a lot of advice in a single sentence, sometimes mentioning 4 or 5 things</italic>&#x201D; and &#x201C;<italic>The pronunciation isn&#x0027;t always entirely accurate. Sometimes technical terms can be improved</italic>&#x201D;.</p>
<p>Patients and HCPs generally found the information provided by the robot to be both accurate and relevant, giving additional comments on communication effectiveness, engagement, personalisation of interaction (&#x201C;one can ask whatever she/he wants&#x201D;), perceived usefulness, emotional comfort, trust, ease of use, interaction quality, accessibility, learnability, error tolerance, health literacy, behaviour change ethics and care continuity. A complete coding of the <italic>post-hoc</italic> interviews is shown in <xref ref-type="sec" rid="s12">Supplementary Material B</xref>.</p>
<p><xref ref-type="fig" rid="F6">Figure&#x00A0;6</xref> shows that both patients (top) and HCPs (bottom) are mildly positive about the unbiasedness, relevancy, trustworthiness, expectations being met and perceived accuracy of the robot, with few more reservations regarding relevancy in patients, and meeting of expectations in HCPs.</p>
<fig id="F6" position="float"><label>Figure&#x00A0;6</label>
<caption><p>Results plot for both patients (top) and HCPs (bottom) relevant to post-usage on unbiasedness, relevancy, trustworthiness, expectations being met, and perceived accuracy of the robot.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1653168-g006.tif"><alt-text content-type="machine-generated">Bar charts showing Likert scale percentages for patients and healthcare providers across five items: accuracy, unbiasedness, trustworthiness, expectations, and relevancy. Each item has varying responses from strongly disagree to strongly agree. The charts illustrate the distribution of opinions, with higher positivity in trustworthiness for patients and higher agreement on unbiasedness for healthcare providers.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion"><title>Discussion</title>
<p>The conducted lab evaluation and testing in clinical practice demonstrated that sufficient usability and acceptability could be reached in implementing a social robot in a clinical setting. The implementation in clinical practice resulted in a better perceived efficiency compared to the lab scenario. Both scenarios indicated general usefulness, and potential for hospital-based use in patient care. As Patient UEQ score resulted in mean 2.13, with values ranging from 1.7 to 2.5, a positive trend in efficiency, inventiveness and acceptability was noticed.</p>
<p>A direct comparison could be made with Ger&#x0142;owska et al. (<xref ref-type="bibr" rid="B31">31</xref>) RAMCIP robotic assistant for older adults with mild cognitive impairments, and SERMO, a mental-health chatbot (<xref ref-type="bibr" rid="B32">32</xref>). Both studies employed the full UEQ scale. RAMCIP obtained median UEQ values between 0.63 and 2.0 across subscales, with particularly low results for Novelty and Efficiency (0.63&#x2013;1.25), and moderate results for Attractiveness and Dependability (1.66&#x2013;2.0). Similarly, the SERMO mental-health chatbot showed good pragmatic quality but neutral or low hedonic quality scores (Stimulation 0.298; Novelty 0.524) in its UEQ evaluation. In contrast, our clinical study yielded consistently higher mean scores across all UEQ subscales, for both patients and healthcare professionals. While RAMCIP users reported neutral usability due to slow interaction and developmental limitations, participants in our clinical test did not report such issues once latency was resolved. These differences suggest that combining embodiment with LLM-based conversational capabilities can enhance clarity, perceived usefulness, and engagement compared to both traditional social robots and screen-based conversational agents. The latter may be more cost-effective, but less embodied and social.</p>
<p>A key aspect in using LLMs is the prevention of hallucination; the implemented approach confronted this aspect by using only whitelisted sources, and this increased acceptability by the healthcare staff.</p>
<p>Nevertheless, patients were not tested on knowledge, so a summative rather than a formative effectiveness assessment still needs to take place. Also, a further study should compare the physical social robot approach with others, e.g., chatbot based approaches. An interesting dimension is that of trust, to consider which factors influence the establishment of trust, its breaching or recovery while using the technology (<xref ref-type="bibr" rid="B13">13</xref>).</p>
<p>Finally, privacy, security and personalisation remain important aspects for future research in the use of LLMs for clinical patient communication, including:
<list list-type="simple">
<list-item>
<p>Data Privacy and Confidentiality: LLMs may process sensitive personal health information, raising concerns about compliance with regulations such as the Health Insurance Portability and Accountability Act (HIPAA) in the U.S. or the General Data Protection Regulation (GDPR) in the EU (<xref ref-type="bibr" rid="B33">33</xref>).</p></list-item>
<list-item>
<p>Data Ownership and Control: when (third-party) LLM services are integrated, it has to be clear who owns the patient data or how it may be reused or stored, raising legal and ethical concerns (<xref ref-type="bibr" rid="B33">33</xref>).</p></list-item>
<list-item>
<p>Model Memorization and Information Leakage: LLMs have demonstrated to memorize parts of their training data, which could lead to the unintentional disclosure of sensitive patient information during future interactions (<xref ref-type="bibr" rid="B34">34</xref>), although in the present system these cannot be linked to specific individuals.</p></list-item>
<list-item>
<p>Informed Consent Challenges: patients may not be fully aware that an AI system is involved in the communication process, or may not understand how their data is used or stored, thus compromising ethical standards for informed consent (<xref ref-type="bibr" rid="B35">35</xref>).</p></list-item>
<list-item>
<p>Security Vulnerabilities in System Integration: integrating LLMs into electronic health record systems could introduce new cybersecurity threats, particularly through additional (insecure) APIs or improper configuration (<xref ref-type="bibr" rid="B36">36</xref>).</p></list-item>
<list-item>
<p>Over-Reliance and Misuse: although verified beforehand in the present case study, clinicians may overestimate the reliability of LLMs in general, leading to over-sharing of information or uncritical adoption of AI-generated advice (<xref ref-type="bibr" rid="B37">37</xref>).</p></list-item>
<list-item>
<p>Data Retention and Logging: many LLM services log user interactions which, if not anonymized or protected, could lead to unauthorized access or secondary misuse of health data (<xref ref-type="bibr" rid="B33">33</xref>). Hence, system security should be carefully examined when implemented in the hospital.</p></list-item>
<list-item>
<p>Transparency and Auditability: due to their black-box nature, LLMs often lack explainability, making it difficult to trace decisions, detect errors, or assign accountability (<xref ref-type="bibr" rid="B38">38</xref>). This could be partially resolved by using whitelisted clinical information sources only and standardization, but explainability remains a key aspect even then.</p></list-item>
<list-item>
<p>Availability: although response speed and availability did not pose any barrier in patient trust in the current study, latency in using LLMs could represent a limitation and should be improved in future work.</p></list-item>
<list-item>
<p>Personalisation: by personalisation towards the end users, a higher level of acceptance, privacy, and confidentiality could be achieved, beyond allowing a patient to ask anything he/she wants. This paper also explored mirroring as a personalisation technique, but the access to the information included in the personal electronic health record (EHR), and tailored answers based on unique users&#x0027; questions, should also be explored as personalisation directions.</p></list-item>
</list></p>
</sec>
<sec id="s5" sec-type="conclusions"><title>Conclusion</title>
<p>This paper demonstrated the initial feasibility of implementation of a GPT-reinforced social robot in clinical practice, such as supporting treatment and disease educational communication for patients with osteoarthritis. Patients with rheumatic diseases see the AI-reinforced Social Robot as a potentially valuable tool for providing information and supporting patient education. Healthcare professionals appreciated the innovation introduced and could recognize its future potential, with emphasis on a complementary rather than replacing role. Hence, the need for human oversight remains.</p>
<p>Despite these promising results, several areas require improvement before large-scale deployment. Personalisation was intentionally limited in this study; however, future implementations will need to explore how robotic conversational strategies can be tailored to individual patient characteristics. Evidence from previous work shows that personalised AI interactions play a crucial role in enhancing patient satisfaction and improving treatment adherence (<xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B40">40</xref>). What remains unknown is whether these benefits translate to embodied AI systems such as social robots, and how personalisation should be implemented safely and effectively in this context. In a future study, we will investigate language level adaptation in order to personalize the communication by the robot through an adapted language, based on patient preferences, health literacy, and characteristics.</p>
<p>Furthermore, our study was conducted in a single centre in the Netherlands, with a sample of 21 patients and 7 healthcare professionals. Cultural factors, organisation of care, and levels of digital literacy may influence how an AI-enhanced social robot is perceived. Future work should therefore replicate and extend this evaluation preferably in multi-centre and multi-country settings to examine how contextual and geographical factors could shape acceptance, trust, and effectiveness. In terms of effectiveness, both economic and communicative effectiveness (in terms of retention of information) should be assessed.</p>
<p>Finally, although trust was not negatively affected in this feasibility phase, long-term trust trajectories and possible points of breakdown between users and robots remain essential to be investigated.</p>
<p>This study demonstrated the possibility of providing accurate, relevant, and timely medically validated information. Future multicentre and multi-country studies, combined with the exploration of personalisation and long-term trust, will be essential to determine whether AI-enhanced social robots could reliably and safely complement human clinicians in delivering clear, consistent, and patient-tailored health information in real-world clinical practice.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability"><title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s7" sec-type="ethics-statement"><title>Ethics statement</title>
<p>The studies involving humans were approved by Ethics and materials committee, Medisch Spectrum Twente (K24-22). The studies were conducted in accordance with the local legislation and institutional requirements. Written informed consent for participation in the studies was provided by the participants.</p>
</sec>
<sec id="s8" sec-type="author-contributions"><title>Author contributions</title>
<p>J-WK: Visualization, Writing &#x2013; review &#x0026; editing, Conceptualization, Writing &#x2013; original draft, Supervision, Methodology. MC: Conceptualization, Investigation, Writing &#x2013; original draft, Data curation, Visualization, Methodology. DGo: Investigation, Writing &#x2013; original draft, Visualization, Data curation, Conceptualization, Methodology. EV: Software, Writing &#x2013; review &#x0026; editing. GR: Investigation, Writing &#x2013; original draft. DGe: Writing &#x2013; review &#x0026; editing, Investigation. MH: Investigation, Writing &#x2013; review &#x0026; editing. ET: Conceptualization, Supervision, Writing &#x2013; review &#x0026; editing. EC: Conceptualization, Writing &#x2013; review &#x0026; editing, Methodology, Supervision, Funding acquisition. HV: Writing &#x2013; original draft, Resources, Funding acquisition, Methodology, Conceptualization, Writing &#x2013; review &#x0026; editing, Investigation, Supervision.</p>
</sec>
<ack><title>Acknowledgments</title>
<p>We acknowledge the BMS faculty innovation lab BMS Lab, University of Twente Enschede, The Netherlands for its support in the technical infrastructure, software development, and operations.</p>
</ack>
<sec id="s10" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author J-WK declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec id="s33" sec-type="correction-note"><title>Correction Note</title>
<p>A correction has been made to this article. Details can be found at: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fdgth.2026.1812402">10.3389/fdgth.2026.1812402</ext-link>.</p>
</sec>
<sec id="s11" sec-type="ai-statement"><title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="s13" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s12" sec-type="supplementary-material"><title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fdgth.2025.1653168/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fdgth.2025.1653168/full&#x0023;supplementary-material</ext-link></p>
<supplementary-material xlink:href="Datasheet1.pdf" id="SM1" mimetype="application/pdf"/>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Razai</surname> <given-names>MS</given-names></name> <name><surname>Kooner</surname> <given-names>P</given-names></name> <name><surname>Majeed</surname> <given-names>A</given-names></name></person-group>. <article-title>Strategies and interventions to improve healthcare professionals&#x2019; well-being and reduce burnout</article-title>. <source>J Prim Care Community Health</source>. (<year>2023</year>) <volume>14</volume>:<fpage>21501319231178641</fpage>. <pub-id pub-id-type="doi">10.1177/21501319231178641</pub-id><pub-id pub-id-type="pmid">37246649</pub-id></mixed-citation></ref>
<ref id="B2"><label>2.</label><mixed-citation publication-type="other"><collab>NICE</collab>. <comment>Osteoarthritis in over 16s: diagnosis and management. National Institute for Health and Care Excellence (NICE) guideline Reference number: NG226 (2022). Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.nice.org.uk/guidance/ng226">https://www.nice.org.uk/guidance/ng226</ext-link> <comment>(Accessed December 6, 2025).</comment></mixed-citation></ref>
<ref id="B3"><label>3.</label><mixed-citation publication-type="other"><collab>Federatie Medisch Specialisten (FMS)</collab>. <comment>Reumato&#x00EF;de Artritis (RA). Optimale behandelstrategie bij reumato&#x00EF;de artritis (2025). Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://richtlijnendatabase.nl/richtlijn/reumato_de_artritis_ra/optimale_behandelstrategie_bij_reumatoide_artritis.html">https://richtlijnendatabase.nl/richtlijn/reumato_de_artritis_ra/optimale_behandelstrategie_bij_reumatoide_artritis.html</ext-link> <comment>(Accessed December 6, 2025).</comment></mixed-citation></ref>
<ref id="B4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jovani&#x0107;</surname> <given-names>M</given-names></name> <name><surname>Zdravkovi&#x0107;</surname> <given-names>M</given-names></name> <name><surname>Stanisavljevi&#x0107;</surname> <given-names>D</given-names></name> <name><surname>Jovi&#x0107; Vrane&#x0161;</surname> <given-names>A</given-names></name></person-group>. <article-title>Exploring the importance of health literacy for the quality of life in patients with heart failure</article-title>. <source>Int J Environ Res Public Health</source>. (<year>2018</year>) <volume>15</volume>(<issue>8</issue>):<fpage>1761</fpage>. <pub-id pub-id-type="doi">10.3390/IJERPH15081761</pub-id></mixed-citation></ref>
<ref id="B5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>McGuire</surname> <given-names>LC</given-names></name></person-group>. <article-title>Remembering what the doctor said: organization and adults&#x2019; memory for medical information</article-title>. <source>Exp Aging Res</source>. (<year>1996</year>) <volume>22</volume>(<issue>4</issue>):<fpage>403</fpage>&#x2013;<lpage>28</lpage>. <pub-id pub-id-type="doi">10.1080/03610739608254020</pub-id><pub-id pub-id-type="pmid">8968711</pub-id></mixed-citation></ref>
<ref id="B6"><label>6.</label><mixed-citation publication-type="thesis"><person-group person-group-type="author"><name><surname>Capasso</surname> <given-names>M</given-names></name></person-group>. <source>Development and testing of furhat robot as a persuasive system for therapeutic adherence support</source> (MSc thesis). <publisher-name>Politecnico Milano</publisher-name>, <publisher-loc>Italy</publisher-loc> (<year>2023</year>).</mixed-citation></ref>
<ref id="B7"><label>7.</label><mixed-citation publication-type="thesis"><person-group person-group-type="author"><name><surname>van Gorssel</surname> <given-names>D</given-names></name></person-group>. <source>ROMI: a robot for osteoarthritis medical information: artificial intelligence reinforced social robotics in patient communication for patients with osteoarthritis</source> (MSc thesis). <publisher-name>University of Twente</publisher-name>, <publisher-loc>Enschede</publisher-loc>, <publisher-loc>The Netherlands</publisher-loc> (<year>2024</year>).</mixed-citation></ref>
<ref id="B8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Naneva</surname> <given-names>S</given-names></name> <name><surname>Sarda Gou</surname> <given-names>M</given-names></name> <name><surname>Webb</surname> <given-names>TL</given-names></name> <name><surname>Prescott</surname> <given-names>TJ</given-names></name></person-group>. <article-title>A systematic review of attitudes, anxiety, acceptance, and trust towards social robots</article-title>. <source>Int J Soc Robot</source>. (<year>2020</year>) <volume>12</volume>(<issue>6</issue>):<fpage>1179</fpage>&#x2013;<lpage>201</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-020-00659-4</pub-id></mixed-citation></ref>
<ref id="B9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chita-Tegmark</surname> <given-names>M</given-names></name> <name><surname>Scheutz</surname> <given-names>M</given-names></name></person-group>. <article-title>Assistive robots for the social management of health: a framework for robot design and human-robot interaction research</article-title>. <source>Int J Soc Robot</source>. (<year>2021</year>) <volume>13</volume>(<issue>2</issue>):<fpage>197</fpage>&#x2013;<lpage>217</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-020-00634-z</pub-id><pub-id pub-id-type="pmid">32421077</pub-id></mixed-citation></ref>
<ref id="B10"><label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Neef</surname> <given-names>C</given-names></name> <name><surname>Linden</surname> <given-names>K</given-names></name> <name><surname>Richert</surname> <given-names>A</given-names></name></person-group>. <article-title>Exploring the influencing factors on user experience in robot-assisted health monitoring systems combining subjective and objective health data</article-title>. <source>Appl Sci</source>. (<year>2023</year>) <volume>13</volume>(<issue>6</issue>):<fpage>3537</fpage>. <pub-id pub-id-type="doi">10.3390/app13063537</pub-id></mixed-citation></ref>
<ref id="B11"><label>11.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Wilson</surname> <given-names>JR</given-names></name> <name><surname>Tickle-Degnen</surname> <given-names>L</given-names></name> <name><surname>Scheutz</surname> <given-names>M</given-names></name></person-group>. <article-title>Designing a social robot to assist in medication sorting</article-title>. <conf-name>Social Robotics: 8th International Conference, ICSR 2016, Kansas City, MO, USA, November 1-3, 2016 Proceedings 8</conf-name> (<year>2016</year>). p. <fpage>211</fpage>&#x2013;<lpage>21</lpage>. <publisher-name>Springer International Publishing</publisher-name>.</mixed-citation></ref>
<ref id="B12"><label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Banks</surname> <given-names>MR</given-names></name> <name><surname>Willoughby</surname> <given-names>LM</given-names></name> <name><surname>Banks</surname> <given-names>WA</given-names></name></person-group>. <article-title>Animal-assisted therapy and loneliness in nursing homes: use of robotic versus living dogs</article-title>. <source>J Am Med Dir Assoc</source>. (<year>2008</year>) <volume>9</volume>(<issue>3</issue>):<fpage>173</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1016/j.jamda.2007.11.007</pub-id><pub-id pub-id-type="pmid">18294600</pub-id></mixed-citation></ref>
<ref id="B13"><label>13.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Rettagliata</surname> <given-names>G</given-names></name> <name><surname>Bondarouk</surname> <given-names>T</given-names></name> <name><surname>van 't Klooster</surname> <given-names>JWJR</given-names></name> <name><surname>Hertel</surname> <given-names>G</given-names></name></person-group>. <article-title>When robots join the deal: a trust-based model for introducing social robots in employment negotiations</article-title>. In: <person-group person-group-type="editor"><name><surname>Schafheitle</surname> <given-names>S</given-names></name> <name><surname>Searle</surname> <given-names>R</given-names></name> <name><surname>Nienaber</surname> <given-names>A-M</given-names></name></person-group>, editors. <source>Trust in the Age of AI&#x2014;Bringing the Humans Back to the Centre</source>. <publisher-loc>Cheltenham</publisher-loc>: <publisher-name>Edward Elgar Publishing</publisher-name> (<year>2025</year>)<comment>; (accepted/in press)</comment>.</mixed-citation></ref>
<ref id="B14"><label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>JD</given-names></name> <name><surname>See</surname> <given-names>KA</given-names></name></person-group>. <article-title>Trust in automation: designing for appropriate reliance</article-title>. <source>Hum Factors</source>. (<year>2004</year>) <volume>46</volume>:<fpage>50</fpage>&#x2013;<lpage>80</lpage>. <pub-id pub-id-type="doi">10.1518/hfes.46.1.50_30392</pub-id><pub-id pub-id-type="pmid">15151155</pub-id></mixed-citation></ref>
<ref id="B15"><label>15.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Christoforakos</surname> <given-names>L</given-names></name> <name><surname>Gallucci</surname> <given-names>A</given-names></name> <name><surname>Surmava-Gro&#x00DF;e</surname> <given-names>T</given-names></name> <name><surname>Ullrich</surname> <given-names>D</given-names></name> <name><surname>Diefenbach</surname> <given-names>S</given-names></name></person-group>. <article-title>Can robots earn our trust the same way humans do? A systematic exploration of competence, warmth, and anthropomorphism as determinants of trust development in HRI</article-title>. <source>Front Rob AI</source>. (<year>2021</year>) <volume>8</volume>:<fpage>640444</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2021.640444</pub-id></mixed-citation></ref>
<ref id="B16"><label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>De Visser</surname> <given-names>EJ</given-names></name> <name><surname>Peeters</surname> <given-names>MMM</given-names></name> <name><surname>Jung</surname> <given-names>MF</given-names></name> <name><surname>Kohn</surname> <given-names>S</given-names></name> <name><surname>Shaw</surname> <given-names>TH</given-names></name> <name><surname>Pak</surname> <given-names>R</given-names></name><etal/></person-group> <article-title>Towards a theory of longitudinal trust calibration in human&#x2013;robot teams</article-title>. <source>Int J Soc Robot</source>. (<year>2020</year>) <volume>12</volume>(<issue>2</issue>):<fpage>459</fpage>&#x2013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00596-x</pub-id></mixed-citation></ref>
<ref id="B17"><label>17.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bainbridge</surname> <given-names>WA</given-names></name> <name><surname>Hart</surname> <given-names>JW</given-names></name> <name><surname>Kim</surname> <given-names>ES</given-names></name> <name><surname>Scassellati</surname> <given-names>B</given-names></name></person-group>. <article-title>The benefits of interactions with physically present robots over video-displayed agents</article-title>. <source>Int J Soc Robot</source>. (<year>2011</year>) <volume>3</volume>(<issue>1</issue>):<fpage>41</fpage>&#x2013;<lpage>52</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-010-0082-7</pub-id></mixed-citation></ref>
<ref id="B18"><label>18.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Anzabi</surname> <given-names>N</given-names></name> <name><surname>Umemuro</surname> <given-names>H</given-names></name></person-group>. <article-title>Effect of different listening behaviors of social robots on perceived trust in human-robot interactions</article-title>. <source>Int J Soc Robot</source>. (<year>2023</year>) <volume>15</volume>(<issue>6</issue>):<fpage>931</fpage>&#x2013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-023-01008-x</pub-id></mixed-citation></ref>
<ref id="B19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Laakasuo</surname> <given-names>M</given-names></name> <name><surname>Palom&#x00E4;ki</surname> <given-names>J</given-names></name> <name><surname>Kunnari</surname> <given-names>A</given-names></name> <name><surname>Rauhala</surname> <given-names>S</given-names></name> <name><surname>Drosinou</surname> <given-names>M</given-names></name> <name><surname>Halonen</surname> <given-names>J</given-names></name><etal/></person-group> <article-title>Moral psychology of nursing robots: exploring the role of robots in dilemmas of patient autonomy</article-title>. <source>Eur J Soc Psychol</source>. (<year>2023</year>) <volume>53</volume>(<issue>1</issue>):<fpage>108</fpage>&#x2013;<lpage>28</lpage>. <pub-id pub-id-type="doi">10.1002/ejsp.2890</pub-id></mixed-citation></ref>
<ref id="B20"><label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Do</surname> <given-names>HM</given-names></name> <name><surname>Sheng</surname> <given-names>W</given-names></name> <name><surname>Harrington</surname> <given-names>EE</given-names></name> <name><surname>Bishop</surname> <given-names>AJ</given-names></name></person-group>. <article-title>Clinical screening interview using a social robot for geriatric care</article-title>. <source>IEEE Trans Autom Sci Eng</source>. (<year>2021</year>) <volume>18</volume>(<issue>3</issue>):<fpage>1229</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1109/TASE.2020.2999203</pub-id></mixed-citation></ref>
<ref id="B21"><label>21.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Kidd</surname> <given-names>CD</given-names></name> <name><surname>Breazeal</surname> <given-names>C</given-names></name></person-group>. <article-title>Robots at home: understanding long-term human&#x2013;robot interaction</article-title>. <conf-name>IROS 2008. IEEE/RSJ International Conference on Intelligent Robots and Systems</conf-name> (<year>2008</year>). p. <fpage>3230</fpage>&#x2013;<lpage>5</lpage>.</mixed-citation></ref>
<ref id="B22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mann</surname> <given-names>JA</given-names></name> <name><surname>MacDonald</surname> <given-names>BA</given-names></name> <name><surname>Kuo</surname> <given-names>IH</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Broadbent</surname> <given-names>E</given-names></name></person-group>. <article-title>People respond better to robots than computer tablets delivering healthcare instructions</article-title>. <source>Comput Human Behav</source>. (<year>2015</year>) <volume>43</volume>:<fpage>112</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2014.10.029</pub-id></mixed-citation></ref>
<ref id="B23"><label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fasola</surname> <given-names>J</given-names></name> <name><surname>Matari&#x0107;</surname> <given-names>MJ</given-names></name></person-group>. <article-title>A socially assistive robot exercise coach for the elderly</article-title>. <source>J Hum Robot Interact</source>. (<year>2013</year>) <volume>2</volume>(<issue>2</issue>):<fpage>3</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.5898/JHRI.2.2.Fasola</pub-id></mixed-citation></ref>
<ref id="B24"><label>24.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pelau</surname> <given-names>C</given-names></name> <name><surname>Dabija</surname> <given-names>D-C</given-names></name> <name><surname>Ene</surname> <given-names>I</given-names></name></person-group>. <article-title>What makes an AI device human-like? The role of interaction quality, empathy and perceived psychological anthropomorphic characteristics in the acceptance of artificial intelligence in the service industry</article-title>. <source>Comput Human Behav</source>. (<year>2021</year>) <volume>122</volume>:<fpage>106855</fpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2021.106855</pub-id></mixed-citation></ref>
<ref id="B25"><label>25.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Johanson</surname> <given-names>D</given-names></name> <name><surname>Ahn</surname> <given-names>HS</given-names></name> <name><surname>Goswami</surname> <given-names>R</given-names></name> <name><surname>Saegusa</surname> <given-names>K</given-names></name> <name><surname>Broadbent</surname> <given-names>E</given-names></name></person-group>. <article-title>The effects of healthcare robot empathy statements and head nodding on trust and satisfaction: a video study</article-title>. <source>ACM Trans Hum Robot Interact</source>. (<year>2023</year>) <volume>12</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1145/3549534</pub-id></mixed-citation></ref>
<ref id="B26"><label>26.</label><mixed-citation publication-type="other"><collab>Furhat Robotics</collab>. <comment>Furhat robot. Furhat Robotics, Sweden (2025). Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.furhatrobotics.com/furhat-robot">https://www.furhatrobotics.com/furhat-robot</ext-link> <comment>(Accessed May 21, 2025)</comment>.</mixed-citation></ref>
<ref id="B27"><label>27.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Vrins</surname> <given-names>A</given-names></name> <name><surname>Pruss</surname> <given-names>E</given-names></name> <name><surname>Ceccato</surname> <given-names>C</given-names></name> <name><surname>Prinsen</surname> <given-names>J</given-names></name> <name><surname>De Rooij</surname> <given-names>A</given-names></name> <name><surname>Alimardani</surname> <given-names>M</given-names></name><etal/></person-group> <article-title>Wizard-of-Oz vs. GPT-4: a comparative study of perceived social intelligence in HRI brainstorming</article-title>. <conf-name>Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction</conf-name> (<year>2024</year>). p. <fpage>1090</fpage>&#x2013;<lpage>4</lpage>.</mixed-citation></ref>
<ref id="B28"><label>28.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Carl</surname> <given-names>N</given-names></name> <name><surname>Nguyen</surname> <given-names>L</given-names></name> <name><surname>Haggenm&#x00FC;ller</surname> <given-names>S</given-names></name> <name><surname>Hetz</surname> <given-names>MJ</given-names></name> <name><surname>Winterstein</surname> <given-names>JT</given-names></name> <name><surname>Hartung</surname> <given-names>FO</given-names></name><etal/></person-group> <article-title>Comparing patient&#x2019;s confidence in clinical capabilities in urology: large language models versus urologists</article-title>. <source>Eur Urol Open Sci</source>. (<year>2024</year>) <volume>70</volume>:<fpage>91</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/j.euros.2024.10.009</pub-id><pub-id pub-id-type="pmid">39507511</pub-id></mixed-citation></ref>
<ref id="B29"><label>29.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zalzal</surname> <given-names>HG</given-names></name> <name><surname>Abraham</surname> <given-names>A</given-names></name> <name><surname>Cheng</surname> <given-names>J</given-names></name> <name><surname>Shah</surname> <given-names>RK</given-names></name></person-group>. <article-title>Can ChatGPT help patients answer their otolaryngology questions?</article-title> <source>Laryngoscope Invest Otolaryngol</source>. (<year>2024</year>) <volume>9</volume>(<issue>1</issue>):<fpage>e1193</fpage>. <pub-id pub-id-type="doi">10.1002/lio2.1193</pub-id></mixed-citation></ref>
<ref id="B30"><label>30.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Schrepp</surname> <given-names>M</given-names></name></person-group>. <comment>User Experience Questionnaire Handbook. All you need to know to apply the UEQ successfully in your projects. (2023). Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="http://www.ueq-online.org/Material/Handbook.pdf">http://www.ueq-online.org/Material/Handbook.pdf</ext-link> <comment>(Accessed December 6, 2025).</comment></mixed-citation></ref>
<ref id="B31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ger&#x0142;owska</surname> <given-names>J</given-names></name> <name><surname>Skrobas</surname> <given-names>U</given-names></name> <name><surname>Grabowska-Aleksandrowicz</surname> <given-names>K</given-names></name> <name><surname>Korchut</surname> <given-names>A</given-names></name> <name><surname>Szklener</surname> <given-names>S</given-names></name> <name><surname>Szcz&#x0119;&#x015B;niak-Sta&#x0144;czyk</surname> <given-names>D</given-names></name><etal/></person-group> <article-title>Assessment of perceived attractiveness, usability, and societal impact of a multimodal robotic assistant for ageing patients with memory impairments</article-title>. <source>Front Neurol</source>. (<year>2018</year>) <volume>9</volume>:<fpage>392</fpage>. <pub-id pub-id-type="doi">10.3389/fneur.2018.00392</pub-id></mixed-citation></ref>
<ref id="B32"><label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Denecke</surname> <given-names>K</given-names></name> <name><surname>Vaaheesan</surname> <given-names>S</given-names></name> <name><surname>Arulnathan</surname> <given-names>A</given-names></name></person-group>. <article-title>A mental health chatbot for regulating emotions (SERMO)&#x2014;concept and usability test</article-title>. <source>IEEE Trans Emerg Top Comput</source>. (<year>2020</year>) <volume>9</volume>:<fpage>1170</fpage>&#x2013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1109/tetc.2020.2974478</pub-id></mixed-citation></ref>
<ref id="B33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>S</given-names></name> <name><surname>Chen</surname> <given-names>H</given-names></name> <name><surname>Luo</surname> <given-names>X</given-names></name> <name><surname>Zhang</surname> <given-names>R</given-names></name></person-group>. <article-title>Privacy risks and policy implications of deploying LLMs in healthcare</article-title>. <source>J Biomed Inform</source>. (<year>2024</year>) <volume>146</volume>:<fpage>104508</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbi.2024.104508</pub-id></mixed-citation></ref>
<ref id="B34"><label>34.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Lehman</surname> <given-names>E</given-names></name> <name><surname>Jain</surname> <given-names>S</given-names></name> <name><surname>Pichotta</surname> <given-names>K</given-names></name> <name><surname>Goldberg</surname> <given-names>Y</given-names></name> <name><surname>Wallace</surname> <given-names>BC</given-names></name></person-group>. <article-title>Does BERT pretrain on patients? Learning to detect hallucinated content in patient notes</article-title>. <conf-name>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP)</conf-name> (<year>2021</year>).</mixed-citation></ref>
<ref id="B35"><label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kalkman</surname> <given-names>S</given-names></name> <name><surname>van Delden</surname> <given-names>JJ</given-names></name> <name><surname>Banerjee</surname> <given-names>A</given-names></name> <name><surname>Tyl</surname> <given-names>B</given-names></name> <name><surname>Mostert</surname> <given-names>M</given-names></name> <name><surname>van Thiel</surname> <given-names>GJMW</given-names></name><etal/></person-group> <article-title>Responsible data sharing in international health research: a systematic review of principles and norms</article-title>. <source>BMC Med Ethics</source>. (<year>2019</year>) <volume>20</volume>(<issue>1</issue>):<fpage>21</fpage>. <pub-id pub-id-type="doi">10.1186/s12910-019-0367-1</pub-id><pub-id pub-id-type="pmid">30922290</pub-id></mixed-citation></ref>
<ref id="B36"><label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gichoya</surname> <given-names>JW</given-names></name> <name><surname>Banerjee</surname> <given-names>I</given-names></name> <name><surname>Bhimireddy</surname> <given-names>AR</given-names></name> <name><surname>Burns</surname> <given-names>JL</given-names></name> <name><surname>Celi</surname> <given-names>LA</given-names></name> <name><surname>Chen</surname> <given-names>LC</given-names></name><etal/></person-group> <article-title>Ethical and safe use of AI in medical imaging</article-title>. <source>Radiol Artif Intell</source>. (<year>2022</year>) <volume>4</volume>(<issue>4</issue>):<fpage>e210331</fpage>. <pub-id pub-id-type="doi">10.1148/ryai.210331</pub-id></mixed-citation></ref>
<ref id="B37"><label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>X</given-names></name> <name><surname>Zhang</surname> <given-names>X</given-names></name> <name><surname>Yin</surname> <given-names>S</given-names></name> <name><surname>Wang</surname> <given-names>Y</given-names></name></person-group>. <article-title>Risks and challenges of AI chatbots in clinical decision support: a case study of ChatGPT</article-title>. <source>NPJ Digit Med</source>. (<year>2023</year>) <volume>6</volume>:<fpage>109</fpage>. <pub-id pub-id-type="doi">10.1038/s41746-023-00894-0</pub-id><pub-id pub-id-type="pmid">37280429</pub-id></mixed-citation></ref>
<ref id="B38"><label>38.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Doshi-Velez</surname> <given-names>F</given-names></name> <name><surname>Kim</surname> <given-names>B</given-names></name></person-group>. <comment>Towards a rigorous science of interpretable machine learning. arXiv preprint arXiv:1702.08608 (2017).</comment> <pub-id pub-id-type="doi">10.48550/arXiv.1702.08608</pub-id></mixed-citation></ref>
<ref id="B39"><label>39.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Babel</surname> <given-names>A</given-names></name> <name><surname>Taneja</surname> <given-names>R</given-names></name> <name><surname>Malvestiti</surname> <given-names>F</given-names></name> <name><surname>Monaco</surname> <given-names>A</given-names></name> <name><surname>Donde</surname> <given-names>S</given-names></name></person-group>. <article-title>Artificial intelligence solutions to increase medication adherence in patients with non-communicable diseases</article-title>. <source>Front Digit Health</source>. (<year>2021</year>) <volume>3</volume>:<fpage>669869</fpage>. <pub-id pub-id-type="doi">10.3389/fdgth.2021.669869</pub-id><pub-id pub-id-type="pmid">34713142</pub-id></mixed-citation></ref>
<ref id="B40"><label>40.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gross</surname> <given-names>C</given-names></name> <name><surname>Schachner</surname> <given-names>T</given-names></name> <name><surname>Hasl</surname> <given-names>A</given-names></name> <name><surname>Kohlbrenner</surname> <given-names>D</given-names></name> <name><surname>Clarenbach</surname> <given-names>C</given-names></name> <name><surname>Wangenheim</surname> <given-names>F</given-names></name><etal/></person-group> <article-title>Personalization of conversational agent-patient interaction styles for chronic disease management: two consecutive cross-sectional questionnaire studies</article-title>. <source>J Med Internet Res</source>. (<year>2021</year>) <volume>23</volume>:<fpage>e26643</fpage>. <pub-id pub-id-type="doi">10.2196/26643</pub-id><pub-id pub-id-type="pmid">33913814</pub-id></mixed-citation></ref>
<ref id="B41"><label>41.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>van Gorssel</surname> <given-names>D</given-names></name> <name><surname>Hegeman</surname> <given-names>M</given-names></name> <name><surname>Gerritsen</surname> <given-names>D</given-names></name> <name><surname>van 't Klooster</surname> <given-names>JWJR</given-names></name> <name><surname>Vonkeman</surname> <given-names>HE</given-names></name></person-group>. <article-title>OP0036 artificial intelligence reinforced social robotics in patient communication for patients with rheumatic diseases</article-title>. <source>Ann Rheum Dis</source>. (<year>2025</year>) <volume>84</volume>(<issue>Supplement 1</issue>):<fpage>32</fpage>&#x2013;<lpage>3</lpage>. <pub-id pub-id-type="doi">10.1016/j.ard.2025.05.060</pub-id></mixed-citation></ref>
<ref id="B42"><label>42.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hancock</surname> <given-names>PA</given-names></name> <name><surname>Billings</surname> <given-names>DR</given-names></name> <name><surname>Schaefer</surname> <given-names>KE</given-names></name> <name><surname>Chen</surname> <given-names>JYC</given-names></name> <name><surname>De Visser</surname> <given-names>EJ</given-names></name> <name><surname>Parasuraman</surname> <given-names>R</given-names></name></person-group>. <article-title>A meta-analysis of factors affecting trust in human-robot interaction</article-title>. <source>Hum Factors</source>. (<year>2011</year>) <volume>53</volume>(<issue>5</issue>):<fpage>517</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.1177/0018720811417254</pub-id><pub-id pub-id-type="pmid">22046724</pub-id></mixed-citation></ref>
<ref id="B43"><label>43.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mayer</surname> <given-names>RC</given-names></name> <name><surname>Davis</surname> <given-names>JH</given-names></name> <name><surname>Schoorman</surname> <given-names>FD</given-names></name></person-group>. <article-title>An integrative model of organizational trust</article-title>. <source>Acad Manag Rev</source>. (<year>1995</year>) <volume>20</volume>(<issue>3</issue>):<fpage>709</fpage>&#x2013;<lpage>34</lpage>. <pub-id pub-id-type="doi">10.2307/258792</pub-id></mixed-citation></ref>
<ref id="B44"><label>44.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shourmasti</surname> <given-names>ES</given-names></name> <name><surname>Colomo-Palacios</surname> <given-names>R</given-names></name> <name><surname>Holone</surname> <given-names>H</given-names></name> <name><surname>Demi</surname> <given-names>S</given-names></name></person-group>. <article-title>User experience in social robots</article-title>. <source>Sensors</source>. (<year>2021</year>) <volume>21</volume>(<issue>15</issue>):<fpage>5052</fpage>. <pub-id pub-id-type="doi">10.3390/s21155052</pub-id><pub-id pub-id-type="pmid">34372289</pub-id></mixed-citation></ref></ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/665431/overview">Uwe Aickelin</ext-link>, The University of Melbourne, Australia</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2571606/overview">Lamia Elloumi</ext-link>, Amsterdam University of Applied Sciences, Netherlands</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3107740/overview">Mohamed Adlan Ait Ameur</ext-link>, University of Strathclyde, United Kingdom</p></fn>
</fn-group>
</back>
</article>