<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Artif. Intell.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Artificial Intelligence</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Artif. Intell.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2624-8212</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/frai.2026.1795842</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Self-experienced empathetic behaviour patterns in medical students during virtual patient encounters: a comparison between an AI-enhanced social robot and a computer-based platform</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Borg</surname>
<given-names>Alexander</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1728347"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Jobs</surname>
<given-names>Benjamin</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Gentline</surname>
<given-names>Cidem</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Huss</surname>
<given-names>Viking</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hugelius</surname>
<given-names>Anna</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Schi&#x00F6;tt</surname>
<given-names>Jonathan</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ivegren</surname>
<given-names>William</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Espinosa</surname>
<given-names>Fabricio</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1819136"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ruiz</surname>
<given-names>Mini</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Edelbring</surname>
<given-names>Samuel</given-names>
</name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Georg</surname>
<given-names>Carina</given-names>
</name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Skantze</surname>
<given-names>Gabriel</given-names>
</name>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/747537"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Parodis</surname>
<given-names>Ioannis</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff9"><sup>9</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/392088"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Division of Rheumatology, Department of Medicine Solna, Karolinska Institutet, Karolinska University Hospital, Center for Molecular Medicine (CMM)</institution>, <city>Stockholm</city>, <country country="SE">Sweden</country></aff>
<aff id="aff2"><label>2</label><institution>Division of Clinical Epidemiology, Department of Medicine Solna, Karolinska Institutet and Karolinska University Hospital</institution>, <city>Stockholm</city>, <country country="SE">Sweden</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Clinical Science, Intervention and Technology, Karolinska Institutet</institution>, <city>Stockholm</city>, <country country="SE">Sweden</country></aff>
<aff id="aff4"><label>4</label><institution>Department of Educational Sciences and Arts, Faculty of Philosophy, M&#x00E4;lardalen University</institution>, <city>V&#x00E4;ster&#x00E5;s</city>, <country country="SE">Sweden</country></aff>
<aff id="aff5"><label>5</label><institution>Unit of Teaching and Learning, Karolinska Institutet</institution>, <city>Stockholm</city>, <country country="SE">Sweden</country></aff>
<aff id="aff6"><label>6</label><institution>School of Health Sciences, &#x00D6;rebro University</institution>, <city>&#x00D6;rebro</city>, <country country="SE">Sweden</country></aff>
<aff id="aff7"><label>7</label><institution>Department of Neurobiology, Care Sciences and Society, Karolinska Institutet</institution>, <city>Stockholm</city>, <country country="SE">Sweden</country></aff>
<aff id="aff8"><label>8</label><institution>Division of Speech Music and Hearing, Royal Institute of Technology (KTH)</institution>, <city>Stockholm</city>, <country country="SE">Sweden</country></aff>
<aff id="aff9"><label>9</label><institution>Department of Rheumatology, Faculty of Medicine and Health, &#x00D6;rebro University</institution>, <city>&#x00D6;rebro</city>, <country country="SE">Sweden</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Alexander Borg, <email xlink:href="mailto:alexander.borg@ki.se">alexander.borg@ki.se</email> Ioannis Parodis, <email xlink:href="mailto:ioannis.parodis@ki.se">ioannis.parodis@ki.se</email></corresp>
<fn fn-type="other" id="fn0001"><label>&#x2020;</label><p>ORCID: Alexander Borg, <uri xlink:href="https://orcid.org/0000-0003-1013-4590">orcid.org/0000-0003-1013-4590</uri>; Benjamin Jobs, <uri xlink:href="https://orcid.org/0009-0002-2391-8087">orcid.org/0009-0002-2391-8087</uri>; Viking Huss, <uri xlink:href="https://orcid.org/0000-0002-9764-6435">orcid.org/0000-0002-9764-6435</uri>; Jonathan Schi&#x00F6;tt, <uri xlink:href="https://orcid.org/0009-0001-0445-630X">orcid.org/0009-0001-0445-630X</uri>; William Ivegren, <uri xlink:href="https://orcid.org/0009-0004-8417-6106">orcid.org/0009-0004-8417-6106</uri>; Fabricio Espinosa, <uri xlink:href="https://orcid.org/0000-0001-6227-8209">orcid.org/0000-0001-6227-8209</uri>; Mini Ruiz, <uri xlink:href="https://orcid.org/0000-0002-9910-8809">orcid.org/0000-0002-9910-8809</uri>; Samuel Edelbring, <uri xlink:href="https://orcid.org/0000-0002-1110-0782">orcid.org/0000-0002-1110-0782</uri>; Carina Georg, <uri xlink:href="https://orcid.org/0000-0001-8444-7624">orcid.org/0000-0001-8444-7624</uri>; Gabriel Skantze, <uri xlink:href="https://orcid.org/0000-0002-8579-1790">orcid.org/0000-0002-8579-1790</uri>; Ioannis Parodis, <uri xlink:href="https://orcid.org/0000-0002-4875-5395">orcid.org/0000-0002-4875-5395</uri></p></fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-04">
<day>04</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>9</volume>
<elocation-id>1795842</elocation-id>
<history>
<date date-type="received">
<day>25</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>13</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>23</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Borg, Jobs, Gentline, Huss, Hugelius, Schi&#x00F6;tt, Ivegren, Espinosa, Ruiz, Edelbring, Georg, Skantze and Parodis.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Borg, Jobs, Gentline, Huss, Hugelius, Schi&#x00F6;tt, Ivegren, Espinosa, Ruiz, Edelbring, Georg, Skantze and Parodis</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-04">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Objective</title>
<p>To explore whether an AI-enhanced social robotic virtual patient (VP) platform reinforces empathetic behaviour patterns in medical students compared with a traditional computer-based platform.</p>
</sec>
<sec>
<title>Methods</title>
<p>Twenty-three sixth-semester medical students from Karolinska Institutet participated in semi-structured interviews following VP encounters with the Social AI-enhanced Robotic Interface (SARI) and, as a comparator, the computer-based Virtual Interactive Case system (VIC). Additionally, 178 students evaluated the VP platforms in empathetic training quantitatively using categorical nominal variables and a visual analogue scale (VAS), with a score of 0 indicating full preference for SARI and 10 full preference for VIC. Interview data were thematically analysed, and quantitative preferences were compared using Fisher&#x2019;s exact test with Monte Carlo simulation and the Wilcoxon signed-rank test.</p>
</sec>
<sec>
<title>Results</title>
<p>Thematic analysis yielded five major themes wherein students consistently reported that SARI facilitated greater empathetic engagement through multimodal interaction, ability to express emotions, and real-time communication adaptability. Quantitative analysis demonstrated a higher preference for SARI versus VIC (78% versus 6%; OR: 190.4; 95% CI: 76.8&#x2013;472.0; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), which remained consistent across subgroups of interest, i.e., female and male students, with and without prior experience in VPs, and students first exposed to SARI or first exposed to VIC. VAS data also showed a preference for SARI versus VIC (median: 2.00; IQR: 1.00&#x2013;4.00; <italic>W</italic>: 738.5; <italic>r</italic>: 0.70; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001).</p>
</sec>
<sec>
<title>Conclusion</title>
<p>Our AI-enhanced social robotic VP platform was superior to a traditional computer-based VP platform in fostering empathetic engagement in medical students through enhanced authenticity and interactivity, supporting its potential to supplement clinical rotations.</p>
</sec>
</abstract>
<kwd-group>
<kwd>empathy</kwd>
<kwd>large language models</kwd>
<kwd>medical education</kwd>
<kwd>social robotics</kwd>
<kwd>virtual patients</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by grants from Region Stockholm ALF Pedagogy (FoUI-977096), Karolinska Institutet Pedagogical Project Funding (FoUI-964139), the Swedish Rheumatism Association (R-1013624), King Gustaf V&#x2019;s 80-year Foundation (FAI-2023-1055), Swedish Society of Medicine (SLS-974449), Nyckelfonden (OLL-1023269), Professor Nanna Svartz Foundation (2021-00436), Ulla and Roland Gustafsson Foundation (2024-43), Region Stockholm (FoUI-1004114), and Karolinska Institutet.</funding-statement>
</funding-group>
<counts>
<fig-count count="3"/>
<table-count count="2"/>
<equation-count count="0"/>
<ref-count count="50"/>
<page-count count="13"/>
<word-count count="9574"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>AI for Human Learning and Behaviour Change</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="sec1">
<title>Highlights</title>
<list list-type="bullet">
<list-item>
<p>The Social AI-enhanced Robotic Interface (SARI) for virtual patient simulation creates a more authentic environment for medical students, fostering empathy and immersive learning.</p>
</list-item>
<list-item>
<p>Medical students experienced that SARI reinforces empathetic engagement through multimodal and advanced ways of interaction.</p>
</list-item>
<list-item>
<p>Large language model (LLM)-enhanced social robotic virtual patient platforms may prove useful as safe learning environments for developing empathetic communication skills, alongside training of clinical reasoning.</p>
</list-item>
</list>
</sec>
<sec sec-type="intro" id="sec2">
<title>Introduction</title>
<p>Empathy is by some considered one of the most important skills of healthcare practitioners who are engaged in patient care (<xref ref-type="bibr" rid="ref14">Colliver et al., 2010</xref>). Empathetic skills from healthcare providers have been shown to be associated with better health outcomes for patients and decreased risk of hospitalisations (<xref ref-type="bibr" rid="ref17">Del Canale et al., 2012</xref>; <xref ref-type="bibr" rid="ref28">Hojat et al., 2011</xref>). Patient-perceived empathy from physicians has been shown to influence patient satisfaction, adherence, and interpersonal trust (<xref ref-type="bibr" rid="ref22">Emamikia et al., 2022</xref>; <xref ref-type="bibr" rid="ref34">Kim et al., 2004</xref>), which are important factors in improving health outcomes for patients. Due to its multidimensional nature, empathy has been challenging to define, leading to varied descriptions across the literature (<xref ref-type="bibr" rid="ref26">Hojat, 2007</xref>). Within the field of medical education, empathy is mostly described using cognitive characteristics, such as being able to understand the perspective of a patient, in contrast to emotional characteristics, such as feeling a patient&#x2019;s pain or suffering (<xref ref-type="bibr" rid="ref47">Sulzer et al., 2016</xref>). Such emotional characteristics are more commonly associated within the concept of sympathy, which, however, can overlap with empathy (<xref ref-type="bibr" rid="ref29">Hojat et al., 2001</xref>).</p>
<p>In undergraduate medical education, the cultivation of empathetic behaviour is commonly facilitated through structured interactive modalities. Role-play scenarios constitute a primary approach, either between peers or with trained actors serving as standardised patients. These standardised patients are instructed to simulate specific clinical conditions with predetermined behavioural patterns and emotional responses, enabling students to practise empathetic communication within controlled yet realistic settings. This approach ensures consistent exposure to diverse patient presentations while allowing students to develop empathetic responses in a safe learning environment before meeting real patients (<xref ref-type="bibr" rid="ref42">Patel et al., 2019</xref>). However, standardised patients can be costly for educational sites, and availability aspects can further hurdle the training of empathetic conduct. An alternative to using standardised patients to practise empathetic conduct could be virtual patients (VPs).</p>
<p>VPs are digital educational modalities that are often used to resemble various clinical case scenarios, with the goal of practising and achieving specific learning outcomes in health professions education (HPE) curricula (<xref ref-type="bibr" rid="ref21">Ellaway, 2006</xref>; <xref ref-type="bibr" rid="ref25">Hege et al., 2016</xref>; <xref ref-type="bibr" rid="ref50">Xu et al., 2023</xref>). VPs are acknowledged as a good complement to real-life patient encounters, with ability to ensure the practise of clinical skills, e.g., clinical reasoning, procedural, and team-based skills (<xref ref-type="bibr" rid="ref36">Kononowicz et al., 2019</xref>). They have also been shown to be beneficial modalities in training empathetic communication in early stages of medical education by facilitating interactions in a low pressure environment (<xref ref-type="bibr" rid="ref35">Kleinsmith et al., 2015</xref>). However, when compared with standardised patients, students&#x2019; interactions with VPs have been shown to generate less empathetic responses (<xref ref-type="bibr" rid="ref18">Deladisma et al., 2007</xref>). Technological advancements have led to the emergence of artificial intelligence (AI) and large language models (LLMs), which enable more realistic VP interactions compared to traditional modalities (<xref ref-type="bibr" rid="ref15">Cook, 2024</xref>; <xref ref-type="bibr" rid="ref46">Su&#x00E1;rez et al., 2022</xref>), potentially improving the possibilities of empathetic behaviour towards VPs. In previous work, we introduced a novel VP modality by applying an LLM in combination with a social robot. This was perceived by medical students as a more realistic and more authentic platform compared to a conventional computer-based VP platform (<xref ref-type="bibr" rid="ref7">Borg et al., 2025</xref>; <xref ref-type="bibr" rid="ref8">Borg et al., 2024</xref>). 
To our knowledge, there are no previous studies that investigate empathetic behaviour patterns towards VPs presented through an AI-enhanced social robot.</p>
<p>The aim of this study was to explore if an AI-enhanced social robotic VP platform can reinforce self-experienced empathetic behaviour in medical students compared with a traditional computer-based platform.</p>
</sec>
<sec sec-type="methods" id="sec3">
<title>Methods</title>
<p>We conducted an interventional explorative study to examine how medical students perceived their empathetic behaviour when using a social robotic VP platform enhanced with an LLM, compared to a conventional computer-based VP platform. The conventional computer-based platform virtual interactive case system (VIC) (<xref ref-type="bibr" rid="ref49">Virtual Interactive Case System, n.d.</xref>) employs a semi-linear design structure where students navigate through predetermined pathways by selecting among questions and receiving fixed text-based responses. In contrast, our newly developed social AI-enhanced robotic interface (SARI) (<xref ref-type="bibr" rid="ref7">Borg et al., 2025</xref>; <xref ref-type="bibr" rid="ref8">Borg et al., 2024</xref>) enables branched conversations where each interaction is contextually generated based on specific queries and the conversation history. While VIC provides consistent, reproducible interactions through its structured format, SARI offers adaptive, naturalistic dialogue with multimodal communication including facial expressions, voice modulation, and real-time emotional responses (<xref ref-type="bibr" rid="ref30">Huwendiek et al., 2009</xref>).</p>
<p>Data for this comparison were derived from qualitative fully transcribed in-depth interviews and a quantitative questionnaire for VP platform evaluation wherein students rated which platform they preferred for practising empathetic skills. The qualitative part of the study followed consolidated criteria for reporting qualitative research (COREQ) (<xref ref-type="bibr" rid="ref48">Tong et al., 2007</xref>). The report is detailed in <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S1</xref>.</p>
<sec id="sec4">
<title>Participants</title>
<p>In this study, we recruited sixth-semester medical students from Karolinska Institutet (KI) in Stockholm, Sweden. The recruitment took place during the course &#x201C;Clinical medicine 2: applied internal medicine and related disciplines,&#x201D; specifically during clinical placements within rheumatology at the Karolinska University Hospital. The sixth semester was chosen as it marks the transition from pre-clinical to clinical education during the KI medical programme, including the first clinical rotations. This represents a formative period for empathy development, as medical students have acquired foundational medical knowledge but are at the beginning of regular patient contact. Participants in the in-depth interviews (<italic>n</italic>&#x202F;=&#x202F;23) were recruited from a pool of students who undertook the course during the spring term of 2024 (<italic>n</italic>&#x202F;=&#x202F;117). Of 60 students who consented to participate in interviews, 23 students were finally included, determined by the chronological order of the participant response. The number of interviews was considered sufficient as it yielded information power (<xref ref-type="bibr" rid="ref38">Malterud et al., 2016</xref>) during thematic analysis, as per the COREQ recommendations (<xref ref-type="bibr" rid="ref48">Tong et al., 2007</xref>). Furthermore, 178 students agreed to participate in quantitative evaluations of the VP platforms and were recruited between the spring term of 2024 and the spring term of 2025.</p>
<p>Participation in the study was voluntary, and all participants provided written informed consent prior to inclusion and could withdraw their participation at any time. The study was approved by the Swedish Ethical Review Authority prior to enrolment (registration number: 2022&#x2013;04437-01).</p>
</sec>
<sec id="sec5">
<title>VP case development and practice</title>
<p>Ten VP cases were developed according to distinct recommendations for VP platform development (<xref ref-type="bibr" rid="ref43">Posel et al., 2009</xref>). They were designed to be in English to also facilitate accessibility for international students. The cases were presented in two VP platforms: our newly developed SARI and the computer-based interface VIC (<xref ref-type="bibr" rid="ref49">Virtual Interactive Case System, n.d.</xref>). Each platform contained five cases, with one having identical case content between the two platforms (<xref ref-type="bibr" rid="ref7">Borg et al., 2025</xref>). The case development followed distinct principles and guidelines for development of VP cases (<xref ref-type="bibr" rid="ref44">Posel et al., 2015</xref>), and used clinical case content illustrating various rheumatic conditions, as previously described by our group (<xref ref-type="bibr" rid="ref7">Borg et al., 2025</xref>; <xref ref-type="bibr" rid="ref8">Borg et al., 2024</xref>).</p>
<p>Students performed VP cases in pairs or groups of three to promote collaboration, based on previous work that has demonstrated enhanced clinical reasoning training (<xref ref-type="bibr" rid="ref20">Edelbring et al., 2018</xref>). The cases started with a brief introduction of a patient case within a specific setting, and students completed each VP case when they perceived that they had sufficient information to conclude a preliminary diagnosis and a management plan. Since SARI primarily communicated using its voice, students were provided written information before the VP encounters based on the context of each case, as well as results from relevant laboratory tests along with their corresponding reference values. Following each completed case, students participated in follow-up seminars facilitated by a physician specialised in rheumatology to discuss the case contents. The seminar groups ranged between six and eight student participants. Half of the students started VP practice with SARI and half of the students started with VIC. All students completed all VP cases and participated in all follow-up seminars during their clinical placement.</p>
</sec>
<sec id="sec6">
<title>The social robotic platform SARI</title>
<p>For the embodiment of SARI, we used the social robot developed by Furhat robotics, which has an animated face that matches the patients&#x2019; age and sex, projected on a semi-transparent plastic face mask, and a mechanical neck that allows natural head movements (<xref ref-type="bibr" rid="ref2">Al Moubayed et al., 2012</xref>). Furhat displays subtle facial expressions and affective responses, including gaze behaviour as indicators of a patient&#x2019;s emotional status (<xref ref-type="bibr" rid="ref39">Mishra et al., 2023</xref>). Furhat includes the Furhat software development kit (FurhatSDK) (<xref ref-type="bibr" rid="ref2">Al Moubayed et al., 2012</xref>), which we combined with the Open AI gpt-3.5 turbo LLM (<xref ref-type="bibr" rid="ref12">Brown et al., 2020</xref>) to develop SARI. We prompted the LLM to generate authentic dialogue responses from the perspective of a patient using specific instructions and included a detailed patient description together with the last 10 dialogue lines. To reflect the emotional state of a patient, we prompted the LLM to generate suitable facial expressions during the conversation using anchor points (<xref ref-type="bibr" rid="ref31">Irfan et al., 2023</xref>). Appropriate facial expressions were then presented at these anchor points by the social robot from a predefined set of facial expressions in the FurhatSDK. The selection of facial expressions was based on the context of the dialogue and the LLM-generated responses from the VP. An example of a prompt is illustrated in the <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S2</xref>.</p>
<p>A known limitation of LLMs and social robots in turn-taking dialogue is response delays (<xref ref-type="bibr" rid="ref31">Irfan et al., 2023</xref>). To mitigate the risk of misunderstanding due to response delays, we created a turn-taking signal using an LED light at the bottom of the robot to indicate whether the robot was actively listening or preparing a response using specified colours (<xref ref-type="bibr" rid="ref45">Skantze et al., 2015</xref>).</p>
</sec>
<sec id="sec7">
<title>The computer-based platform VIC</title>
<p>VIC is a computer-based VP platform where users can explore patient case details in any order they choose, while the introduction and conclusion remain fixed (<xref ref-type="bibr" rid="ref49">Virtual Interactive Case System, n.d.</xref>). Users gather information by selecting pre-written questions about the patient&#x2019;s medical history and are instructed to gather relevant information within the context of the case. They can also conduct virtual physical examinations and review test results and reports from relevant imaging. When students felt they had collected sufficient information, they completed the case by diagnosing the condition and creating a management plan for the VP from a selection of multiple-choice options (<xref ref-type="bibr" rid="ref1">Ahn and Edelbring, 2020</xref>; <xref ref-type="bibr" rid="ref23">Georg and Zary, 2014</xref>).</p>
</sec>
<sec id="sec8">
<title>Data collection</title>
<p>Following completion of all VP cases in both SARI and VIC as well as the follow-up seminars, students who consented to participate in the study evaluated the VP platforms quantitatively by responding to two questions regarding the VP encounter about empathetic conduct. The first question was &#x201C;overall, which of the platforms is preferable to you in relation to self-experienced empathy during the patient encounter?&#x201D;, which was answered using one of three options: (i) SARI, (ii) VIC, or (iii) equally preferred. The second question followed the structure of a visual analogue scale (VAS) and read &#x201C;on a scale between 0 and 10, where 0 indicates total preference of the social robot and 10 indicates total preference of the computer-based platform, how would you grade your preference of the virtual patient platforms compared to each other in relation to self-experienced empathy during the patient encounter?&#x201D;; on this scale, 5 indicated equal preference between the two platforms. Students also provided additional data such as age, sex, whether they had previous experience with VP platforms, and which platform they were introduced to first.</p>
<p>Some students also participated in semi-structured interviews. The interviews followed an interview guide that pertained to learning experiences, the platform&#x2019;s contribution to the acquisition of communication skills, self-perceived empathy, as well as suggestions for improvements. The interview guide was an iteration of the guide used for the pilot testing of SARI (<xref ref-type="bibr" rid="ref7">Borg et al., 2025</xref>), following refinements based on feedback provided by medical students. The interview guide is provided in <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S3</xref>.</p>
<p>All interviews were conducted by the same researcher (AB), either in person at the Karolinska University Hospital or during video calls. Each interview ranged between 40 and 60&#x202F;min, was fully recorded, transcribed verbatim, and pseudonymised. Interview data were stored on secure servers at KI and were accessible only to involved researchers upon removal of personal or sensitive information.</p>
</sec>
<sec id="sec9">
<title>Interview analysis</title>
<p>We processed fully transcribed interview data systematically according to the six-phase methodological approach for reflexive thematic analysis described by Braun and Clarke (<xref ref-type="bibr" rid="ref9">Braun and Clarke, 2006</xref>; <xref ref-type="bibr" rid="ref10">Braun and Clarke, 2021</xref>; <xref ref-type="bibr" rid="ref11">Braun and Clarke, 2023</xref>). During analysis and theme development, two researchers (AB and BJ) were attentive to the emergence of themes to capture unique aspects of the students&#x2019; experiences.</p>
<p>Firstly, researchers (AB and BJ) independently read and re-read all transcripts to note initial ideas and patterns related to empathetic behaviour. Secondly, codes were implemented systematically across the entire dataset, connecting relevant data to each code. The codes included specific references to emotional responses, communication patterns, and platform interactions. Thirdly, initial themes emerged by the connection of codes into overarching categories, gathering all relevant data to each potential theme. Fourthly, the themes were reviewed against coded extracts multiple times. This involved ensuring themes were internally coherent and distinct from each other. Fifthly, the final themes were defined and named to generate clear definitions and names that capture the essence of each theme. Finally, a report was produced by selection of representative quotes from the transcripts and final analysis relating back to the research question on empathetic behaviour towards VPs.</p>
<p>The coding process was collaborative and iterative. Researchers (AB and BJ) met regularly on a weekly basis to compare and discuss their interpretations, emerging patterns, and refine the coding framework. Following discussions, the codes were refined to resolve any discrepancies until the coding scheme resulted in consensus on the final thematic structure. The codes were used in specific areas of the interviews to highlight aspects of empathetic behaviour towards VPs. The analysis, coding process, and theme development were undertaken by AB and BJ, with IP providing supervision and guidance on the final thematic structure.</p>
</sec>
<sec id="sec10">
<title>Statistics</title>
<p>The Fisher&#x2019;s exact test with Monte Carlo simulation (10,000 iterations) was used to compare frequencies of categorical responses on VP platform preference for self-experienced empathy. The Wilcoxon signed-rank test was used for VAS-scale responses comparing the scores versus a hypothetical neutral score of 5. Results from Fisher&#x2019;s exact tests are presented as numbers and the corresponding percentage, odds ratio (OR), 95% confidence interval (CI), and <italic>p</italic> value. Results from Wilcoxon signed-rank tests are presented as the median and the corresponding interquartile range (IQR), test statistic (<italic>W</italic>), effect size (<italic>r</italic>), and <italic>p</italic> value. The statistical analysis was performed using R software, version 4.3.3 (R foundation for Statistical Computing, Vienna, Austria). Differences yielding <italic>p</italic> values &#x003C;0.05 were considered statistically significant.</p>
</sec>
</sec>
<sec sec-type="results" id="sec11">
<title>Results</title>
<sec id="sec12">
<title>Interviews</title>
<p>Of 23 students who participated in interviews, 61% were men (<italic>n</italic>&#x202F;=&#x202F;14) and 39% were women (<italic>n</italic>&#x202F;=&#x202F;9). A total of 74% had no previous experience with VPs (<italic>n</italic>&#x202F;=&#x202F;17). The students&#x2019; mean age was 23.5 (SD: 6.0) years. Regarding order of platform interaction, 52% started with SARI (<italic>n</italic>&#x202F;=&#x202F;12), whereas 48% started with VIC (<italic>n</italic>&#x202F;=&#x202F;11). Thematic analysis resulted in five themes: (i) physical embodiment, (ii) responses to emotional cues, (iii) cognitive immersion, (iv) empathetic interaction, and (v) complementary learning value. Each theme was further divided into sub-themes, totalling eight sub-themes across all themes. <xref ref-type="table" rid="tab1">Table 1</xref> details the identified themes along with their corresponding sub-themes and analytical codes. Illustrative quotes from students are shown in <xref ref-type="table" rid="tab2">Table 2</xref>. Examples of the reflexive analysis process are provided in <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S4</xref>.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Identified themes and sub-themes from the qualitative thematic analysis.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Themes</th>
<th align="left" valign="top">Sub-themes</th>
<th align="left" valign="top">Analytical codes</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top" rowspan="6">Physical embodiment</td>
<td align="left" valign="top" rowspan="3">Multimodal communication</td>
<td align="left" valign="top">Visual/facial expressions</td>
</tr>
<tr>
<td align="left" valign="top">Verbal interaction quality</td>
</tr>
<tr>
<td align="left" valign="top">Interactive experience quality</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">From text to embodied interaction</td>
<td align="left" valign="top">Perception of patient authenticity</td>
</tr>
<tr>
<td align="left" valign="top">Emotional responses to VPs</td>
</tr>
<tr>
<td align="left" valign="top">Presence of emotional connection</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="6">Responses to emotional cues</td>
<td align="left" valign="top" rowspan="3">Responding to concerns</td>
<td align="left" valign="top">Emotional responses to VPs</td>
</tr>
<tr>
<td align="left" valign="top">Sense of responsibility</td>
</tr>
<tr>
<td align="left" valign="top">Ability to express comfort/reassurance</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">Adaptability of communication</td>
<td align="left" valign="top">Interactive experience quality</td>
</tr>
<tr>
<td align="left" valign="top">Verbal interaction quality</td>
</tr>
<tr>
<td align="left" valign="top">Ability to express comfort/reassurance</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="6">Cognitive immersion</td>
<td align="left" valign="top" rowspan="3">Active engagement</td>
<td align="left" valign="top">Differences in question formulation</td>
</tr>
<tr>
<td align="left" valign="top">Interactive experience quality</td>
</tr>
<tr>
<td align="left" valign="top">Learning focus</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">Memory and continuity</td>
<td align="left" valign="top">Interactive experience quality</td>
</tr>
<tr>
<td align="left" valign="top">Verbal interaction quality</td>
</tr>
<tr>
<td align="left" valign="top">Perception of patient authenticity</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="4">Empathetic interaction</td>
<td rowspan="4"/>
<td align="left" valign="top">Emotional connection</td>
</tr>
<tr>
<td align="left" valign="top">Sense of responsibility</td>
</tr>
<tr>
<td align="left" valign="top">Emotional responses to VPs</td>
</tr>
<tr>
<td align="left" valign="top">Perception of patient authenticity</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="6">Complementary learning value</td>
<td align="left" valign="top" rowspan="3">Clinical reasoning versus empathy</td>
<td align="left" valign="top">Learning focus</td>
</tr>
<tr>
<td align="left" valign="top">Interactive experience</td>
</tr>
<tr>
<td align="left" valign="top">Ability to express comfort/reassurance</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">Standardisation and variance</td>
<td align="left" valign="top">Interactive experience</td>
</tr>
<tr>
<td align="left" valign="top">Differences in question formulation</td>
</tr>
<tr>
<td align="left" valign="top">Learning focus</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Relevant quotes from in-depth interviews by theme and sub-theme.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Study participant</th>
<th align="left" valign="top">Quote</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle" colspan="2">Physical embodiment</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Multimodal communication</td>
</tr>
<tr>
<td align="left" valign="middle">Female student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;It was the facial expressions. You could hear it in the voice. &#x2018;What can it be? I&#x2019;m worried. I&#x2019;ve had so many complaints.&#x2019; And then you had to face it.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;The face adds quite a lot because you have someone to look at. Even if it&#x2019;s very difficult to get an emotional connection, the little you get comes from the face. Without it, you get zero, I&#x2019;d say.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">From text to embodied interaction</td>
</tr>
<tr>
<td align="left" valign="middle">Female student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;The computer was like a game. And like a text that you read. So I did not feel close to the patient at all. But for the robot, when it talked, it was still like a real patient.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;Yes, but I think it was easier to feel empathy for the robot cases. Because they talk. They express what they think is tough. And then it&#x2019;s more like you can get into it.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 42&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;When patients talk to me, I try to picture what they describe [&#x2026;] when I got this told in speech by this [robotic] virtual patient, and also in some cases facial expressions and so on, I feel that I live in the patient&#x2019;s situation in a completely different way than when I read a text.</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Responses to emotional cues</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Responding to concerns</td>
</tr>
<tr>
<td align="left" valign="middle">Female student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;When it started to express feelings, [&#x2026;], it was like my brain started to understand that this is almost a real person.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Female student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;And when it expressed its worries, you were like, &#x2018;I want to answer its worries so that it does not walk around and get anxious.&#x2019; So I felt closer to the patient.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 22&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;Sometimes when it was worried about something and so on, you still felt that you wanted to answer its worries. And we did it like this several times. We tried to calm down the patient.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Adaptability of communication</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 25&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;When you talked to the robot it affected a part of you. You confirm, mirror, and summarise, so you feel more empathetic.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;With the social robot, you lose that [stress] and can focus on learning, which will help you to encounter patients. But you still get that human connection, where it feels more real.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Cognitive immersion</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Active engagement</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 42&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;The robot, the face-to-face conversation, and getting a real response to the questions I asked, that created a lot of immersion.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;I&#x2019;m not an expert in neurology, but if I just try to think about it, it does not feel like it&#x2019;s the same parts that are activated by writing a question and formulating it, like actually sitting and formulating it in front of someone. You cannot stop halfway through a sentence, erase half of it and rewrite it when you start talking to someone. In a way, you really need to have a good idea of what you are going to say before you say it.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Memory and continuity</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;It was a conversation where you got to remember the answers you had received earlier while you were thinking about the next question. So it felt closer to reality in a way.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Female student, 23&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;I think that these robot cases [&#x2026;] you are forced to be more active. And then I also experienced that you remember more afterwards. [&#x2026;]&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Empathetic interaction</td>
</tr>
<tr>
<td align="left" valign="middle">Female student, 22&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;The robots were more authentic [in allowing empathetic behaviour]. It felt more like an interaction with a patient. While the computer-based platform [&#x2026;] it&#x2019;s more restricted. It&#x2019;s not a patient that you can see.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Female student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;It [the robot] answers your questions and can express feelings. It builds on [&#x2026;] authenticity.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Complementary learning value</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Clinical reasoning versus empathy</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 21&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;The advantages are that the virtual patient cases are more straightforward. Real patients can often be very complicated. They often have a lot. It can be difficult to find out what the real problem is.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Male student, 42&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;Learning-wise, I cannot speak for the others, but I think I became more committed and took it to an even higher degree. I try to learn with all the opportunities I get, but I think it is the commitment, that you become more committed in the robotic case, plus, as I said earlier, you are forced to formulate things.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Standardisation and variance</td>
</tr>
<tr>
<td align="left" valign="middle">Female student, 48&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;And then also, because it is a virtual patient case, there is also the possibility that everyone should get the same patient, and that you should also be able to get the same, the most important and most common diseases.&#x201D;</td>
</tr>
<tr>
<td align="left" valign="middle">Female student, 23&#x202F;years old</td>
<td align="left" valign="middle">&#x201C;One thing I thought about with the robotic cases. [&#x2026;] Different groups received different information. This [&#x2026;] affects how you ask questions. [&#x2026;] with the computer-based cases you always get the same information.&#x201D;</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec13">
<title>Physical embodiment</title>
<p>The theme &#x201C;physical embodiment&#x201D; suggested that SARI&#x2019;s physical presence influenced empathetic engagement compared to text-based interactions. This theme was further divided into two sub-themes, i.e., (i) multimodal communication and (ii) from text to embodied interaction.</p>
<p>In the subtheme &#x201C;multimodal communication,&#x201D; students consistently reported that the multimodal interaction through a combination of voice, facial expressions, and interactive responses from the robot created a more authentic experience compared with VIC, which facilitated empathetic engagement. The ability of SARI to convey emotional states through facial animations while speaking created a more nuanced communication experience. Students reported that witnessing worried expressions together with concerned vocal tones made them more likely to respond empathetically than when reading similar concerns in text.</p>
<p>In the subtheme &#x201C;from text to embodied interaction,&#x201D; the analysis revealed a perception shift when students transitioned from text-based to embodied VP interactions. Students described that VIC felt game-like and abstract, creating psychological distance towards the VP. In contrast, SARI transformed their perception of the VP from an educational exercise to an encounter with a quasi-person, which they perceived as stimulating towards empathetic engagement.</p>
</sec>
<sec id="sec14">
<title>Responses to emotional cues</title>
<p>The theme &#x201C;responses to emotional cues&#x201D; suggested that students recognised and responded to emotional expressions from the VPs, with distinct differences between the platforms. This theme was further divided into two sub-themes, i.e., (i) responding to concerns and (ii) adaptability of communication.</p>
<p>In the subtheme &#x201C;responding to concerns,&#x201D; students demonstrated higher awareness and responsiveness when interacting with SARI compared to VIC. When the robotic patient expressed worries or fear, students reported feeling compelled to respond to these emotional needs directly, resembling interactions with real patients. Students experienced a sense of responsibility for the robotic VP&#x2019;s emotional state, prompting them to offer spontaneous reassurance and empathetic acknowledgment. This contrasted their approach to VIC, where emotional content was noted cognitively but did not evoke the same immediate empathetic response.</p>
<p>The subtheme &#x201C;adaptability of communication&#x201D; explored the interactive nature of the responses received from SARI, which required students to adapt their communication style based on those responses, fostering empathetic behaviour. Students reported adapting their questioning technique, tone, and pace in response to perceived patient distress or confusion. This adaptive process included using communication strategies typically used in real clinical encounters, such as mirroring, summarising, and validating. Such adaptive processes were not identified or discussed in the context of VIC.</p>
</sec>
<sec id="sec15">
<title>Cognitive immersion</title>
<p>The theme &#x201C;cognitive immersion&#x201D; captured how different levels of cognitive engagement between platforms influenced students&#x2019; capacity for empathetic connection. This theme was further divided into two sub-themes, i.e., (i) active engagement, and (ii) memory and continuity.</p>
<p>In the subtheme &#x201C;active engagement,&#x201D; the need to actively formulate questions and wait for responses created a more engaging and authentic learning experience compared to VIC. Students described that SARI required them to maintain continuity in the conversation and remember previous information from the consultation, resembling the cognitive demand of real-life clinical encounters.</p>
<p>In the subtheme &#x201C;memory and continuity,&#x201D; students reported that the conversational nature of SARI interactions required them to maintain continuity throughout the VP encounter, relying on working memory rather than visual references. The verbal communication made them more prone to remember details from the consultation, promoting active listening and mental organisation, compared to being able to repeatedly return to relevant information on a computer screen, as was possible in VIC.</p>
</sec>
<sec id="sec16">
<title>Empathetic interaction</title>
<p>The theme &#x201C;empathetic interaction&#x201D; captured students&#x2019; direct experiences and reflections on empathetic engagement with VPs in both platforms. Unlike the other themes, this emerged as a unified concept without being divided into distinct sub-themes.</p>
<p>Students reported experiencing varying degrees of empathy in their interactions with the two platforms, with SARI consistently evoking more empathetic responses compared to VIC. The enhanced authenticity of the multimodal social robotic interactions created conditions where students could practise empathetic communication more naturally. However, while they acknowledged that neither of the two platforms fully replicated the empathetic connection that becomes possible with real patients, students identified SARI as substantially closer to authentic clinical encounters in its capacity to evoke and allow practice of empathetic behaviours. The perceived authenticity of the interaction influenced the degree of empathy in students&#x2019; responses, with greater perceived authenticity leading to a higher degree of empathy.</p>
</sec>
<sec id="sec17">
<title>Complementary learning value</title>
<p>The theme &#x201C;complementary learning value&#x201D; recognised that both platforms offered distinct educational benefits, with their differences creating complementary learning opportunities. This theme was further divided into two sub-themes, i.e., (i) diagnostic versus empathetic skills, and (ii) standardisation and variance.</p>
<p>In the subtheme &#x201C;diagnostic versus empathetic skills,&#x201D; students identified different learning areas between the platforms. The structured format and comprehensive information in VIC optimised diagnostic reasoning training, allowing systematic exploration of clinical data without the interpersonal aspects. In contrast, SARI was seen as stronger in balancing diagnostic reasoning with empathetic communication, requiring students to obtain clinical information through interaction that evoked empathetic behaviour.</p>
<p>In the subtheme &#x201C;standardisation and variance,&#x201D; the two platforms offered distinct advantages in terms of standardised learning experiences. VIC ensured that all students received identical information, facilitated standardised assessment, and provided consistent representation of patient cases. In contrast, SARI allowed a controlled variance based on individual communication approaches, resulting in different groups obtaining slightly different sets of information. The students appreciated this variety in information as it stimulated rich discussions during follow-up seminars, where groups compared their different interaction strategies and the information they had gathered with the other student groups.</p>
</sec>
<sec id="sec18">
<title>Quantitative comparisons of self-perceived empathetic conduct</title>
<p>Of 178 students who participated in quantitative evaluations, 93 (52%) were women and 86 students (48%) were men. Regarding prior exposure, 29 students (16%) reported previous experience with VPs, whereas 150 students (84%) had no previous experience. The students&#x2019; mean age was 25.3 (SD: 5.4) years. Regarding the order of platforms, 101 students (56%) started with SARI and 77 (43%) started with VIC. In relation to self-experienced empathy during the VP encounter, students preferred SARI compared to VIC (78% versus 6%; OR 190.4; 95% CI: 76.8&#x2013;472.0; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), compared to equal preference (78% versus 17%; OR: 21.2; 95% CI: 12.1&#x2013;37.0; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001), and compared to VIC combined with equal preference (78% versus 22%; OR: 11.9; 95% CI: 7.2&#x2013;19.6; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). Similar patterns were seen after stratification into subgroups of interest, i.e., female or male students, students who had and students who did not have prior experience with VPs, as well as students who had started with SARI and students who had started with VIC. Results are illustrated in <xref ref-type="fig" rid="fig1">Figures 1</xref>, <xref ref-type="fig" rid="fig2">2</xref>.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Forest plots illustrating results from Fisher&#x2019;s exact test with Monte Carlo simulation on proportion of students preferring SARI versus comparator. Panel <bold>(A)</bold> shows comparisons between SARI and VIC (blue colour). Panel <bold>(B)</bold> illustrates comparisons between SARI and Equal preference (green colour). Panel <bold>(C)</bold> shows comparisons between SARI and Not SARI (red colour). Circles denote odds ratios (ORs) and whiskers 95% confidence intervals (CIs) on a logarithmic scale. SARI, Social AI-enhanced Robotic Interface; VIC, Virtual Interactive Case system.</p>
</caption>
<graphic xlink:href="frai-09-1795842-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Three grouped forest plots compare student preference subgroups for SARI versus alternatives (VIC, Equal, Not SARI) across gender and experience, showing odds ratios, confidence intervals, and p-values, all favoring SARI with statistically significant differences.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Bar plots illustrating the proportions of medical students&#x2019; self-perceived VP platform preference for empathetic conduct categorized by subgroups of interest (sex, previous experience of VP platforms, and platform introduced first). Exp., Experience; SARI, Social AI-enhanced Robotic Interface; VIC, Virtual Interactive Case system; VP, virtual patient.</p>
</caption>
<graphic xlink:href="frai-09-1795842-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Stacked bar chart compares platform preferences (SARI, Equal preference, VIC) across overall results, sex, previous experience, and platform order. SARI is the most preferred platform in all groups, with percentages ranging from sixty-nine percent to eighty-four percent, followed by equal preference, then VIC. Total sample size and subgroup sizes are indicated below each column. The chart uses blue for SARI, green for equal preference, and yellow-orange for VIC.</alt-text>
</graphic>
</fig>
<p>Results from VAS data demonstrated that students preferred SARI compared to VIC (median: 2.0; IQR: 1.0&#x2013;4.0; <italic>W</italic>: 738.5; <italic>r</italic>: 0.70; <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). This difference remained statistically significant in comparisons within the student subgroups of interest mentioned above. Results are illustrated in <xref ref-type="fig" rid="fig3">Figure 3</xref> and detailed in <xref ref-type="supplementary-material" rid="SM1">Supplementary Tables S5&#x2013;S8</xref>.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Density plots illustrating distributions of responses regarding VP platform preference. Shown are also results from Wilcoxon signed rank tests performed for comparisons of scores with a hypothetical score of 5 (equal preference of platforms) for each student. The different horizontal panels show, from top to bottom, the overall distribution of responses in the entire cohort of students, overlayed distributions in women and men, overlayed distributions in students with and without prior experience of VPs, and overlayed distributions in subgroups of students starting with SARI or VIC. SARI, Social AI-enhanced Robotic Interface; VIC, Virtual Interactive Case system; VP, virtual patient.</p>
</caption>
<graphic xlink:href="frai-09-1795842-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Four density plots show preference score distributions for SARI versus VIC platforms across overall, sex, previous experience, and platform order groups. Each plot uses color shading to compare subgroups, with vertical dashed lines for means and p-values for group differences.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec sec-type="discussion" id="sec19">
<title>Discussion</title>
<p>Our study combined qualitative and quantitative methodology to investigate whether an AI-enhanced social robotic VP platform could foster a higher degree of self-experienced empathetic behaviour in medical students compared with a computer-based platform. The findings demonstrate a clear advantage of SARI over the conventional computer-based platform VIC for facilitating empathetic engagement. Thematic analysis of qualitative data yielded five major themes relating to empathetic behaviour consistently favouring SARI, while quantitative analysis confirmed these findings with students demonstrating a significant preference for SARI across all subgroups of interest.</p>
<p>The students&#x2019; experiences with SARI can be understood through the dual-process framework that distinguishes between cognitive and emotional empathy (<xref ref-type="bibr" rid="ref26">Hojat, 2007</xref>; <xref ref-type="bibr" rid="ref47">Sulzer et al., 2016</xref>). SARI appeared to promote cognitive empathy through requirements for active questioning, real-time interpretation of responses, and continuous dialogue. The themes &#x201C;cognitive immersion&#x201D; and its sub-themes &#x201C;active engagement&#x201D; and &#x201C;memory and continuity&#x201D; captured how students needed to actively generate questions and process information, mirroring the cognitive demands of real patient encounters. This aligns with the conceptualisation of empathy as a predominantly cognitive skill, as described by <xref ref-type="bibr" rid="ref47">Sulzer et al. (2016)</xref>, where understanding a patient&#x2019;s perspective takes precedence over emotional mirroring. This stands in contrast to the more passive engagement observed with VIC, where pre-written questions limit the cognitive processes that are necessary for empathetic conduct.</p>
<p>Simultaneously, SARI appeared to promote emotional empathy through its multimodal expression capabilities. The themes &#x201C;physical embodiment&#x201D; and &#x201C;responses to emotional cues&#x201D; illustrate how students experienced spontaneous emotional responses when the robotic VP expressed worry or distress, making the students feel compelled to address the VP&#x2019;s emotional state and offer reassurance, suggesting that the multimodal interaction triggered affective empathetic processes beyond cognitive perspective-taking. The interplay between these two dimensions was particularly evident in the theme &#x201C;empathetic interaction,&#x201D; where students described how the enhanced authenticity of the multimodal interaction created conditions for a more engaging empathetic experience. The findings thus support the notion that empathy in medical education is not merely about emotional resonance but about active cognitive engagement with patient perspectives, a process that appeared to be better facilitated through the embodied interaction with the social robot and was seen as an important aspect of person-centred communication (<xref ref-type="bibr" rid="ref5">American Geriatrics Society Expert Panel on Person&#x2010;Centered Care et al., 2016</xref>; <xref ref-type="bibr" rid="ref24">Hashim, 2017</xref>).</p>
<p>The advantages of SARI for fostering empathetic behaviour remained consistent despite individual characteristics and prior exposures, as evidenced by the consistent preference across all subgroups of interest. Contrary to previous literature suggesting variances in expressions of empathy between sexes (<xref ref-type="bibr" rid="ref6">Berg et al., 2011</xref>; <xref ref-type="bibr" rid="ref27">Hojat et al., 2002</xref>), our findings showed similar patterns between female and male medical students. Despite a numerical trend towards more students starting with SARI, suggesting a possible primacy effect, the platform order did not significantly influence the students&#x2019; preference. Similarly, prior VP experience did not substantially alter students&#x2019; preferences. This robustness across subgroups of interest strengthens our conclusion that the multimodal and interactive nature of SARI provides fundamental advantages for empathetic conduct that appeal broadly to medical students regardless of background characteristics.</p>
<p>Our findings align with emerging evidence on the value of physically embodied AI systems in healthcare education. A recent study showed that medical students who performed role-play with AI-enhanced humanoid robots for training English for medical purposes achieved significantly greater communication competence and empathy compared to those using only LLM-based virtual agents in a learning-by-teaching context (<xref ref-type="bibr" rid="ref19">Derakhshan et al., 2025</xref>). Taken together, these observations suggest that physical embodiment plays a crucial role in facilitating empathetic engagement. Furthermore, our results support previous work illustrating the role of a physical presence and multimodal interaction to facilitate deeper emotional engagement in dialogue (<xref ref-type="bibr" rid="ref16">Cummings and Bailenson, 2016</xref>; <xref ref-type="bibr" rid="ref37">Lee, 2004</xref>). SARI created conditions that allowed students to engage in empathetic behaviour with VPs through its ability to express and respond to emotions through facial expressions, voice modulation, and real-time communication adaptability&#x2014;features that consistently enhanced the authenticity of the interaction and thereby supported empathetic responses. Our research team has previously demonstrated that this enhanced authenticity of SARI also positively impacts clinical reasoning training in medical students (<xref ref-type="bibr" rid="ref7">Borg et al., 2025</xref>; <xref ref-type="bibr" rid="ref8">Borg et al., 2024</xref>).</p>
<p>The integration of LLM-enhanced social robots into medical education raises important considerations about the broader role of AI in healthcare training and practice (<xref ref-type="bibr" rid="ref3">Alam et al., 2023</xref>). As students increasingly interact with AI systems during their education, educators must consider how these experiences shape students&#x2019; approaches to person-centred care (<xref ref-type="bibr" rid="ref40">Nagi et al., 2023</xref>). The positive perception of the social robotic platform suggests that, rather than detracting from humanistic aspects of medicine, thoughtfully designed AI systems may enhance students&#x2019; capacity for empathetic communication by providing safe environments for practice (<xref ref-type="bibr" rid="ref41">Park and Whang, 2022</xref>). However, educators must also remain vigilant about the values and communication patterns being modelled by these systems, ensuring they align with best practices in person-centred care. As AI systems become more prevalent in medicine, early exposure to such technologies through educational platforms like SARI may help students develop appropriate levels of critical engagement with AI-generated information (<xref ref-type="bibr" rid="ref4">Ali, 2025</xref>). Importantly, AI-enhanced tools like SARI should be viewed as complementary for empathy training rather than replacements of human encounters. While our findings demonstrate that SARI can effectively facilitate both cognitive and emotional dimensions of empathetic engagement, the irreplaceable elements of genuine human connection must remain central to clinical education. The role of such platforms is therefore to provide accessible opportunities for students to develop and practise empathetic communication skills in a safe environment, thereby better preparing them for the humanistic demands of real clinical encounters.</p>
<sec id="sec20">
<title>Limitations and future directions</title>
<p>Despite these promising findings, our study design has several important limitations that should be considered when interpreting the results. The use of English rather than Swedish for the VP cases represents a constraint with potential impact on empathetic expression. Research has shown that communication of empathy can differ when using one&#x2019;s native versus second language, with potentially reduced emotional resonance when using a non-native language (<xref ref-type="bibr" rid="ref13">Caldwell-Harris, 2014</xref>). The students&#x2019; abilities to demonstrate empathetic communication may have been constrained compared to what they could have achieved in Swedish, potentially influencing our findings, although this limitation likely affected both platforms similarly. Research on bilingual emotional expression suggests that communicating in a second language can reduce emotional intensity and the naturalness of affective expression (<xref ref-type="bibr" rid="ref13">Caldwell-Harris, 2014</xref>; <xref ref-type="bibr" rid="ref33">Keysar et al., 2012</xref>), which may have constrained the depth of empathetic expressions on both platforms. While the differences we observed between SARI and VIC may represent conservative estimates of the actual differences, the fact that SARI still demonstrated significant advantages with high odds ratios despite this shared constraint speaks to the robustness of practising empathetic conduct via the embodied multimodal interaction of SARI compared to the click-based interaction in VIC. Furthermore, while the LLM-enhanced social robot represents an advancement in VP technology, it still faces technical challenges such as occasional mechanical responses and connection difficulties that could impact educational outcomes. Also, as SARI did not allow for physical examination options, this might have affected students&#x2019; perception of empathy. 
Due to physical touch being an important aspect of experiencing empathy in physician-patient relationships, inclusion of real physical examination options might have increased the empathetic immersion (<xref ref-type="bibr" rid="ref32">Kelly et al., 2020</xref>). While SARI appeared robust in promoting empathetic engagement through verbal and visual modalities, future iterations that incorporate haptic feedback or tactile interaction options could further enhance the empathetic immersion and bridge this gap.</p>
<p>Despite these technological constraints, we aimed to mitigate methodological limitations by enrolling a relatively large sample size (<italic>n</italic>&#x202F;=&#x202F;23) for the qualitative component, generating a rich and diverse dataset that increases the representativeness of our findings. The quantitative validation of platform preference with 178 students further strengthens our conclusions. However, it is important to note that the results from this study might not be translatable within other contexts, warranting further evaluations in different settings. Our quantitative evaluation relied on students&#x2019; self-perceived platform preferences rather than objective assessments. While this was a deliberate methodological choice, the absence of external measures such as validated empathy scales, observer-rated assessments, or behavioural coding represents a limitation. Future studies would benefit from incorporating objective assessment such as the Jefferson Scale of Empathy (<xref ref-type="bibr" rid="ref29">Hojat et al., 2001</xref>), observer-rated assessment, or behavioural coding of student-VP interactions, to triangulate with self-reported data.</p>
<p>The transferability of empathetic skills developed during VP simulations to real patient encounters remains an open question that warrants investigation. In future research, it would be of interest to investigate this transfer through longitudinal studies with pre- and post-intervention assessments using validated empathy instruments and investigation of dose&#x2013;response relationships between the number and frequency of VP training sessions and empathetic outcomes, such as observer-rated assessment or patient satisfaction data. Additionally, investigating how specific features of the social robotic interface of SARI (facial expressions, voice qualities, response timing) specifically contribute to empathetic engagement could help optimise future VP platforms. Lastly, exploring how cultural and linguistic factors influence empathetic responses to social robotic VPs could address important questions about the cross-cultural applicability of such educational technologies and inform adaptations needed across educational contexts.</p>
</sec>
<sec id="sec21">
<title>Concluding remarks</title>
<p>In summary, this study demonstrates that our LLM-enhanced social robotic VP platform offers substantial advantages over an established conventional computer-based VP platform for fostering empathetic behaviour in medical students. The physical embodiment, multimodal interaction, and responsive dialogue capabilities of SARI created conditions more conducive to empathetic engagement, as evidenced by both qualitative themes and quantitative preference data. These findings were consistent across student subgroups of interest, suggesting broad applicability. Despite technological limitations, the enhanced authenticity and interactivity conferred from the social robotic platform created a more engaging learning environment that bridges the gap between text-based simulation and real clinical encounters. As medical education evolves alongside technological advances, platforms like SARI may play an important role in complementing clinical rotations by providing standardised, accessible patient encounters, which help students acquire the crucial empathetic communication skills that are necessary for safe, efficient, and person-centred patient care, alongside training in clinical reasoning.</p>
</sec>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec22">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="sec23">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Swedish Ethical Review Authority. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec sec-type="author-contributions" id="sec24">
<title>Author contributions</title>
<p>AB: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. BJ: Data curation, Formal analysis, Investigation, Project administration, Writing &#x2013; review &#x0026; editing. CiG: Data curation, Investigation, Project administration, Writing &#x2013; review &#x0026; editing. VH: Data curation, Investigation, Project administration, Writing &#x2013; review &#x0026; editing. AH: Data curation, Investigation, Project administration, Writing &#x2013; review &#x0026; editing. JS: Data curation, Investigation, Project administration, Writing &#x2013; review &#x0026; editing. WI: Data curation, Investigation, Project administration, Writing &#x2013; review &#x0026; editing. FE: Data curation, Investigation, Project administration, Writing &#x2013; review &#x0026; editing. MR: Conceptualization, Investigation, Methodology, Supervision, Writing &#x2013; review &#x0026; editing. SE: Conceptualization, Investigation, Methodology, Supervision, Writing &#x2013; review &#x0026; editing. CaG: Conceptualization, Investigation, Methodology, Supervision, Writing &#x2013; review &#x0026; editing. GS: Project administration, Software, Supervision, Writing &#x2013; review &#x0026; editing. IP: Conceptualization, Funding acquisition, Resources, Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>The authors would like to thank the medical students who participated in the study, as well as the medical staff at the Division of Rheumatology at the Karolinska University Hospital, Stockholm, Sweden.</p>
</ack>
<sec sec-type="COI-statement" id="sec25">
<title>Conflict of interest</title>
<p>IP has received research funding and/or honoraria from Amgen, AstraZeneca, Aurinia, BMS, Eli Lilly, Gilead, GSK, Janssen, Novartis, Otsuka, and Roche. GS is a co-founder and Chief Scientist at Furhat Robotics. Contributions by GS were primarily in the technical development of SARI and in providing critical revision of the manuscript for intellectual content. GS did not influence the design of the study, the collection of data, or the analytical pipeline followed in the study.</p>
<p>The remaining author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec26">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was used in the creation of this manuscript. The authors used ChatGPT (OpenAI) for language refinement and grammar checking in specific sections of the manuscript during the writing process. All scientific content, study design, data analysis, statistical interpretation, and conclusions were developed entirely by the authors. The authors take full responsibility for the accuracy, integrity, and scientific validity of all content in this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec27">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec28">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/frai.2026.1795842/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/frai.2026.1795842/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.PDF" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ahn</surname><given-names>S.-e.</given-names></name> <name><surname>Edelbring</surname><given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>Designing a virtual patient as an interprofessional enactment: lessons learnt from the process</article-title>. <source>Int. J. Learn. Technol.</source> <volume>15</volume>, <fpage>204</fpage>&#x2013;<lpage>218</lpage>. doi: <pub-id pub-id-type="doi">10.1504/ijlt.2020.112106</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Al Moubayed</surname><given-names>S.</given-names></name> <name><surname>Beskow</surname><given-names>J.</given-names></name> <name><surname>Skantze</surname><given-names>G.</given-names></name> <name><surname>Granstr&#x00F6;m</surname><given-names>B.</given-names></name></person-group> (<year>2012</year>). &#x201C;<chapter-title>Furhat: a back-projected human-like robot head for multiparty human-machine interaction</chapter-title>,&#x201D; in <source>Cognitive behavioural systems: COST 2102 International Training School, Dresden, Germany, February 21&#x2013;26, 2011, Revised Selected Papers</source>. (<publisher-loc>Berlin, Heidelberg</publisher-loc>: <publisher-name>Springer</publisher-name>).</mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alam</surname><given-names>F.</given-names></name> <name><surname>Lim</surname><given-names>M. A.</given-names></name> <name><surname>Zulkipli</surname><given-names>I. N.</given-names></name></person-group> (<year>2023</year>). <article-title>Integrating AI in medical education: embracing ethical usage and critical understanding</article-title>. <source>Front. Med.</source> <volume>10</volume>:<fpage>1279707</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fmed.2023.1279707</pub-id>, <pub-id pub-id-type="pmid">37901398</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ali</surname><given-names>M.</given-names></name></person-group> (<year>2025</year>). <article-title>The role of AI in reshaping medical education: opportunities and challenges</article-title>. <source>Clin. Teach.</source> <volume>22</volume>:<fpage>e70040</fpage>. doi: <pub-id pub-id-type="doi">10.1111/tct.70040</pub-id>, <pub-id pub-id-type="pmid">39956546</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><collab id="coll1">American Geriatrics Society Expert Panel on Person&#x2010;Centered Care</collab><name><surname>Brummel&#x2010;Smith</surname><given-names>K.</given-names></name> <name><surname>Butler</surname><given-names>D.</given-names></name> <name><surname>Frieder</surname><given-names>M.</given-names></name> <name><surname>Gibbs</surname><given-names>N.</given-names></name> <name><surname>Henry</surname><given-names>M.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Person-centered care: a definition and essential elements</article-title>. <source>J. Am. Geriatr. Soc.</source> <volume>64</volume>, <fpage>15</fpage>&#x2013;<lpage>18</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jgs.13866</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Berg</surname><given-names>K.</given-names></name> <name><surname>Majdan</surname><given-names>J. F.</given-names></name> <name><surname>Berg</surname><given-names>D.</given-names></name> <name><surname>Veloski</surname><given-names>J.</given-names></name> <name><surname>Hojat</surname><given-names>M.</given-names></name></person-group> (<year>2011</year>). <article-title>Medical students' self-reported empathy and simulated patients' assessments of student empathy: an analysis by gender and ethnicity</article-title>. <source>Acad. Med.</source> <volume>86</volume>, <fpage>984</fpage>&#x2013;<lpage>988</lpage>. doi: <pub-id pub-id-type="doi">10.1097/ACM.0b013e3182224f1f</pub-id>, <pub-id pub-id-type="pmid">21694558</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Borg</surname><given-names>A.</given-names></name> <name><surname>Georg</surname><given-names>C.</given-names></name> <name><surname>Jobs</surname><given-names>B.</given-names></name> <name><surname>Huss</surname><given-names>V.</given-names></name> <name><surname>Waldenlind</surname><given-names>K.</given-names></name> <name><surname>Ruiz</surname><given-names>M.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Virtual patient simulations using social robotics combined with large language models for clinical reasoning training in medical education: mixed methods study</article-title>. <source>J. Med. Internet Res.</source> <volume>27</volume>:<fpage>e63312</fpage>. doi: <pub-id pub-id-type="doi">10.2196/63312</pub-id>, <pub-id pub-id-type="pmid">40053778</pub-id></mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Borg</surname><given-names>A.</given-names></name> <name><surname>Jobs</surname><given-names>B.</given-names></name> <name><surname>Huss</surname><given-names>V.</given-names></name> <name><surname>Gentline</surname><given-names>C.</given-names></name> <name><surname>Espinosa</surname><given-names>F.</given-names></name> <name><surname>Ruiz</surname><given-names>M.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Enhancing clinical reasoning skills for medical students: a qualitative comparison of LLM-powered social robotic versus computer-based virtual patients within rheumatology</article-title>. <source>Rheumatol. Int.</source> <volume>44</volume>, <fpage>3041</fpage>&#x2013;<lpage>3051</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00296-024-05731-0</pub-id>, <pub-id pub-id-type="pmid">39412574</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Braun</surname><given-names>V.</given-names></name> <name><surname>Clarke</surname><given-names>V.</given-names></name></person-group> (<year>2006</year>). <article-title>Using thematic analysis in psychology</article-title>. <source>Qual. Res. Psychol.</source> <volume>3</volume>, <fpage>77</fpage>&#x2013;<lpage>101</lpage>. doi: <pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Braun</surname><given-names>V.</given-names></name> <name><surname>Clarke</surname><given-names>V.</given-names></name></person-group> (<year>2021</year>). <source>Thematic analysis: a practical guide</source>. <publisher-name>Sage Publications Inc</publisher-name>.</mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Braun</surname><given-names>V.</given-names></name> <name><surname>Clarke</surname><given-names>V.</given-names></name></person-group> (<year>2023</year>). <article-title>Toward good practice in thematic analysis: avoiding common problems and be(com)ing a knowing researcher</article-title>. <source>Int. J. Transgend. Health</source> <volume>24</volume>, <fpage>1</fpage>&#x2013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1080/26895269.2022.2129597</pub-id>, <pub-id pub-id-type="pmid">36713144</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Brown</surname><given-names>T.</given-names></name> <name><surname>Mann</surname><given-names>B.</given-names></name> <name><surname>Ryder</surname><given-names>N.</given-names></name> <name><surname>Subbiah</surname><given-names>M.</given-names></name> <name><surname>Kaplan</surname><given-names>J. D.</given-names></name> <name><surname>Dhariwal</surname><given-names>P.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Language models are few-shot learners</article-title>. <source>Adv. Neural Inf. Proces. Syst.</source> <volume>33</volume>, <fpage>1877</fpage>&#x2013;<lpage>1901</lpage>. doi: <pub-id pub-id-type="doi">10.48550/arXiv.2005.14165</pub-id></mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Caldwell-Harris</surname><given-names>C. L.</given-names></name></person-group> (<year>2014</year>). <article-title>Emotionality differences between a native and foreign language: theoretical implications</article-title>. <source>Front. Psychol.</source> <volume>5</volume>:<fpage>1055</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2014.01055</pub-id>, <pub-id pub-id-type="pmid">25295019</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Colliver</surname><given-names>J. A.</given-names></name> <name><surname>Conlee</surname><given-names>M. J.</given-names></name> <name><surname>Verhulst</surname><given-names>S. J.</given-names></name> <name><surname>Dorsey</surname><given-names>J. K.</given-names></name></person-group> (<year>2010</year>). <article-title>Reports of the decline of empathy during medical education are greatly exaggerated: a reexamination of the research</article-title>. <source>Acad. Med.</source> <volume>85</volume>, <fpage>588</fpage>&#x2013;<lpage>593</lpage>. doi: <pub-id pub-id-type="doi">10.1097/ACM.0b013e3181d281dc</pub-id>, <pub-id pub-id-type="pmid">20354372</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cook</surname><given-names>D. A.</given-names></name></person-group> (<year>2024</year>). <article-title>Creating virtual patients using large language models: scalable, global, and low cost</article-title>. <source>Med. Teach.</source> <volume>47</volume>, <fpage>40</fpage>&#x2013;<lpage>42</lpage>. doi: <pub-id pub-id-type="doi">10.1080/0142159x.2024.2376879</pub-id>, <pub-id pub-id-type="pmid">38992981</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cummings</surname><given-names>J. J.</given-names></name> <name><surname>Bailenson</surname><given-names>J. N.</given-names></name></person-group> (<year>2016</year>). <article-title>How immersive is enough? A meta-analysis of the effect of immersive technology on user presence</article-title>. <source>Media Psychol.</source> <volume>19</volume>, <fpage>272</fpage>&#x2013;<lpage>309</lpage>. doi: <pub-id pub-id-type="doi">10.1080/15213269.2015.1015740</pub-id></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Del Canale</surname><given-names>S.</given-names></name> <name><surname>Louis</surname><given-names>D. Z.</given-names></name> <name><surname>Maio</surname><given-names>V.</given-names></name> <name><surname>Wang</surname><given-names>X.</given-names></name> <name><surname>Rossi</surname><given-names>G.</given-names></name> <name><surname>Hojat</surname><given-names>M.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>The relationship between physician empathy and disease complications: an empirical study of primary care physicians and their diabetic patients in Parma, Italy</article-title>. <source>Acad. Med.</source> <volume>87</volume>, <fpage>1243</fpage>&#x2013;<lpage>1249</lpage>. doi: <pub-id pub-id-type="doi">10.1097/ACM.0b013e3182628fbf</pub-id>, <pub-id pub-id-type="pmid">22836852</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Deladisma</surname><given-names>A. M.</given-names></name> <name><surname>Cohen</surname><given-names>M.</given-names></name> <name><surname>Stevens</surname><given-names>A.</given-names></name> <name><surname>Wagner</surname><given-names>P.</given-names></name> <name><surname>Lok</surname><given-names>B.</given-names></name> <name><surname>Bernard</surname><given-names>T.</given-names></name> <etal/></person-group>. (<year>2007</year>). <article-title>Do medical students respond empathetically to a virtual patient?</article-title> <source>Am. J. Surg.</source> <volume>193</volume>, <fpage>756</fpage>&#x2013;<lpage>760</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.amjsurg.2007.01.021</pub-id>, <pub-id pub-id-type="pmid">17512291</pub-id></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Derakhshan</surname><given-names>A.</given-names></name> <name><surname>Teo</surname><given-names>T.</given-names></name> <name><surname>Khazaie</surname><given-names>S.</given-names></name></person-group> (<year>2025</year>). <article-title>Investigating the usefulness of artificial intelligence-driven robots in developing empathy for English for medical purposes communication: the role-play of Asian and African students</article-title>. <source>Comput. Hum. Behav.</source> <volume>162</volume>:<fpage>108416</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2024.108416</pub-id></mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Edelbring</surname><given-names>S.</given-names></name> <name><surname>Parodis</surname><given-names>I.</given-names></name> <name><surname>Lundberg</surname><given-names>I. E.</given-names></name></person-group> (<year>2018</year>). <article-title>Increasing reasoning awareness: video analysis of students&#x2019; two-party virtual patient interactions</article-title>. <source>JMIR Med. Educ.</source> <volume>4</volume>:<fpage>e9137</fpage>. doi: <pub-id pub-id-type="doi">10.2196/mededu.9137</pub-id></mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ellaway</surname><given-names>R.</given-names></name></person-group> (<year>2006</year>). <article-title>Weaving the 'e's together</article-title>. <source>Med. Teach.</source> <volume>28</volume>, <fpage>587</fpage>&#x2013;<lpage>590</lpage>. doi: <pub-id pub-id-type="doi">10.1080/01421590600909070</pub-id>, <pub-id pub-id-type="pmid">17594547</pub-id></mixed-citation></ref>
<ref id="ref22"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Emamikia</surname><given-names>S.</given-names></name> <name><surname>Gentline</surname><given-names>C.</given-names></name> <name><surname>Enman</surname><given-names>Y.</given-names></name> <name><surname>Parodis</surname><given-names>I.</given-names></name></person-group> (<year>2022</year>). <article-title>How can we enhance adherence to medications in patients with systemic lupus erythematosus? Results from a qualitative study</article-title>. <source>J. Clin. Med.</source> <volume>11</volume>:<fpage>1857</fpage>. doi: <pub-id pub-id-type="doi">10.3390/jcm11071857</pub-id>, <pub-id pub-id-type="pmid">35407466</pub-id></mixed-citation></ref>
<ref id="ref23"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Georg</surname><given-names>C.</given-names></name> <name><surname>Zary</surname><given-names>N.</given-names></name></person-group> (<year>2014</year>). <article-title>Web-based virtual patients in nursing education: development and validation of theory-anchored design and activity models</article-title>. <source>J. Med. Internet Res.</source> <volume>16</volume>:<fpage>e105</fpage>. doi: <pub-id pub-id-type="doi">10.2196/jmir.2556</pub-id>, <pub-id pub-id-type="pmid">24727709</pub-id></mixed-citation></ref>
<ref id="ref24"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hashim</surname><given-names>M. J.</given-names></name></person-group> (<year>2017</year>). <article-title>Patient-centered communication: basic skills</article-title>. <source>Am. Fam. Physician</source> <volume>95</volume>, <fpage>29</fpage>&#x2013;<lpage>34</lpage>, <pub-id pub-id-type="pmid">28075109</pub-id></mixed-citation></ref>
<ref id="ref25"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hege</surname><given-names>I.</given-names></name> <name><surname>Kononowicz</surname><given-names>A. A.</given-names></name> <name><surname>Tolks</surname><given-names>D.</given-names></name> <name><surname>Edelbring</surname><given-names>S.</given-names></name> <name><surname>Kuehlmeyer</surname><given-names>K.</given-names></name></person-group> (<year>2016</year>). <article-title>A qualitative analysis of virtual patient descriptions in healthcare education based on a systematic literature review</article-title>. <source>BMC Med. Educ.</source> <volume>16</volume>:<fpage>146</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12909-016-0655-8</pub-id>, <pub-id pub-id-type="pmid">27177766</pub-id></mixed-citation></ref>
<ref id="ref26"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hojat</surname><given-names>M.</given-names></name></person-group> (<year>2007</year>). <source>Empathy in patient care: antecedents, development, measurement, and outcomes</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>Springer</publisher-name>.</mixed-citation></ref>
<ref id="ref27"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hojat</surname><given-names>M.</given-names></name> <name><surname>Gonnella</surname><given-names>J. S.</given-names></name> <name><surname>Mangione</surname><given-names>S.</given-names></name> <name><surname>Nasca</surname><given-names>T. J.</given-names></name> <name><surname>Veloski</surname><given-names>J. J.</given-names></name> <name><surname>Erdmann</surname><given-names>J. B.</given-names></name> <etal/></person-group>. (<year>2002</year>). <article-title>Empathy in medical students as related to academic performance, clinical competence and gender</article-title>. <source>Med. Educ.</source> <volume>36</volume>, <fpage>522</fpage>&#x2013;<lpage>527</lpage>. doi: <pub-id pub-id-type="doi">10.1046/j.1365-2923.2002.01234.x</pub-id>, <pub-id pub-id-type="pmid">12047665</pub-id></mixed-citation></ref>
<ref id="ref28"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hojat</surname><given-names>M.</given-names></name> <name><surname>Louis</surname><given-names>D. Z.</given-names></name> <name><surname>Markham</surname><given-names>F. W.</given-names></name> <name><surname>Wender</surname><given-names>R.</given-names></name> <name><surname>Rabinowitz</surname><given-names>C.</given-names></name> <name><surname>Gonnella</surname><given-names>J. S.</given-names></name></person-group> (<year>2011</year>). <article-title>Physicians' empathy and clinical outcomes for diabetic patients</article-title>. <source>Acad. Med.</source> <volume>86</volume>, <fpage>359</fpage>&#x2013;<lpage>364</lpage>. doi: <pub-id pub-id-type="doi">10.1097/ACM.0b013e3182086fe1</pub-id>, <pub-id pub-id-type="pmid">21248604</pub-id></mixed-citation></ref>
<ref id="ref29"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hojat</surname><given-names>M.</given-names></name> <name><surname>Mangione</surname><given-names>S.</given-names></name> <name><surname>Nasca</surname><given-names>T. J.</given-names></name> <name><surname>Cohen</surname><given-names>M. J.</given-names></name> <name><surname>Gonnella</surname><given-names>J. S.</given-names></name> <name><surname>Erdmann</surname><given-names>J. B.</given-names></name> <etal/></person-group>. (<year>2001</year>). <article-title>The Jefferson scale of physician empathy: development and preliminary psychometric data</article-title>. <source>Educ. Psychol. Meas.</source> <volume>61</volume>, <fpage>349</fpage>&#x2013;<lpage>365</lpage>. doi: <pub-id pub-id-type="doi">10.1177/00131640121971158</pub-id></mixed-citation></ref>
<ref id="ref30"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Huwendiek</surname><given-names>S.</given-names></name> <name><surname>De leng</surname><given-names>B. A.</given-names></name> <name><surname>Zary</surname><given-names>N.</given-names></name> <name><surname>Fischer</surname><given-names>M. R.</given-names></name> <name><surname>Ruiz</surname><given-names>J. G.</given-names></name> <name><surname>Ellaway</surname><given-names>R.</given-names></name></person-group> (<year>2009</year>). <article-title>Towards a typology of virtual patients</article-title>. <source>Med. Teach.</source> <volume>31</volume>, <fpage>743</fpage>&#x2013;<lpage>748</lpage>. doi: <pub-id pub-id-type="doi">10.1080/01421590903124708</pub-id>, <pub-id pub-id-type="pmid">19811212</pub-id></mixed-citation></ref>
<ref id="ref31"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Irfan</surname><given-names>B.</given-names></name> <name><surname>Kuoppam&#x00E4;ki</surname><given-names>S.-M.</given-names></name> <name><surname>Skantze</surname><given-names>G.</given-names></name></person-group> (<year>2023</year>). <article-title>Between reality and delusion: challenges of applying large language models to companion robots for open-domain dialogues with older adults</article-title>. <source>Auton. Robots</source> <volume>49</volume>:<fpage>9</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s10514-025-10190-y</pub-id></mixed-citation></ref>
<ref id="ref32"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kelly</surname><given-names>M.</given-names></name> <name><surname>Svrcek</surname><given-names>C.</given-names></name> <name><surname>King</surname><given-names>N.</given-names></name> <name><surname>Scherpbier</surname><given-names>A.</given-names></name> <name><surname>Dornan</surname><given-names>T.</given-names></name></person-group> (<year>2020</year>). <article-title>Embodying empathy: a phenomenological study of physician touch</article-title>. <source>Med. Educ.</source> <volume>54</volume>, <fpage>400</fpage>&#x2013;<lpage>407</lpage>. doi: <pub-id pub-id-type="doi">10.1111/medu.14040</pub-id>, <pub-id pub-id-type="pmid">31793673</pub-id></mixed-citation></ref>
<ref id="ref33"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Keysar</surname><given-names>B.</given-names></name> <name><surname>Hayakawa</surname><given-names>S. L.</given-names></name> <name><surname>An</surname><given-names>S. G.</given-names></name></person-group> (<year>2012</year>). <article-title>The foreign-language effect: thinking in a foreign tongue reduces decision biases</article-title>. <source>Psychol. Sci.</source> <volume>23</volume>, <fpage>661</fpage>&#x2013;<lpage>668</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0956797611432178</pub-id>, <pub-id pub-id-type="pmid">22517192</pub-id></mixed-citation></ref>
<ref id="ref34"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname><given-names>S. S.</given-names></name> <name><surname>Kaplowitz</surname><given-names>S.</given-names></name> <name><surname>Johnston</surname><given-names>M. V.</given-names></name></person-group> (<year>2004</year>). <article-title>The effects of physician empathy on patient satisfaction and compliance</article-title>. <source>Eval. Health Prof.</source> <volume>27</volume>, <fpage>237</fpage>&#x2013;<lpage>251</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0163278704267037</pub-id>, <pub-id pub-id-type="pmid">15312283</pub-id></mixed-citation></ref>
<ref id="ref35"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kleinsmith</surname><given-names>A.</given-names></name> <name><surname>Rivera-Gutierrez</surname><given-names>D.</given-names></name> <name><surname>Finney</surname><given-names>G.</given-names></name> <name><surname>Cendan</surname><given-names>J.</given-names></name> <name><surname>Lok</surname><given-names>B.</given-names></name></person-group> (<year>2015</year>). <article-title>Understanding empathy training with virtual patients</article-title>. <source>Comput. Hum. Behav.</source> <volume>52</volume>, <fpage>151</fpage>&#x2013;<lpage>158</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2015.05.033</pub-id>, <pub-id pub-id-type="pmid">26166942</pub-id></mixed-citation></ref>
<ref id="ref36"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kononowicz</surname><given-names>A. A.</given-names></name> <name><surname>Woodham</surname><given-names>L. A.</given-names></name> <name><surname>Edelbring</surname><given-names>S.</given-names></name> <name><surname>Stathakarou</surname><given-names>N.</given-names></name> <name><surname>Davies</surname><given-names>D.</given-names></name> <name><surname>Saxena</surname><given-names>N.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Virtual patient simulations in health professions education: systematic review and meta-analysis by the digital health education collaboration</article-title>. <source>J. Med. Internet Res.</source> <volume>21</volume>:<fpage>e14676</fpage>. doi: <pub-id pub-id-type="doi">10.2196/14676</pub-id>, <pub-id pub-id-type="pmid">31267981</pub-id></mixed-citation></ref>
<ref id="ref37"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>K. M.</given-names></name></person-group> (<year>2004</year>). <article-title>Presence, explicated</article-title>. <source>Commun. Theory</source> <volume>14</volume>, <fpage>27</fpage>&#x2013;<lpage>50</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1468-2885.2004.tb00302.x</pub-id></mixed-citation></ref>
<ref id="ref38"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Malterud</surname><given-names>K.</given-names></name> <name><surname>Siersma</surname><given-names>V. D.</given-names></name> <name><surname>Guassora</surname><given-names>A. D.</given-names></name></person-group> (<year>2016</year>). <article-title>Sample size in qualitative interview studies: guided by information power</article-title>. <source>Qual. Health Res.</source> <volume>26</volume>, <fpage>1753</fpage>&#x2013;<lpage>1760</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1049732315617444</pub-id>, <pub-id pub-id-type="pmid">26613970</pub-id></mixed-citation></ref>
<ref id="ref39"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mishra</surname><given-names>C.</given-names></name> <name><surname>Verdonschot</surname><given-names>R.</given-names></name> <name><surname>Hagoort</surname><given-names>P.</given-names></name> <name><surname>Skantze</surname><given-names>G.</given-names></name></person-group> (<year>2023</year>). <article-title>Real-time emotion generation in human-robot dialogue using large language models</article-title>. <source>Front. Robot. AI</source> <volume>10</volume>:<fpage>1271610</fpage>. doi: <pub-id pub-id-type="doi">10.3389/frobt.2023.1271610</pub-id>, <pub-id pub-id-type="pmid">38106543</pub-id></mixed-citation></ref>
<ref id="ref40"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nagi</surname><given-names>F.</given-names></name> <name><surname>Salih</surname><given-names>R.</given-names></name> <name><surname>Alzubaidi</surname><given-names>M.</given-names></name> <name><surname>Shah</surname><given-names>H.</given-names></name> <name><surname>Alam</surname><given-names>T.</given-names></name> <name><surname>Shah</surname><given-names>Z.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Applications of artificial intelligence (AI) in medical education: a scoping review</article-title>. <source>Stud. Health Technol. Inform.</source> <volume>305</volume>, <fpage>648</fpage>&#x2013;<lpage>651</lpage>. doi: <pub-id pub-id-type="doi">10.3233/SHTI230581</pub-id>, <pub-id pub-id-type="pmid">37387115</pub-id></mixed-citation></ref>
<ref id="ref41"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Park</surname><given-names>S.</given-names></name> <name><surname>Whang</surname><given-names>M.</given-names></name></person-group> (<year>2022</year>). <article-title>Empathy in human-robot interaction: designing for social robots</article-title>. <source>Int. J. Environ. Res. Public Health</source> <volume>19</volume>:<fpage>1889</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ijerph19031889</pub-id>, <pub-id pub-id-type="pmid">35162909</pub-id></mixed-citation></ref>
<ref id="ref42"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Patel</surname><given-names>S.</given-names></name> <name><surname>Pelletier-Bui</surname><given-names>A.</given-names></name> <name><surname>Smith</surname><given-names>S.</given-names></name> <name><surname>Roberts</surname><given-names>M. B.</given-names></name> <name><surname>Kilgannon</surname><given-names>H.</given-names></name> <name><surname>Trzeciak</surname><given-names>S.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Curricula for empathy and compassion training in medical education: a systematic review</article-title>. <source>PLoS One</source> <volume>14</volume>:<fpage>e0221412</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0221412</pub-id>, <pub-id pub-id-type="pmid">31437225</pub-id></mixed-citation></ref>
<ref id="ref43"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Posel</surname><given-names>N.</given-names></name> <name><surname>Fleiszer</surname><given-names>D.</given-names></name> <name><surname>Shore</surname><given-names>B. M.</given-names></name></person-group> (<year>2009</year>). <article-title>12 tips: guidelines for authoring virtual patient cases</article-title>. <source>Med. Teach.</source> <volume>31</volume>, <fpage>701</fpage>&#x2013;<lpage>708</lpage>. doi: <pub-id pub-id-type="doi">10.1080/01421590902793867</pub-id>, <pub-id pub-id-type="pmid">19513927</pub-id></mixed-citation></ref>
<ref id="ref44"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Posel</surname><given-names>N.</given-names></name> <name><surname>McGee</surname><given-names>J. B.</given-names></name> <name><surname>Fleiszer</surname><given-names>D. M.</given-names></name></person-group> (<year>2015</year>). <article-title>Twelve tips to support the development of clinical reasoning skills using virtual patient cases</article-title>. <source>Med. Teach.</source> <volume>37</volume>, <fpage>813</fpage>&#x2013;<lpage>818</lpage>. doi: <pub-id pub-id-type="doi">10.3109/0142159X.2014.993951</pub-id>, <pub-id pub-id-type="pmid">25523009</pub-id></mixed-citation></ref>
<ref id="ref45"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Skantze</surname><given-names>G.</given-names></name> <name><surname>Johansson</surname><given-names>M.</given-names></name> <name><surname>Beskow</surname><given-names>J.</given-names></name></person-group> (<year>2015</year>). &#x201C;<chapter-title>Exploring turn-taking cues in multi-party human-robot discussions about objects</chapter-title>,&#x201D; in <source>Proceedings of the 2015 ACM on international conference on multimodal interaction</source>. <publisher-loc>New York, NY, United States</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>.</mixed-citation></ref>
<ref id="ref46"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Su&#x00E1;rez</surname><given-names>A.</given-names></name> <name><surname>Adanero</surname><given-names>A.</given-names></name> <name><surname>D&#x00ED;az-Flores Garc&#x00ED;a</surname><given-names>V.</given-names></name> <name><surname>Freire</surname><given-names>Y.</given-names></name> <name><surname>Algar</surname><given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>Using a virtual patient via an artificial intelligence chatbot to develop dental students&#x2019; diagnostic skills</article-title>. <source>Int. J. Environ. Res. Public Health</source> <volume>19</volume>:<fpage>8735</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ijerph19148735</pub-id>, <pub-id pub-id-type="pmid">35886584</pub-id></mixed-citation></ref>
<ref id="ref47"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sulzer</surname><given-names>S. H.</given-names></name> <name><surname>Feinstein</surname><given-names>N. W.</given-names></name> <name><surname>Wendland</surname><given-names>C. L.</given-names></name></person-group> (<year>2016</year>). <article-title>Assessing empathy development in medical education: a systematic review</article-title>. <source>Med. Educ.</source> <volume>50</volume>, <fpage>300</fpage>&#x2013;<lpage>310</lpage>. doi: <pub-id pub-id-type="doi">10.1111/medu.12806</pub-id>, <pub-id pub-id-type="pmid">26896015</pub-id></mixed-citation></ref>
<ref id="ref48"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tong</surname><given-names>A.</given-names></name> <name><surname>Sainsbury</surname><given-names>P.</given-names></name> <name><surname>Craig</surname><given-names>J.</given-names></name></person-group> (<year>2007</year>). <article-title>Consolidated criteria for reporting qualitative research (COREQ): a 32-item checklist for interviews and focus groups</article-title>. <source>Int. J. Qual. Health Care</source> <volume>19</volume>, <fpage>349</fpage>&#x2013;<lpage>357</lpage>. doi: <pub-id pub-id-type="doi">10.1093/intqhc/mzm042</pub-id>, <pub-id pub-id-type="pmid">17872937</pub-id></mixed-citation></ref>
<ref id="ref49"><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll2">Virtual Interactive Case System</collab></person-group>. Available online at: <ext-link xlink:href="https://pie.med.utoronto.ca/VIC/index.htm" ext-link-type="uri">https://pie.med.utoronto.ca/VIC/index.htm</ext-link> (Accessed April 17, 2024).</mixed-citation></ref>
<ref id="ref50"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname><given-names>M.</given-names></name> <name><surname>Luo</surname><given-names>Y.</given-names></name> <name><surname>Zhang</surname><given-names>Y.</given-names></name> <name><surname>Xia</surname><given-names>R.</given-names></name> <name><surname>Qian</surname><given-names>H.</given-names></name> <name><surname>Zou</surname><given-names>X.</given-names></name></person-group> (<year>2023</year>). <article-title>Game-based learning in medical education</article-title>. <source>Front. Public Health</source> <volume>11</volume>:<fpage>1113682</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpubh.2023.1113682</pub-id>, <pub-id pub-id-type="pmid">36935696</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0002">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1031045/overview">Tuba Mutluer</ext-link>, Akdeniz University, T&#x00FC;rkiye</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0003">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2198869/overview">Yufang Hao</ext-link>, Beijing University of Chinese Medicine, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3367320/overview">Hua Ma</ext-link>, Suzhou Art and Design Technology Institute, China</p>
</fn>
</fn-group>
</back>
</article>