<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" 'JATS-journalpublishing1-3-mathml3.dtd'>
<article article-type="research-article" dtd-version="1.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Robot. AI</journal-id>
<journal-title-group>
<journal-title>Frontiers in Robotics and AI</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Robot. AI</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-9144</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1728647</article-id>
<article-id pub-id-type="doi">10.3389/frobt.2025.1728647</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Evaluating human perceptions of android robot facial expressions based on variations in instruction styles</article-title>
<alt-title alt-title-type="left-running-head">Fujii et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frobt.2025.1728647">10.3389/frobt.2025.1728647</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Fujii</surname>
<given-names>Ayaka</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1634442"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ishi</surname>
<given-names>Carlos Toshinori</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3275700"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Sakai</surname>
<given-names>Kurima</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/360663"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Funayama</surname>
<given-names>Tomo</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Iwai</surname>
<given-names>Ritsuko</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/295188"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Takahashi</surname>
<given-names>Yusuke</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1971126"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kumada</surname>
<given-names>Takatsune</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/138597"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Minato</surname>
<given-names>Takashi</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/123778"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>Guardian Robot Project, RIKEN</institution>, <city>Kyoto</city>, <country country="JP">Japan</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>Hiroshi Ishiguro Laboratories, Advanced Telecommunications Research Institute International</institution>, <city>Kyoto</city>, <country country="JP">Japan</country>
</aff>
<aff id="aff3">
<label>3</label>
<institution>Graduate School of Education, Kyoto University</institution>, <city>Kyoto</city>, <country country="JP">Japan</country>
</aff>
<aff id="aff4">
<label>4</label>
<institution>Graduate School of Informatics, Kyoto University</institution>, <city>Kyoto</city>, <country country="JP">Japan</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Ayaka Fujii, <email xlink:href="mailto:ayaka.fujii.wu@riken.jp">ayaka.fujii.wu@riken.jp</email>
</corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2025-12-16">
<day>16</day>
<month>12</month>
<year>2025</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>12</volume>
<elocation-id>1728647</elocation-id>
<history>
<date date-type="received">
<day>20</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>24</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>28</day>
<month>11</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2025 Fujii, Ishi, Sakai, Funayama, Iwai, Takahashi, Kumada and Minato.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Fujii, Ishi, Sakai, Funayama, Iwai, Takahashi, Kumada and Minato</copyright-holder>
<license>
<ali:license_ref start_date="2025-12-16">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Robots that interact with humans are required to express emotions in ways that are appropriate to the context. While most prior research has focused primarily on basic emotions, real-life interactions demand more nuanced expressions. In this study, we extended the expressive capabilities of the android robot Nikola by implementing 63 facial expressions, covering not only complex emotions and physical conditions, but also differences in intensity. At Expo 2025 in Japan, more than 600 participants interacted with Nikola by describing situations in which they wanted the robot to perform facial expressions. The robot inferred emotions using a large language model and performed corresponding facial expressions. Questionnaire responses revealed that participants rated the robot&#x2019;s behavior as more appropriate and emotionally expressive when their instructions were abstract, compared to when they explicitly included emotions or physical states. This suggests that abstract instructions enhance perceived agency in the robot. We also investigated and discussed how impressions towards the robot varied depending on the expressions it performed and the personality traits of participants. This study contributes to the research field of human&#x2013;robot interaction by demonstrating how adaptive facial expressions, in association with instruction styles, are linked to shaping human perceptions of social robots.</p>
</abstract>
<kwd-group>
<kwd>human-robot interaction</kwd>
<kwd>facial expression</kwd>
<kwd>android robot</kwd>
<kwd>emotion attribution</kwd>
<kwd>social agency</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>Moonshot Research and Development Program</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100020963</institution-id>
</institution-wrap>
</funding-source>
<award-id rid="sp1">JPMJMS2011</award-id>
</award-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported in part by JST Moonshot R&#x26;D Grant Number JPMJMS2011.</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="54"/>
<page-count count="12"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Human-Robot Interaction</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Robots that interact with humans are required to express emotions appropriately according to the situation. Facial expressions are known to have a major impact on expressing emotions in human-robot interaction (<xref ref-type="bibr" rid="B43">Sato et al., 2025</xref>). Many studies have shown that facial expressions can enhance the warmth and attractiveness of the robot and affect the trust and impressions towards the robot (<xref ref-type="bibr" rid="B22">Ghazali et al., 2018</xref>; <xref ref-type="bibr" rid="B38">Mishra et al., 2023</xref>; <xref ref-type="bibr" rid="B15">Dong et al., 2023</xref>).</p>
<p>In addition to a robot&#x2019;s ability to express emotions through facial expressions, how humans attribute emotions to the robot is also an important factor in realizing rich emotional interactions. Most previous research on emotion attribution has primarily relied on an approach in which researchers intentionally vary robot behaviors or experimental conditions (<xref ref-type="bibr" rid="B47">Thellman et al., 2022</xref>), while the influence of human behavior on how robots are perceived has received much less attention. In particular, it is still unclear how different styles of instruction to a robot influence the emotion attribution to the robot. A previous study on end-user programming shows that when users customize a robot&#x2019;s behavior, using a more abstract interface leads to a higher perceived social agency of the robot, compared to using a detailed interface (<xref ref-type="bibr" rid="B54">Zhang et al., 2025</xref>). This result implies the effect of abstract instructions in prompting users to derive interpretations of the robot&#x2019;s process of inference.</p>
<p>In this study, we hypothesize that such a mechanism could extend to emotional contexts. Based on the hypothesis, we design an experiment in which participants provide situational instructions to the robot to perform facial expressions, and investigate whether the abstractness of the instructions influences their emotion attribution to the robot. We assume that when participants provide abstract instructions without emotions for specifying the context for performing facial expressions, they may build a mental model of the robot&#x2019;s internal state based on predictive processing (<xref ref-type="bibr" rid="B11">Clark, 2013</xref>). This inference process can involve not only cognitive capacities (Agency), but also emotional capacities (Experience) of the robot (<xref ref-type="bibr" rid="B24">Gray et al., 2007</xref>). Accordingly, we examine the hypothesis that perceived agency and experience of the robot increase when participants specify only abstract situations rather than explicitly including emotions or physical conditions.</p>
<p>In order to conduct the experiment, a robot capable of performing a wide range of facial expressions in response to various situations is essential. Research on robotic facial expression generation has traditionally focused on the expression of basic emotions, such as happiness, anger, sadness, fear, surprise, and disgust (<xref ref-type="bibr" rid="B33">Kobayashi and Hara, 1993</xref>; <xref ref-type="bibr" rid="B2">Allison et al., 2008</xref>; <xref ref-type="bibr" rid="B21">Faraj et al., 2021</xref>; <xref ref-type="bibr" rid="B42">Sato et al., 2022</xref>). However, in real-life settings, people often expect not only basic emotions such as &#x201c;angry&#x201d; or &#x201c;sad,&#x201d; but also more nuanced expressions like &#x201c;noticing failure&#x201d; or &#x201c;drowsy.&#x201d; In addition to the range of capable expressions, the ability to select appropriate expressions according to the situation is also important.</p>
<p>To meet these requirements, we select an android robot named Nikola (<xref ref-type="bibr" rid="B42">Sato et al., 2022</xref>), which is one of the robots equipped with a large number of facial actuators, allowing for the detailed expression of subtle emotional nuances. Prior research indicates that embodiment and physical presence contribute to improving recognition of specific facial expressions in agents (<xref ref-type="bibr" rid="B39">Mollahosseini et al., 2018</xref>). Therefore, we adopt a physically embodied robot capable of expressing a wide range of facial expressions, rather than a virtual agent, to sufficiently investigate how people attribute emotions to robots. In addition, Nikola is designed to resemble a child so that it can interact naturally with both adults and children. As we conduct the large-scale experiment at Expo 2025 in Japan, which involves participants across a wide range of age groups, Nikola serves as an ideal platform for the experiment. In the previous study, our research team has demonstrated that Nikola can express around twenty complex emotions, such as boredom and hesitation (<xref ref-type="bibr" rid="B14">Diel et al., 2025</xref>). In this study, we extend Nikola to be capable of displaying over 60 types of facial expressions, and investigate how participants evaluated the expressions performed by the robot in response to their instructions about the situations.</p>
<p>The contributions of this study are twofold:<list list-type="order">
<list-item>
<p>We evaluate the effect of the abstractness of instructions to the robot on emotion attribution. We conduct a large-scale experiment outside the laboratory setting, enabling the collection of data on more natural human reactions.</p>
</list-item>
<list-item>
<p>We implement 63 facial expressions, including complex emotions and physical conditions, in an android robot, and verify whether the robot can appropriately perform the facial expressions in response to user-specified situations.</p>
</list-item>
</list>
</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Related works</title>
<sec id="s2-1">
<label>2.1</label>
<title>Variety of facial expressions</title>
<p>Many studies have been conducted on the classification of human facial expressions. Ekman&#x2019;s six basic emotions (happiness, anger, sadness, fear, surprise, and disgust) (<xref ref-type="bibr" rid="B17">Ekman and Friesen, 1971</xref>) are widely known and have been referenced in many robot research (<xref ref-type="bibr" rid="B33">Kobayashi and Hara, 1993</xref>; <xref ref-type="bibr" rid="B2">Allison et al., 2008</xref>; <xref ref-type="bibr" rid="B21">Faraj et al., 2021</xref>; <xref ref-type="bibr" rid="B42">Sato et al., 2022</xref>). However, recent research on human facial expressions has shown that the repertoire of facial expressions is not limited to the basic expressions. Du et al. studied compound facial expressions that combine different emotions and defined 21 different emotion categories (<xref ref-type="bibr" rid="B16">Du et al., 2014</xref>). For example, happily surprised and angrily surprised are different compound emotion categories. Cowen et al. showed that facial and body expressions can be used to express at least 28 different categories of emotions, including complex emotions such as embarrassment and relief (<xref ref-type="bibr" rid="B13">Cowen and Keltner, 2020</xref>).</p>
<p>It is also known that facial expressions reflect not only emotions but also physical conditions such as fatigue and pain. For example, Sundelin et al. showed that sleep deprivation affects facial characteristics related to the eyes, mouth, and skin (<xref ref-type="bibr" rid="B46">Sundelin et al., 2013</xref>). Numerous studies have also examined the use of facial expressions to assess pain (<xref ref-type="bibr" rid="B41">Prkachin, 2009</xref>; <xref ref-type="bibr" rid="B50">Werner et al., 2017</xref>). Furthermore, Strack et al. found that facial expressions when looking at food differ between when hungry and when full (<xref ref-type="bibr" rid="B45">Strack et al., 2009</xref>).</p>
<p>In addition, many studies show that the intensity of emotions changes the way facial expressions are made. Ekman et al. showed that the frequency and intensity of specific facial expressions change depending on the intensity of negative emotions (<xref ref-type="bibr" rid="B18">Ekman et al., 1980</xref>). Kunz et al. found a correlation between the degree of subjective pain and changes in facial expressions (<xref ref-type="bibr" rid="B35">Kunz et al., 2004</xref>).</p>
<p>However, in robotics research, facial expression generation has traditionally been based on basic emotion models, such as Ekman&#x2019;s six categories. While this approach enables standardized evaluation, it limits the diversity and contextual flexibility of expressions. To address this limitation, we expand the expressive capabilities of the android robot, Nikola, by implementing 63 distinct facial expressions, including variations in intensity. We incorporate not only basic emotions, but also complex emotions and expressions reflecting physical conditions to realize richer emotional interactions.</p>
</sec>
<sec id="s2-2">
<label>2.2</label>
<title>Emotion interpretation with context</title>
<p>An important aspect of emotional interaction is not only whether a robot can display easy-to-identify facial expressions, but also whether humans can feel emotions from the robot in a given context. Psychological studies have shown that emotion perception from facial expression is highly context-dependent. Carroll et al. demonstrated that facial expressions can be interpreted as different emotions depending on the context, rather than being interpreted as the emotion when seen in isolation (<xref ref-type="bibr" rid="B9">Carroll and Russell, 1996</xref>). For example, an expression that appears to be anger on its own tends to be interpreted as fear in a context of fear, and an expression that appears to be sadness on its own tends to be interpreted as disgust in a context of disgust. There are also numerous reports that neutral facial expression can be interpreted as different emotion expressions depending on the context (<xref ref-type="bibr" rid="B4">Barratt et al., 2016</xref>; <xref ref-type="bibr" rid="B8">Calbi et al., 2017</xref>).</p>
<p>In the case of robots as well, it is also known that facial expressions are recognized differently depending on contextual information (<xref ref-type="bibr" rid="B7">Bennett and &#x160;abanovi&#x107;, 2015</xref>), though some studies have shown that facial expressions that deviate from the context can negatively impact the trust and impression towards the robot (<xref ref-type="bibr" rid="B40">Paradeda et al., 2016</xref>; <xref ref-type="bibr" rid="B3">Appel et al., 2021</xref>). These findings suggest that, rather than attempting to produce one correct facial expression, it would be more reasonable to design a system that generates expressions within a range perceived as appropriate by humans, depending on the situational context.</p>
<p>However, most previous studies on robotic facial expressions have followed a paradigm in which the robot first displays an expression, and then human participants are asked to identify which emotion it represents. In our prior study using Nikola, where various complex expressions were displayed without contextual information, some expressions tended to be judged to reflect emotions other than their intended targets (<xref ref-type="bibr" rid="B14">Diel et al., 2025</xref>). Though the result suggests that the expression is also perceived as appropriate in other contexts, it does not imply that it is inappropriate in the context of the target emotion.</p>
<p>Despite the importance of context in interpreting facial expressions, little attention has been paid to whether a robot&#x2019;s expression is perceived as contextually appropriate, particularly when the context is imagined by the user. To address this gap, this study adopts a context-first approach: participants imagine and specify a situation at first, and the robot then displays a facial expression that corresponds to the situation. This enables the evaluation of the contextual appropriateness and emotion interpretation of facial expressions.</p>
</sec>
<sec id="s2-3">
<label>2.3</label>
<title>Emotion attribution</title>
<p>Agency and Experience are key dimensions of mind perception (<xref ref-type="bibr" rid="B24">Gray et al., 2007</xref>). Agency refers to the capacity to think and act, such as self-control, planning, reasoning, and decision-making, whereas Experience refers to the capacity to feel sensations and emotions, such as pain, hunger, pleasure, fear, and embarrassment. In the context of human-robot interaction, the extent to which these factors are attributed to a robot has been shown to influence trust, anthropomorphism, and satisfaction (<xref ref-type="bibr" rid="B52">Yam et al., 2021</xref>; <xref ref-type="bibr" rid="B44">Spatola and Wudarczyk, 2021</xref>; <xref ref-type="bibr" rid="B19">Esterwood and Robert, 2023</xref>).</p>
<p>Previous research on emotional attribution toward robots has largely examined factors such as robotic behavior and appearance, as well as the influence of human factors such as age and social background (<xref ref-type="bibr" rid="B25">Haring et al., 2015</xref>; <xref ref-type="bibr" rid="B1">Abubshait and Wiese, 2017</xref>; <xref ref-type="bibr" rid="B36">Manzi et al., 2020</xref>). However, little attention has been paid to how humans&#x2019; behavior, such as the way they interact with or give instructions to a robot, including the degree of abstraction in the instructions, affects their perception of the robot. To investigate this underexplored issue, we examine whether emotion attribution to the robot changes according to the way of giving instructions to the robot.</p>
</sec>
</sec>
<sec sec-type="materials|methods" id="s3">
<label>3</label>
<title>Materials and methods</title>
<sec id="s3-1">
<label>3.1</label>
<title>Hypotheses</title>
<p>Research on how the way humans give instructions affects emotion attribution to robots remains underdeveloped. In the field of end-user programming, research shows that customizing the behavior of home assist robots through low-granularity interfaces enhances perceived social agency compared to high-granularity interfaces (<xref ref-type="bibr" rid="B54">Zhang et al., 2025</xref>). This perceived social agency includes autonomy, perceived agency, social presence, social intelligence, and perceived intelligence. It is suggested that the abstractness of user instructions may influence the emotion attribution to the robot. Similarly, in the task of performing facial expressions, which is an important factor in human-robot interaction, we predict that abstract instructions can enhance emotion attribution, as the robot is more likely to be perceived as inferring the appropriate emotion on its own. However, this prediction has not yet been empirically tested because of the technical limitations of conventional robots, which lack the ability to perform nuanced emotional expressions in response to diverse user instructions.</p>
<p>In this study, we utilize the android robot, Nikola, which is capable of performing over 60 types of facial expressions, in order to investigate the prediction. Leveraging Nikola&#x2019;s rich expressive capabilities, we introduce a novel interactive framework in which participants freely describe a situation, and the robot performs a facial expression appropriate to that situation. This setup enables testing the effect of instructional style on emotion attribution. We examine whether emotion attribution to the robot changes when participants specify only abstract situations, compared to when their instructions include specific emotions or physical conditions, based on the following three hypotheses.</p>
<p>H1: Abstract instructions lead to higher perceptions of the robot giving appropriate responses (Agency).</p>
<p>H2: Abstract instructions lead to higher perceptions of the robot expressing emotions (Agency).</p>
<p>H3: Abstract instructions lead to higher perceptions of the robot showing empathy (Experience).</p>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>Overview of the experiment</title>
<p>We conducted a 7-day experiment at the Expo 2025 Osaka, Kansai, Japan, from 20 to 26 May 2025. Nikola (<xref ref-type="bibr" rid="B42">Sato et al., 2022</xref>), which is an android robot capable of performing facial expressions, was employed in the study. In each interaction, the participant specified scenes or situations to the robot, and the robot inferred the emotion and performed the corresponding facial expression. An example of the experiment scene is shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. We also performed an exploratory analysis of the relationship between personality traits based on the Big Five (<xref ref-type="bibr" rid="B29">Iwai et al., 2019</xref>) and impressions towards the robot.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Experiment scene at the Expo 2025 Osaka, Kansai, Japan.</p>
</caption>
<graphic xlink:href="frobt-12-1728647-g001.tif">
<alt-text content-type="machine-generated">A humanoid robot with a realistic head and a red shirt marked with &#x22;N&#x22; interacts with a young girl wearing glasses and a pink jacket. They are positioned in a futuristic setting, likely a tech expo or exhibit.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Android robot system</title>
<p>Nikola is an android robot with the appearance of a junior high school-aged child. It is capable of performing various facial expressions and head movements. Previous experiments have demonstrated that it can express not only the basic emotions (<xref ref-type="bibr" rid="B53">Yang et al., 2022</xref>) but also more complex expressions (<xref ref-type="bibr" rid="B14">Diel et al., 2025</xref>). There are 35 low-noise pneumatic actuators: 29 dedicated to facial movements, 3 for eye movements (left eye pan, right eye pan, and tilt motion of both eyes), and 3 for neck movements (roll, pitch, and yaw). The frontal facial surface and neck area are covered with soft silicone skin.</p>
<p>The demonstration system is implemented using Robot Operating System (ROS), integrating human tracker, audio tracker, speech recognition, and speech synthesis functionalities. These core modules are built upon the interaction system described in previous research (<xref ref-type="bibr" rid="B23">Glas et al., 2016</xref>). The human tracker is based on distance measurement using a stereo camera to estimate human positions near the robot. The audio tracker detects the speech activities of humans near the robot, based on sound source localization using two microphone arrays (<xref ref-type="bibr" rid="B27">Ishi et al., 2015</xref>). The speech signal of the target user who is interacting with the robot is then separated from interfering sound sources (<xref ref-type="bibr" rid="B28">Ishi et al., 2018</xref>), and sent to the speech recognition module.</p>
</sec>
<sec id="s3-4">
<label>3.4</label>
<title>Implementation of facial expression</title>
<p>Based on prior research on facial expressions (<xref ref-type="bibr" rid="B37">McDaniel et al., 2007</xref>; <xref ref-type="bibr" rid="B49">Vural et al., 2007</xref>; <xref ref-type="bibr" rid="B34">Krumhuber et al., 2013</xref>; <xref ref-type="bibr" rid="B16">Du et al., 2014</xref>; <xref ref-type="bibr" rid="B6">Benitez-Quiroz et al., 2016</xref>; <xref ref-type="bibr" rid="B12">Cordaro et al., 2018</xref>; <xref ref-type="bibr" rid="B5">Barrett et al., 2019</xref>), we prepared 63 facial expressions, as shown in <xref ref-type="fig" rid="F2">Figure 2</xref>. These facial expressions were selected considering their relevance in situations specified during the test trials before the experiment and their typical appearance in daily life. They included not only basic emotions but also complex emotions and expressions reflecting physical conditions. Each expression was assigned one to four emotion labels that matched its characteristics, resulting in a total of 127 labels. For example, BitterSmile (high intensity) was labeled as expressing a derisive and mocking smile, while NotFace (high intensity) was labeled as disagreement.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Nikola&#x2019;s neutral face and 63 facial expressions prepared for this experiment.</p>
</caption>
<graphic xlink:href="frobt-12-1728647-g002.tif">
<alt-text content-type="machine-generated">Grid of a humanoid robot displaying various facial expressions, each labeled with emotions like &#x22;Neutral,&#x22; &#x22;CoolSmile,&#x22; &#x22;AngryDisgust,&#x22; &#x22;FearfulSurprise,&#x22; and more. Each expression varies in intensity, from low to high.</alt-text>
</graphic>
</fig>
<p>To validate these facial expressions, we conducted an online survey using static photos of these expressions with a total of 121 participants. On average, 4.79 participants (ranging from 4 to 7) evaluated each expression. For each expression, we calculated the percentage of participants who judged it as appropriate, and the average of these percentages across all expressions was 82.5%. Sixty-two expressions were judged as appropriate by at least 50% of the participants, and one expression (Hungry) was judged appropriate by 40% of participants. These results suggest that the implemented facial expressions were generally perceived as appropriate, though some expressions showed variability in interpretation across participants, indicating potential limitations in their generalizability as facial expressions.</p>
<p>Using GPT-4o by OpenAI, Nikola inferred the emotion labels that closely matched the sentiment associated with a participant-specified situation. The described situation, along with all emotion labels, was input into the prompt, which also included an instruction for the model to output three emotion labels corresponding to the given situation. Subsequently, it showed the facial expressions corresponding to one of the inferred emotion labels.</p>
</sec>
<sec id="s3-5">
<label>3.5</label>
<title>Design of dialogue interaction flow</title>
<p>In the introduction phase, the robot first introduced itself and explained that it was skilled at imagining human emotions and expressing them through facial expressions. At first, the robot presented an example situation and performed the facial expression corresponding to that scene. For example, the robot said &#x201c;when I received a large pocket money, but found out that my friend received even more&#x201d; and performed the facial expression of NotFace (high-intensity). This initial example and its corresponding facial expression were randomly selected from a set of three pre-prepared options.</p>
<p>The facial expression trial phase was conducted twice in total. In each phase, the robot first asked the participant to describe a situation from their past experiences in which they would like the robot to perform a facial expression. Participants freely spoke about situations that came to mind, and no specific guidance was given regarding the level of abstraction of the speech. The participant&#x2019;s speech was transcribed using an automatic speech recognition system implemented through the Web Speech API provided by Google Chrome. The transcribed content was then summarized using GPT-4o. The robot verbally confirmed the summary with the participant. If the participant did not disagree, the robot proceeded to perform a facial expression. In the case in which the participant&#x2019;s speech was not detected within the time limit, the robot suggested a situation from the pre-prepared list and then proceeded to perform a facial expression. GPT was prompted to select three suitable emotion labels from the entire label set. Using one of these labels, the robot performed a corresponding facial expression. After the facial expression, the robot asked the participant for their impression of it. If the participant responded positively, the interaction moved to the next phase. If the participant responded negatively, the robot performed an alternative expression corresponding to a different inferred emotion label. If all three inferred emotions received negative responses, the phase concluded, and the interaction moved on. In the second trial phase, the robot asked the participant to provide a different situation, and the subsequent interaction followed the same process as the first trial phase.</p>
<p>After the second trial phase, the closing phase began. Based on the participant&#x2019;s reaction history to the performed expressions, the robot provided a brief comment on the interaction. Finally, the robot said a farewell message, and the demonstration ended.</p>
</sec>
<sec id="s3-6">
<label>3.6</label>
<title>Experiment procedure</title>
<p>Informed consent was obtained from all participants before the experiment. Before interacting with the robot, participants completed a questionnaire about their background, including age, gender, and Big Five personality traits. The interaction demonstration with the robot lasted approximately 4 min in total.</p>
<p>Following the demonstration, participants completed a post-experience questionnaire. The questionnaire consisted of three items (Q1&#x2013;Q3), each rated on a four-point Likert scale, where 1 indicated &#x201c;Not at all&#x201d; and 4 indicated &#x201c;Very much.&#x201d; The scale excluded a neutral midpoint intentionally.</p>
<p>Q1: The robot responded appropriately to what I said.</p>
<p>Q2: The robot expressed emotions towards me.</p>
<p>Q3: The robot empathized with my feelings.</p>
<p>These three items were extracted from <xref ref-type="bibr" rid="B30">Iwai et al. (2025)</xref> to align with the aim of this experiment and corresponded to the hypotheses H1&#x2013;H3. Three of the authors created a child-appropriate version of the pre- and post-experiment questionnaires for participants aged 6&#x2013;15, using child-friendly expressions that conveyed the same meaning. In addition, participants aged 16 and over rated their familiarity with each of seven types of robot-related technologies, including cleaning robots, serving robots, guide robots, smart speakers, pet robots, companion robots, and care robots, using a 5-point Likert scale. A rating of 1 indicated that the participant did not know the technology, while a rating of 5 indicated that they frequently used (or had used) it.</p>
<p>The experiment was conducted with the approval of the ethics committee of RIKEN (Permission number: Wako2025-31). It was audio-recorded, and participants&#x2019; utterances were analyzed by combining the results of speech recognition with transcripts produced after the experiment.</p>
</sec>
<sec id="s3-7">
<label>3.7</label>
<title>Participants</title>
<p>The participants were visitors to the Expo 2025. The number of visitors was not controlled, and only individuals who had given their consent were included in the study. Individuals who did not provide consent only experienced the interaction, without participating in the research.</p>
<p>After excluding cases with major system errors, there were 624 people with corresponding data from the robot interaction log and questionnaires. Of these, 252 were male, 368 were female, and 4 identified as other. Ages ranged from 6 to 86 years, with a mean age of 48.6 years. All participants were able to understand Japanese, since the interaction and questionnaire were conducted in Japanese. Participants aged between 6 and 17 years were included in the study only if consent was obtained from their accompanying guardian. Young children participated in the interaction and completed the questionnaire with the assistance of accompanying adults.</p>
</sec>
</sec>
<sec sec-type="results" id="s4">
<label>4</label>
<title>Results</title>
<sec id="s4-1">
<label>4.1</label>
<title>Interaction system evaluation</title>
<sec id="s4-1-1">
<label>4.1.1</label>
<title>Unsmooth interaction flow</title>
<p>Since 624 participants completed two facial expression trials each, a total of 1,248 trials were conducted. There were 177 cases (14.2%) in which the robot was unable to recognize speech from the participant within the time limit, and proposed a situation to proceed with the interaction. The reasons for these cases include failures in speech recognition and the inability of the participants to come up with a situation. We compared participants who experienced at least one case in which the robot proposed a situation (n &#x3d; 155) with those who did not (n &#x3d; 469). The median (mean <inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> SE) scores of the former group vs. the latter group were as follows: Q1: 3.00 (2.93 <inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0648) vs. 4.00 (3.51 <inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0309), Q2: 3.00 (3.11 <inline-formula id="inf4">
<mml:math id="m4">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0631) vs. 4.00 (3.43 <inline-formula id="inf5">
<mml:math id="m5">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0322), Q3: 3.00 (2.87 <inline-formula id="inf6">
<mml:math id="m6">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0605) vs. 3.00 (3.26 <inline-formula id="inf7">
<mml:math id="m7">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0349). The one-sided Mann&#x2013;Whitney U test showed that the former group gave significantly lower questionnaire ratings across all items (Q1: U &#x3d; 50988.0, p <inline-formula id="inf8">
<mml:math id="m8">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001, Q2: U &#x3d; 44822.5, p <inline-formula id="inf9">
<mml:math id="m9">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001, Q3: U &#x3d; 46493.5, p <inline-formula id="inf10">
<mml:math id="m10">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001).</p>
<p>There were a total of 68 cases (5.4%) where the robot&#x2019;s summary of the speech recognition results contained obvious errors. The causes of errors include cases where GPT-4o determined that there was insufficient information and did not summarize, and cases where the speech recognition ended midway through the participants&#x2019; speech, resulting in the opposite meaning of the summary. When comparing participants who experienced at least one summary error (n &#x3d; 67) with those who did not (n &#x3d; 557), the median (mean <inline-formula id="inf11">
<mml:math id="m11">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> SE) scores of the former group vs. the latter group were as follows: Q1: 3.00 (2.91 <inline-formula id="inf12">
<mml:math id="m12">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0844) vs. 4.00 (3.42 <inline-formula id="inf13">
<mml:math id="m13">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0312), Q2: 3.00 (3.21 <inline-formula id="inf14">
<mml:math id="m14">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0940) vs. 3.00 (3.37 <inline-formula id="inf15">
<mml:math id="m15">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0308), Q3: 3.00 (3.03 <inline-formula id="inf16">
<mml:math id="m16">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0876) vs. 3.00 (3.18 <inline-formula id="inf17">
<mml:math id="m17">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0330). The former had significantly lower questionnaire ratings (Q1: U &#x3d; 25923.0, p <inline-formula id="inf18">
<mml:math id="m18">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001, Q2: U &#x3d; 20880.5, p &#x3d; 0.039, Q3: U &#x3d; 21034.0, p &#x3d; 0.032).</p>
<p>In terms of facial expressions, the robot performed a different emotion once in 121 cases (9.6%) and twice in 43 cases (3.4%). Since a single trial could include up to three expressions, the maximum number of facial expression redos per trial was two. Some factors included the inability of the robot to identify an appropriate expression and unsuccessful summarization in the preceding stage, which in turn led to inappropriate expressions. Comparing participants who experienced at least one redo (n &#x3d; 151) with those who did not (n &#x3d; 473), the median (mean <inline-formula id="inf19">
<mml:math id="m19">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> SE) scores of the former group vs. the latter group were as follows: Q1: 3.00 (3.18 <inline-formula id="inf20">
<mml:math id="m20">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0580) vs. 4.00 (3.42 <inline-formula id="inf21">
<mml:math id="m21">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0345), Q2: 3.00 (3.15 <inline-formula id="inf22">
<mml:math id="m22">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0597) vs. 4.00 (3.42 <inline-formula id="inf23">
<mml:math id="m23">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0331), Q3: 3.00 (2.98 <inline-formula id="inf24">
<mml:math id="m24">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0612) vs. 3.00 (3.22 <inline-formula id="inf25">
<mml:math id="m25">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0355). A comparison revealed that the former group gave significantly lower questionnaire ratings (Q1: U &#x3d; 43073.0, p <inline-formula id="inf26">
<mml:math id="m26">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001, Q2: U &#x3d; 43426.0, p <inline-formula id="inf27">
<mml:math id="m27">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001, Q3: U &#x3d; 42121.0, p <inline-formula id="inf28">
<mml:math id="m28">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001).</p>
</sec>
<sec id="s4-1-2">
<label>4.1.2</label>
<title>Analysis target: participants with smooth interaction flow</title>
<p>Based on the above results, the unsmooth interaction flows were considered to have influenced the evaluation of the experience. In order to investigate the effects of the instruction styles, we excluded unsmooth flows from subsequent analyses. The following analyses were then conducted with participants who experienced interactions with smooth flow (327 participants: 132 males, 194 females, and 1 other).</p>
<p>We compared participants included in the analysis (n &#x3d; 327) and those excluded (n &#x3d; 297) with respect to age group and familiarity with technology to examine the potential for selection bias due to the exclusion. For age, we classified participants into three categories: children (<inline-formula id="inf29">
<mml:math id="m29">
<mml:mrow>
<mml:mo>&#x2264;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>15 years, based on the use of child-friendly expressions in the questionnaire), older adults (<inline-formula id="inf30">
<mml:math id="m30">
<mml:mrow>
<mml:mo>&#x2265;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>65 years, based on the standard definition of older adults in Japan), and others (16&#x2013;64 years). Among the included participants, there were 21 children, 46 older adults, and 260 others. Among the excluded participants, there were 19 children, 56 older adults, and 222 others. A chi-square test revealed no significant difference in age distribution between these two groups (<inline-formula id="inf31">
<mml:math id="m31">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>&#x3c7;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>(2) &#x3d; 2.64, p &#x3d; 0.267). In addition, to compare the familiarity with technology, we analyzed the mean ratings across seven robot-related technologies. The included participants showed a median of 2.86 with a mean of 2.96 (SE &#x3d; 0.0281), while the excluded participants showed a median of 3.00 with a mean of 2.97 (SE &#x3d; 0.0295). A two-sided Mann&#x2013;Whitney U test revealed no significant difference between the two groups. These results suggest that the exclusion of participants with unsmooth interaction flows is unlikely to have a substantial impact on the generalizability of the findings, given the scope of the data collected in this experiment.</p>
</sec>
</sec>
<sec id="s4-2">
<label>4.2</label>
<title>Hypotheses testing</title>
<sec id="s4-2-1">
<label>4.2.1</label>
<title>Categorization of instruction abstractness</title>
<p>Participants were divided into two groups: those whose situation descriptions for the robot&#x2019;s facial expression explicitly referred to emotions or physical conditions, and those whose descriptions did not. To determine whether there were emotions or physical conditions in the description, we used GPT-5 by OpenAI, Gemini 2.5 Pro by Google DeepMind, and Claude Opus 4.1 by Anthropic, and took a majority vote. Examples that were concrete and included emotions or physical conditions were &#x201c;when you are happy after winning the lottery&#x201d; or &#x201c;when you feel pain after falling off a bicycle,&#x201d; whereas examples of abstract instructions without such references were &#x201c;when you are about to be late for school&#x201d; or &#x201c;when your lie is discovered.&#x201d; Participants whose instructions in both trials did not include specific emotions or physical conditions were categorized as the abstract group (n &#x3d; 200), while those who provided at least one instruction containing these references were categorized as the concrete group (n &#x3d; 127).</p>
<p>The abstract group consisted of 80 males, 119 females, and 1 other, and the mean age was 45.9 years. The concrete group consisted of 52 males and 75 females, and the mean age was 49.2 years. In the analysis of personality traits, normalized scores were used for each of the Big Five factors to adjust for differences in the number of response scales between adults and children. The abstract group showed average scores of 0.577 (Extraversion), 0.662 (Agreeableness), 0.520 (Conscientiousness), 0.519 (Neuroticism), and 0.606 (Openness), whereas the concrete group showed averages of 0.581, 0.673, 0.505, 0.558, and 0.592, respectively. The two-sided Mann&#x2013;Whitney U test did not show significant differences in any of the Big Five factors between the groups.</p>
</sec>
<sec id="s4-2-2">
<label>4.2.2</label>
<title>Analysis results</title>
<p>The results of the one-sided Mann&#x2013;Whitney U test comparing questionnaire evaluations between the abstract and concrete groups are shown in <xref ref-type="fig" rid="F3">Figure 3</xref>. The median (mean <inline-formula id="inf32">
<mml:math id="m32">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> SE) scores of the abstract group vs. the concrete group were as follows: Q1: 4.00 (3.69 <inline-formula id="inf33">
<mml:math id="m33">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0391) vs. 4.00 (3.54 <inline-formula id="inf34">
<mml:math id="m34">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0640), Q2: 4.00 (3.60 <inline-formula id="inf35">
<mml:math id="m35">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0432) vs. 4.00 (3.45 <inline-formula id="inf36">
<mml:math id="m36">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0620), Q3: 3.00 (3.40 <inline-formula id="inf37">
<mml:math id="m37">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0476) vs. 3.00 (3.28 <inline-formula id="inf38">
<mml:math id="m38">
<mml:mrow>
<mml:mo>&#xb1;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.0722). For Q1 (appropriate response) (U &#x3d; 13819.0, p &#x3d; 0.047) and Q2 (emotional expression) (U &#x3d; 14090.0, p &#x3d; 0.026), the abstract group, those who did not specify particular emotions or physical conditions, gave significantly higher ratings. In contrast, no significant difference was observed for Q3 (empathy) (U &#x3d; 13491.5, p &#x3d; 0.147). The effect sizes based on Cliff&#x2019;s delta were <inline-formula id="inf39">
<mml:math id="m39">
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.088 for Q1, <inline-formula id="inf40">
<mml:math id="m40">
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.109 for Q2, and <inline-formula id="inf41">
<mml:math id="m41">
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.062 for Q3. These relatively modest values could be influenced by the use of a 4-point Likert scale, which offers limited sensitivity for detecting subtle differences between groups. In summary, H1 and H2 were supported, but H3 was not.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Analysis results of questionnaire responses based on how situations were specified (left: mean scores, right: violin plots, &#x2a;p <inline-formula id="inf42">
<mml:math id="m42">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.05).</p>
</caption>
<graphic xlink:href="frobt-12-1728647-g003.tif">
<alt-text content-type="machine-generated">Bar and violin plots compare questionnaire scores on three questions between abstract and concrete conditions. Abstract scores are generally higher for Q1 (appropriate response) and Q2 (emotional expression), indicated by asterisks. Scores are similar for Q3 (empathy).</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec id="s4-3">
<label>4.3</label>
<title>General analyses</title>
<sec id="s4-3-1">
<label>4.3.1</label>
<title>Evaluation across facial expressions</title>
<p>The average questionnaire scores for each facial expression are presented in <xref ref-type="fig" rid="F4">Figure 4</xref>. The analysis was limited to expressions that were performed to at least 10 participants. In the case of Q1 (appropriate response), the highest-rated expressions were Sad (low-intensity), Sad (high-intensity), and ColdAnger (low-intensity) in descending order. Regarding Q2 (emotional expression), the top-rated expressions were Embarrassed (low-intensity), Sad (low-intensity), and Pain (low-intensity). With respect to Q3 (empathy), the highest evaluations were given to Pain (high-intensity), Sad (low-intensity), and CoolSmile (high-intensity).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Mean questionnaire scores for each facial expression that appeared in at least 10 participants. The numbers following the expression names indicate intensity, with 1 representing low and 2 representing high.</p>
</caption>
<graphic xlink:href="frobt-12-1728647-g004.tif">
<alt-text content-type="machine-generated">Bar charts illustrating questionnaire scores for various emotional responses. Chart Q1: &#x22;appropriate response&#x22; shows highest for Sad_1 (n &#x3d; 34). Chart Q2: &#x22;emotional expression&#x22; has Embarrassed_1 (n &#x3d; 24) highest. Chart Q3: &#x22;empathy&#x22; scores Pain_2 (n &#x3d; 13) highest.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s4-3-2">
<label>4.3.2</label>
<title>Correlations between Big Five and questionnaire results</title>
<p>The correlations between the Big Five personality traits obtained before the experiment and the questionnaire responses were analyzed using Spearman&#x2019;s rank correlation coefficient. The results are shown in <xref ref-type="fig" rid="F5">Figure 5</xref>. In general, there were no strong correlations between the personality traits and the results of the questionnaire. Although statistically significant negative correlation between Q1 and neuroticism (<inline-formula id="inf43">
<mml:math id="m43">
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; &#x2212;0.119, p &#x3d; 0.031) and positive correlations between Q3 and extraversion (<inline-formula id="inf44">
<mml:math id="m44">
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.129, p &#x3d; 0.020) and agreeableness (<inline-formula id="inf45">
<mml:math id="m45">
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 0.153, p &#x3d; 0.006), the observed correlations were very weak (<inline-formula id="inf46">
<mml:math id="m46">
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
<mml:mspace width="0.3333em"/>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.2), suggesting limited practical impact.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Correlation between participants&#x2019; personality traits and questionnaire results.</p>
</caption>
<graphic xlink:href="frobt-12-1728647-g005.tif">
<alt-text content-type="machine-generated">Correlation heatmap showing the relationships between personality traits (Extraversion, Agreeableness, Conscientiousness, Neuroticism, Openness) and three questions (Q1: appropriate response, Q2: emotional expression, Q3: empathy). Values range from negative to positive correlations with corresponding p-values. No strong correlations were observed between the personality traits and three questions.</alt-text>
</graphic>
</fig>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="s5">
<label>5</label>
<title>Discussion</title>
<sec id="s5-1">
<label>5.1</label>
<title>Emotion attribution to the robot</title>
<p>Regarding the Agency-related items (Q1 and Q2), the abstract group, in which participants did not explicitly specify particular emotions or physical conditions, gave significantly higher evaluations. Within the theoretical framework of human-robot interaction, it is argued that agency is determined by interactivity, autonomy, and adaptability (<xref ref-type="bibr" rid="B31">Jackson and Williams, 2021</xref>). When no explicit emotions or physical conditions were specified, the robot&#x2019;s behavior was more likely to be interpreted as if it had inferred the appropriate emotion and facial expression, not as merely acting according to given instructions. This may enhance the perceived autonomy and adaptability of the robot. In addition, the absence of predefined emotional labels broadened participants&#x2019; interpretive latitude, expanding the range of expressions considered appropriate, resulting in an effect on the participants&#x2019; perception of the robot&#x2019;s adaptability. In summary, the perceived agency was increased when the robot&#x2019;s expressive behavior was generated by the abstract situational instructions without emotions or physical conditions.</p>
<p>These findings can also be explained from the perspective of predictive processing (<xref ref-type="bibr" rid="B11">Clark, 2013</xref>). Human perception arises from the integration of bottom-up sensory input and top-down predictions. In this experimental setting, bottom-up input can be understood as the perception of the robot&#x2019;s facial expression together with the human&#x2019;s instruction. Top-down prediction refers to inferences generated from internal models based on prior knowledge and expectations. When the instruction given to the robot is abstract and the bottom-up information is limited, people need to rely more heavily on top-down predictions to compensate for the missing information. During this compensation process, assumptions about internal states of the robot, such as the reason why it chooses this expression, are likely to be invoked. Therefore, the robot&#x2019;s agency is perceived more strongly.</p>
<p>However, with regard to the Experience-related items (Q3), no significant differences were observed. As mentioned earlier, the absence of specific emotional instructions may have broadened participants&#x2019; interpretive latitude, leading to cases where the robot&#x2019;s expression did not necessarily align with the participant&#x2019;s own feelings. In such cases, participants may have perceived the expression as reasonable for someone else, even if it did not match their personal experience. In addition, the experimental setting may have affected this result. In this study, the robot performed expressions based only on the participants&#x2019; experiences, rather than its own experiences. This lack of perceived first-person experience of the robot could have weakened the impression of empathy. In studies on human interaction, it has been shown that when people mention having similar experiences, it can increase the sense of empathy perceived by others (<xref ref-type="bibr" rid="B26">Hodges et al., 2010</xref>). For future studies, it may be beneficial to design scenarios in which, when responding to participant instructions, the robot explicitly refers to its own similar past experiences (e.g., &#x201c;I&#x2019;ve been through a similar situation, and this was how I felt.&#x201d;). Such contextual framing may enhance the perception of empathy between the robot and the participant.</p>
<p>It is also important to acknowledge methodological limitations. For instance, empathy was assessed using a single-item Likert question, rather than a multi-item scale, which might not be sensitive enough to detect more subtle feelings of empathy. In addition, other factors such as the interaction duration and the robot&#x2019;s appearance may also have influenced participants&#x2019; perceptions. In particular, the robot was designed to appear as a junior high school-aged child. This may have impacted perceived empathy, especially in scenarios that are atypical for children of that age, for example, receiving congratulations at a wedding.</p>
</sec>
<sec id="s5-2">
<label>5.2</label>
<title>Trends across facial expressions</title>
<p>In general, most expressions achieved average ratings above 3 on a 4-point scale across all questionnaire items. This result indicates that the robot was largely successful in presenting expressions that participants regarded as appropriate to the given situations, and that the repertoire of expressions was sufficiently broad to address many different contexts. Although the repertoire covered a wide range of contexts, some situations mentioned by participants appeared to fall outside of its scope, such as &#x201c;when you eat sour pickled plum,&#x201d; and &#x201c;when you are desperately fighting an enemy.&#x201d; Additional categories of expression could further improve emotional interaction in future work.</p>
<p>Compared to positive expressions (e.g., Positive Surprise, Hope), negative expressions (e.g., Sad, Embarrassed, Pain) tended to receive higher ratings across all three evaluation items. This aligns with previous research on human-human interaction, which has reported that sharing negative episodes enhances empathy more than sharing neutral episodes, even though the experimental conditions differ (<xref ref-type="bibr" rid="B10">Cheng et al., 2024</xref>). In addition, regarding pain, which received the highest ratings for Q3 (empathy), previous studies have shown that observing facial expressions of pain can trigger empathic responses in the observer (<xref ref-type="bibr" rid="B51">Xiong et al., 2019</xref>). It is suggested that participants may have interpreted the robot&#x2019;s negative facial expressions not as mere reactions, but as emotional resonance, and recalled their past experiences and emotions again. As a result, they were more likely to feel that the robot empathized with them.</p>
</sec>
<sec id="s5-3">
<label>5.3</label>
<title>Relationship with Big Five personality traits</title>
<p>All absolute correlation coefficients were below 0.2, suggesting that there is little meaningful relationship between personality traits and the questionnaire scores. This indicates that participants&#x2019; personality traits did not strongly influence the evaluation of the impressions towards the robot in this experiment. Although previous studies have reported that personality traits influence human&#x2013;robot interaction (<xref ref-type="bibr" rid="B32">Kabaci&#x144;ska et al., 2025</xref>), within the scope of this experiment, impression evaluations are considered to be carried out without being strongly affected by personality traits.</p>
</sec>
<sec id="s5-4">
<label>5.4</label>
<title>Recovery from errors</title>
<p>In this experiment, after the robot performed a facial expression, participants were asked how they felt about it, and if they responded negatively, the robot attempted to present an alternative expression. While some participants were satisfied after the first retry, there was a general trend indicating that the occurrence of retries led to lower evaluation scores in the questionnaire. In human-robot interaction, recovery strategies following failures have shown limited success in restoring trust, while apologies and explanations are considered relatively effective (<xref ref-type="bibr" rid="B20">Esterwood and Robert, 2025</xref>). In this experiment, the robot&#x2019;s responses were generated using the LLM, and therefore, the robot did not consistently provide apologies or explanations when retrying facial expressions.</p>
<p>In some cases, participants provided specific requests during retries, such as asking the robot to adjust certain parts of the face. However, the current system was designed to regenerate entire facial expressions at the level of emotion labels and did not support the changes of facial action units or actuator movements. This factor prevented the robot from following such detailed user requests. For future development, these issues should be taken into account to improve the system.</p>
</sec>
<sec id="s5-5">
<label>5.5</label>
<title>Limitation</title>
<p>Since the experiment was conducted as part of an exhibit at the Expo 2025, it was necessary to minimize the burden on participants when completing the post-experiment questionnaire. As a result, the evaluation of the robot was limited to only three questions, and detailed assessments could not be conducted. Future studies should incorporate more comprehensive measures, such as the Dimensions of Mind Perception questionnaire (<xref ref-type="bibr" rid="B24">Gray et al., 2007</xref>) or the Perceived Agency Scale (<xref ref-type="bibr" rid="B48">Trafton et al., 2024</xref>), to allow for more detailed evaluation.</p>
<p>The android robot used in this study had the appearance of a junior high school-aged child. Therefore, many of the participants perceived the robot as younger than themselves. The use of a robot with a different age or gender could lead to different results.</p>
<p>In addition, some of the facial expressions used in this study were interpreted inconsistently as described in <xref ref-type="sec" rid="s3-4">Section 3.4</xref>. This suggests that some facial expressions may not be universally intuitive, highlighting potential limitations in their generalizability. Further study is needed on this issue. However, in the process of this study, when participants thought that the expression performed by the robot was not good, they could respond negatively, and such instances were excluded from the evaluation as an unsmooth interaction flow. Therefore, we consider that the varied interpretations of each facial expression had no significant impact on the experimental results.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s6">
<label>6</label>
<title>Conclusion</title>
<p>In this study, we implemented a total of 63 facial expressions on the android robot Nikola, including not only basic emotions but also complex emotions, physical conditions, and variations in emotional intensity. We conducted a large-scale experiment at Expo 2025 Osaka, Kansai, Japan. Participants provided verbal descriptions of various situations, to which the robot responded by performing a corresponding facial expression. The robot&#x2019;s responses were assessed along three key dimensions: appropriateness, emotional expression, and empathy. The results showed that most of the expressions received high ratings, with average scores of 3 or higher on a 4-point scale, indicating that the prepared repertoire of expressions was generally perceived as appropriate across a wide range of situations.</p>
<p>In addition, when participants described situations in more abstract terms, without explicitly stating specific emotions or physical conditions, the robot was perceived to have higher agency, including the ability to respond appropriately and express emotions. It is suggested that the absence of explicit emotional cues may have enhanced the sense of the robot&#x2019;s autonomy and adaptability in inferring the emotion and encouraged participants to interpret the expression in more diverse ways. On the other hand, there was no significant difference in perceived empathy based on the level of abstraction in the instructions with this experimental setting.</p>
<p>This study provides new insights into the field of human&#x2013;robot interaction by demonstrating how people perceive robots that flexibly express emotions based on contextual input, and showing that the abstractness of user instructions influences emotional attribution. While this experiment focused on the abstractness of human instructions, the gained insights may have broader applications in designing robot behaviors, such as when a robot shares its own experiences along with facial expressions. Further study would be required to explore the applicability of these findings in such contexts. In future work, we would like to expand the range of expressions to accommodate more diverse situations, improve interaction system design, and conduct more detailed questionnaire evaluations.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s7">
<title>Data availability statement</title>
<p>The datasets presented in this article are not readily available because of ethical restrictions. Requests to access the datasets should be directed to AF, <email>ayaka.fujii.wu@riken.jp</email>.</p>
</sec>
<sec sec-type="ethics-statement" id="s8">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the research ethics committee of RIKEN. The studies were conducted in accordance with the local legislation and institutional requirements. Written informed consent for participation in this study was provided by the participants or participants&#x2019; legal guardians/next of kin.</p>
</sec>
<sec sec-type="author-contributions" id="s9">
<title>Author contributions</title>
<p>AF: Writing &#x2013; original draft, Data curation, Formal Analysis, Visualization. CI: Data curation, Formal Analysis, Methodology, Software, Writing &#x2013; review and editing. KS: Software, Writing &#x2013; review and editing. TF: Software, Writing &#x2013; review and editing. RI: Methodology, Writing &#x2013; review and editing, Conceptualization, Data curation. YT: Methodology, Writing &#x2013; review and editing. TK: Methodology, Writing &#x2013; review and editing, Conceptualization, Data curation. TM: Conceptualization, Funding acquisition, Investigation, Methodology, Project administration, Software, Supervision, Writing &#x2013; review and editing.</p>
</sec>
<ack>
<title>Acknowledgements</title>
<p>The authors would like to thank Yuka Nakayama and Naomi Uratani for their efforts in implementing the facial expressions and transcribing and labeling the participants&#x2019; speech during the experiments.</p>
</ack>
<sec sec-type="COI-statement" id="s11">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s12">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript, to assist with refining wording and enhancing readability.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s13">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s14">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frobt.2025.1728647/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frobt.2025.1728647/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2160877/overview">Karolina Eszter Kov&#xe1;cs</ext-link>, University of Debrecen, Hungary</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1400779/overview">Giada Lombardi</ext-link>, Ospedale Policlinico San Martino, Italy</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1936777/overview">Olivia Zhao</ext-link>, University of Wisconsin-Madison, United States</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Abubshait</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Wiese</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>You look human, but act like a machine: agent appearance and behavior modulate different aspects of human&#x2013;robot interaction</article-title>. <source>Front. Psychol.</source> <volume>8</volume>, <fpage>1393</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2017.01393</pub-id>
<pub-id pub-id-type="pmid">28878703</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Allison</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Nejat</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Kao</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>The design of an expressive humanlike socially assistive robot</article-title>. <source>J. Mech. Robotics</source> <volume>1</volume>, <fpage>011001</fpage>. <pub-id pub-id-type="doi">10.1115/1.2959097</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Appel</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lugrin</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>K&#xfc;hle</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Heindl</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The emotional robotic storyteller: on the influence of affect congruency on narrative transportation, robot perception, and persuasion</article-title>. <source>Comput. Hum. Behav.</source> <volume>120</volume>, <fpage>106749</fpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2021.106749</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Barratt</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>R&#xe9;dei</surname>
<given-names>A. C.</given-names>
</name>
<name>
<surname>&#xc5;se</surname>
<given-names>I.-K.</given-names>
</name>
<name>
<surname>van de Weijer</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Does the Kuleshov effect really exist? Revisiting a classic film experiment on facial expressions and emotional contexts</article-title>. <source>Perception</source> <volume>45</volume>, <fpage>847</fpage>&#x2013;<lpage>874</lpage>. <pub-id pub-id-type="doi">10.1177/0301006616638595</pub-id>
<pub-id pub-id-type="pmid">27056181</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Barrett</surname>
<given-names>L. F.</given-names>
</name>
<name>
<surname>Adolphs</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Marsella</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Martinez</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Pollak</surname>
<given-names>S. D.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Emotional expressions reconsidered: challenges to inferring emotion from human facial movements</article-title>. <source>Psychol. Sci. Public Interest</source> <volume>20</volume>, <fpage>1</fpage>&#x2013;<lpage>68</lpage>. <pub-id pub-id-type="doi">10.1177/1529100619832930</pub-id>
<pub-id pub-id-type="pmid">31313636</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Benitez-Quiroz</surname>
<given-names>C. F.</given-names>
</name>
<name>
<surname>Wilbur</surname>
<given-names>R. B.</given-names>
</name>
<name>
<surname>Martinez</surname>
<given-names>A. M.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>The not face: a grammaticalization of facial expressions of emotion</article-title>. <source>Cognition</source> <volume>150</volume>, <fpage>77</fpage>&#x2013;<lpage>84</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2016.02.004</pub-id>
<pub-id pub-id-type="pmid">26872248</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bennett</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>abanovi&#x107;</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>The effects of culture and context on perceptions of robotic facial expressions</article-title>. <source>Interact. Stud.</source> <volume>16</volume>, <fpage>272</fpage>&#x2013;<lpage>302</lpage>. <pub-id pub-id-type="doi">10.1075/is.16.2.11ben</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Calbi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Heimann</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Barratt</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Siri</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Umilt&#xe1;</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Gallese</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>How context influences our perception of emotional faces: a behavioral study on the Kuleshov effect</article-title>. <source>Front. Psychol.</source> <volume>8</volume>, <fpage>1684</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2017.01684</pub-id>
<pub-id pub-id-type="pmid">29046652</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carroll</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Russell</surname>
<given-names>J. A.</given-names>
</name>
</person-group> (<year>1996</year>). <article-title>Do facial expressions signal specific emotions? Judging emotion from the face in context</article-title>. <source>J. Personality Soc. Psychol.</source> <volume>70</volume>, <fpage>205</fpage>&#x2013;<lpage>218</lpage>. <pub-id pub-id-type="doi">10.1037/0022-3514.70.2.205</pub-id>
<pub-id pub-id-type="pmid">8636880</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cheng</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Pan</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>How self-disclosure of negative experiences shapes prosociality?</article-title> <source>Soc. Cogn. Affective Neuroscience</source> <volume>19</volume>, <fpage>nsae003</fpage>. <pub-id pub-id-type="doi">10.1093/scan/nsae003</pub-id>
<pub-id pub-id-type="pmid">38324732</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Clark</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Whatever next? Predictive brains, situated agents, and the future of cognitive science</article-title>. <source>Behav. Brain Sci.</source> <volume>36</volume>, <fpage>181</fpage>&#x2013;<lpage>204</lpage>. <pub-id pub-id-type="doi">10.1017/S0140525X12000477</pub-id>
<pub-id pub-id-type="pmid">23663408</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cordaro</surname>
<given-names>D. T.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Keltner</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kamble</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Huddar</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>McNeil</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Universals and cultural variations in 22 emotional expressions across five cultures</article-title>. <source>Emotion</source> <volume>18</volume>, <fpage>75</fpage>&#x2013;<lpage>93</lpage>. <pub-id pub-id-type="doi">10.1037/emo0000302</pub-id>
<pub-id pub-id-type="pmid">28604039</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cowen</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>Keltner</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>What the face displays: mapping 28 emotions conveyed by naturalistic expression</article-title>. <source>Am. Psychol.</source> <volume>75</volume>, <fpage>349</fpage>&#x2013;<lpage>364</lpage>. <pub-id pub-id-type="doi">10.1037/amp0000488</pub-id>
<pub-id pub-id-type="pmid">31204816</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Diel</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sato</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Hsu</surname>
<given-names>C.-T.</given-names>
</name>
<name>
<surname>B&#xe4;uerle</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Teufel</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Minato</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>An android can show the facial expressions of complex emotions</article-title>. <source>Sci. Rep.</source> <volume>15</volume>, <fpage>2433</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-024-84224-3</pub-id>
<pub-id pub-id-type="pmid">39828769</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dong</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Santiago-Anaya</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Jeon</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Facial expressions increase emotion recognition clarity and improve warmth and attractiveness on a humanoid robot without adding the uncanny valley</article-title>. <source>Proc. Hum. Factors Ergonomics Soc. Annu. Meet.</source> <volume>67</volume>, <fpage>933</fpage>&#x2013;<lpage>939</lpage>. <pub-id pub-id-type="doi">10.1177/21695067231192427</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Du</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Tao</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Martinez</surname>
<given-names>A. M.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Compound facial expressions of emotion</article-title>. <source>Proc. Natl. Acad. Sci. U. S. A.</source> <volume>111</volume>, <fpage>E1454</fpage>&#x2013;<lpage>E1462</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1322355111</pub-id>
<pub-id pub-id-type="pmid">24706770</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ekman</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Friesen</surname>
<given-names>W. V.</given-names>
</name>
</person-group> (<year>1971</year>). <article-title>Constants across cultures in the face and emotion</article-title>. <source>J. Personality Soc. Psychol.</source> <volume>17</volume>, <fpage>124</fpage>&#x2013;<lpage>129</lpage>. <pub-id pub-id-type="doi">10.1037/h0030377</pub-id>
<pub-id pub-id-type="pmid">5542557</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ekman</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Freisen</surname>
<given-names>W. V.</given-names>
</name>
<name>
<surname>Ancoli</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>1980</year>). <article-title>Facial signs of emotional experience</article-title>. <source>J. Personality Soc. Psychol.</source> <volume>39</volume>, <fpage>1125</fpage>&#x2013;<lpage>1134</lpage>. <pub-id pub-id-type="doi">10.1037/h0077722</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Esterwood</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Robert</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The theory of mind and human&#x2013;robot trust repair</article-title>. <source>Sci. Rep.</source> <volume>13</volume>, <fpage>9877</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-023-37032-0</pub-id>
<pub-id pub-id-type="pmid">37337033</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Esterwood</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Robert</surname>
<given-names>L. P.</given-names>
</name>
</person-group> (<year>2025</year>). &#x201c;<article-title>Repairing trust in robots? A meta-analysis of HRI trust repair studies with a no-repair condition</article-title>,&#x201d; in <source>Proceedings of the 2025 ACM/IEEE international conference on human-robot interaction</source> (<publisher-name>IEEE Press</publisher-name>), <fpage>410</fpage>&#x2013;<lpage>419</lpage>.</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Faraj</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Selamet</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Morales</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Torres</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Hossain</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Facially expressive humanoid robotic face</article-title>. <source>HardwareX</source> <volume>9</volume>, <fpage>e00117</fpage>. <pub-id pub-id-type="doi">10.1016/j.ohx.2020.e00117</pub-id>
<pub-id pub-id-type="pmid">35492039</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ghazali</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>Ham</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Barakova</surname>
<given-names>E. I.</given-names>
</name>
<name>
<surname>Markopoulos</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Effects of robot facial characteristics and gender in persuasive human-robot interaction</article-title>. <source>Front. Robotics AI</source> <volume>5</volume>, <fpage>73</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2018.00073</pub-id>
<pub-id pub-id-type="pmid">33500952</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Glas</surname>
<given-names>D. F.</given-names>
</name>
<name>
<surname>Minato</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ishi</surname>
<given-names>C. T.</given-names>
</name>
<name>
<surname>Kawahara</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>ERICA: the ERATO intelligent conversational android</article-title>,&#x201d; in <source>2016 25th IEEE international symposium on robot and human interactive communication (RO-MAN)</source>, <fpage>22</fpage>&#x2013;<lpage>29</lpage>. <pub-id pub-id-type="doi">10.1109/ROMAN.2016.7745086</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gray</surname>
<given-names>H. M.</given-names>
</name>
<name>
<surname>Gray</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Wegner</surname>
<given-names>D. M.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Dimensions of mind perception</article-title>. <source>Science</source> <volume>315</volume>, <fpage>619</fpage>. <pub-id pub-id-type="doi">10.1126/science.1134475</pub-id>
<pub-id pub-id-type="pmid">17272713</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Haring</surname>
<given-names>K. S.</given-names>
</name>
<name>
<surname>Silvera-Tawil</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Takahashi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Velonaki</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Watanabe</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>Perception of a humanoid robot: a cross-cultural comparison</article-title>,&#x201d; in <source>2015 24th IEEE international symposium on robot and human interactive communication (RO-MAN)</source>, <fpage>821</fpage>&#x2013;<lpage>826</lpage>. <pub-id pub-id-type="doi">10.1109/ROMAN.2015.7333613</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hodges</surname>
<given-names>S. D.</given-names>
</name>
<name>
<surname>Kiel</surname>
<given-names>K. J.</given-names>
</name>
<name>
<surname>Kramer</surname>
<given-names>A. D. I.</given-names>
</name>
<name>
<surname>Veach</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Villanueva</surname>
<given-names>B. R.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Giving birth to empathy: the effects of similar experience on empathic accuracy, empathic concern, and perceived empathy</article-title>. <source>Personality Soc. Psychol. Bull.</source> <volume>36</volume>, <fpage>398</fpage>&#x2013;<lpage>409</lpage>. <pub-id pub-id-type="doi">10.1177/0146167209350326</pub-id>
<pub-id pub-id-type="pmid">19875825</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Ishi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Even</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Hagita</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>Speech activity detection and face orientation estimation using multiple microphone arrays and human position information</article-title>,&#x201d; in <source>Proceedings of IEEE/RSJ international conference on intelligent robots and systems</source>, <fpage>5574</fpage>&#x2013;<lpage>5579</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2015.7354167</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ishi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Even</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Hagita</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>A sound-selective hearing support system using environment sensor network</article-title>. <source>Acoust. Sci. Technol.</source> <volume>39</volume>, <fpage>287</fpage>&#x2013;<lpage>294</lpage>. <pub-id pub-id-type="doi">10.1250/ast.39.287</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Iwai</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Kawahara</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kumada</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Kurohashi</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Applying machine translation to psychology: automatic translation of personality adjectives</article-title>,&#x201d; in <source>Proceedings of machine translation summit XVII: translator, project and user tracks</source>, <fpage>23</fpage>&#x2013;<lpage>29</lpage>.</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Iwai</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Takahashi</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Minato</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Kohno</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Iio</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Shiomi</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). &#x201c;<article-title>Functions driving mind attribution to robots: development and validation of a questionnaire based on psychological evidence</article-title>,&#x201d; in <source>The 43rd annual conference of the robotics society of Japan</source>.</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jackson</surname>
<given-names>R. B.</given-names>
</name>
<name>
<surname>Williams</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A theory of social agency for human-robot interaction</article-title>. <source>Front. Robotics AI</source> <volume>8</volume>, <fpage>687726</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2021.687726</pub-id>
<pub-id pub-id-type="pmid">34485389</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kabacin&#x301;ska</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Dosso</surname>
<given-names>J. A.</given-names>
</name>
<name>
<surname>Vu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Prescott</surname>
<given-names>T. J.</given-names>
</name>
<name>
<surname>Robillard</surname>
<given-names>J. M.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Influence of user personality traits and attitudes on interactions with social robots: systematic review</article-title>. <source>Collabra Psychol.</source> <volume>11</volume>, <fpage>129175</fpage>. <pub-id pub-id-type="doi">10.1525/collabra.129175</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Kobayashi</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Hara</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>1993</year>). &#x201c;<article-title>Study on face robot for active human interface-mechanisms of face robot and expression of 6 basic facial expressions</article-title>,&#x201d; in <source>Proceedings of 1993 2nd IEEE international workshop on robot and human communication</source>, <fpage>276</fpage>&#x2013;<lpage>281</lpage>. <pub-id pub-id-type="doi">10.1109/ROMAN.1993.367708</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Krumhuber</surname>
<given-names>E. G.</given-names>
</name>
<name>
<surname>Kappas</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Manstead</surname>
<given-names>A. S. R.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Effects of dynamic aspects of facial expressions: a review</article-title>. <source>Emot. Rev.</source> <volume>5</volume>, <fpage>41</fpage>&#x2013;<lpage>46</lpage>. <pub-id pub-id-type="doi">10.1177/1754073912451349</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kunz</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Mylius</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Schepelmann</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Lautenbacher</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>On the relationship between self-report and facial expression of pain</article-title>. <source>J. Pain</source> <volume>5</volume>, <fpage>368</fpage>&#x2013;<lpage>376</lpage>. <pub-id pub-id-type="doi">10.1016/j.jpain.2004.06.002</pub-id>
<pub-id pub-id-type="pmid">15501194</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Manzi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Peretti</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Di Dio</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Cangelosi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Itakura</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kanda</surname>
<given-names>T.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>A robot is not worth another: exploring children&#x2019;s mental state attribution to different humanoid robots</article-title>. <source>Front. Psychol.</source> <volume>11</volume>, <fpage>2011</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2020.02011</pub-id>
<pub-id pub-id-type="pmid">33101099</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>McDaniel</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>D&#x2019;Mello</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>King</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Chipman</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Tapp</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Graesser</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Facial features for affective state detection in learning environments</article-title>. <source>Proc. 29th Annu. Meet. Cognitive Sci. Soc.</source> <volume>29</volume>, <fpage>467</fpage>&#x2013;<lpage>472</lpage>.</mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mishra</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Verdonschot</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Hagoort</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Skantze</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Real-time emotion generation in human-robot dialogue using large language models</article-title>. <source>Front. Robotics AI</source> <volume>10</volume>, <fpage>1271610</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2023.1271610</pub-id>
<pub-id pub-id-type="pmid">38106543</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mollahosseini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Abdollahi</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Sweeny</surname>
<given-names>T. D.</given-names>
</name>
<name>
<surname>Cole</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Mahoor</surname>
<given-names>M. H.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Role of embodiment and presence in human perception of robots&#x2019; facial cues</article-title>. <source>Int. J. Human-Computer Stud.</source> <volume>116</volume>, <fpage>25</fpage>&#x2013;<lpage>39</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijhcs.2018.04.005</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Paradeda</surname>
<given-names>R. B.</given-names>
</name>
<name>
<surname>Hashemian</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Rodrigues</surname>
<given-names>R. A.</given-names>
</name>
<name>
<surname>Paiva</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>How facial expressions and small talk may influence trust in a robot</article-title>. <source>Soc. Robot.</source> <fpage>169</fpage>&#x2013;<lpage>178</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-47437-3_17</pub-id>
</mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Prkachin</surname>
<given-names>K. M.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Assessing pain by facial expression: facial expression as nexus</article-title>. <source>Pain Res. Manage.</source> <volume>14</volume>, <fpage>53</fpage>&#x2013;<lpage>58</lpage>. <pub-id pub-id-type="doi">10.1155/2009/542964</pub-id>
<pub-id pub-id-type="pmid">19262917</pub-id>
</mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sato</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Namba</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Nishida</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ishi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Minato</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>An android for emotional interaction: spatiotemporal validation of its facial expressions</article-title>. <source>Front. Psychol.</source> <volume>12</volume>, <fpage>800657</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2021.800657</pub-id>
<pub-id pub-id-type="pmid">35185697</pub-id>
</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sato</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Shimokawa</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Minato</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Exploration of Mehrabian&#x2019;s communication model with an android</article-title>. <source>Sci. Rep.</source> <volume>15</volume>, <fpage>25986</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-025-11745-w</pub-id>
<pub-id pub-id-type="pmid">40676231</pub-id>
</mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Spatola</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Wudarczyk</surname>
<given-names>O. A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Ascribing emotions to robots: explicit and implicit attribution of emotions and perceived robot anthropomorphism</article-title>. <source>Comput. Hum. Behav.</source> <volume>124</volume>, <fpage>106934</fpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2021.106934</pub-id>
</mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Strack</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Weyers</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>M&#xfc;hlberger</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Seibt</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>H&#xe4;fner</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Deutsch</surname>
<given-names>R.</given-names>
</name>
<etal/>
</person-group> (<year>2009</year>). <article-title>When hunger finds no fault with moldy corn: food deprivation reduces food-related disgust</article-title>. <source>Emotion</source> <volume>9</volume>, <fpage>50</fpage>&#x2013;<lpage>58</lpage>. <pub-id pub-id-type="doi">10.1037/a0014449</pub-id>
<pub-id pub-id-type="pmid">19186916</pub-id>
</mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sundelin</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Lekander</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kecklund</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Van Someren</surname>
<given-names>E. J. W.</given-names>
</name>
<name>
<surname>Olsson</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Axelsson</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Cues of fatigue: effects of sleep deprivation on facial appearance</article-title>. <source>Sleep</source> <volume>36</volume>, <fpage>1355</fpage>&#x2013;<lpage>1360</lpage>. <pub-id pub-id-type="doi">10.5665/sleep.2964</pub-id>
<pub-id pub-id-type="pmid">23997369</pub-id>
</mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Thellman</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>de Graaf</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ziemke</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Mental state attribution to robots: a systematic review of conceptions, methods, and findings</article-title>. <source>J. Hum.-Robot Interact.</source> <volume>11</volume>, <fpage>1</fpage>&#x2013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.1145/3526112</pub-id>
</mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Trafton</surname>
<given-names>J. G.</given-names>
</name>
<name>
<surname>McCurry</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Zish</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Frazier</surname>
<given-names>C. R.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>The perception of agency</article-title>. <source>J. Hum.-Robot Interact.</source> <volume>13</volume>, <fpage>1</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1145/3640011</pub-id>
</mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Vural</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Cetin</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ercil</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Littlewort</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Bartlett</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Movellan</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2007</year>). &#x201c;<article-title>Drowsy driver detection through facial movement analysis</article-title>,&#x201d; in <source>Proceedings of the 2007 IEEE international conference on human-computer interaction</source>, <fpage>6</fpage>&#x2013;<lpage>18</lpage>.</mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Werner</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Al-Hamadi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Limbrecht-Ecklundt</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Walter</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Gruss</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Traue</surname>
<given-names>H. C.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Automatic pain assessment with facial activity descriptors</article-title>. <source>IEEE Trans. Affect. Comput.</source> <volume>8</volume>, <fpage>286</fpage>&#x2013;<lpage>299</lpage>. <pub-id pub-id-type="doi">10.1109/TAFFC.2016.2537327</pub-id>
</mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xiong</surname>
<given-names>R. C.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>L. Z.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>C. H.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>H. X.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Brain pathways of pain empathy activated by pained facial expressions: a meta-analysis of fMRI using the activation likelihood estimation method</article-title>. <source>Neural Regeneration Res.</source> <volume>14</volume>, <fpage>172</fpage>&#x2013;<lpage>178</lpage>. <pub-id pub-id-type="doi">10.4103/1673-5374.243722</pub-id>
<pub-id pub-id-type="pmid">30531091</pub-id>
</mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yam</surname>
<given-names>K. C.</given-names>
</name>
<name>
<surname>Bigman</surname>
<given-names>Y. E.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>P. M.</given-names>
</name>
<name>
<surname>Ilies</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>De Cremer</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Soh</surname>
<given-names>H.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Robots at work: people prefer&#x2014;and forgive&#x2014;service robots with perceived feelings</article-title>. <source>J. Appl. Psychol.</source> <volume>106</volume>, <fpage>1557</fpage>&#x2013;<lpage>1572</lpage>. <pub-id pub-id-type="doi">10.1037/apl0000834</pub-id>
<pub-id pub-id-type="pmid">33030919</pub-id>
</mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Sato</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Minato</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Namba</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Nishida</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). &#x201c;<article-title>Optimizing facial expressions of an android robot effectively: a bayesian optimization approach</article-title>,&#x201d; in <source>2022 IEEE-RAS 21st international conference on humanoid robots (humanoids)</source>, <fpage>542</fpage>&#x2013;<lpage>549</lpage>. <pub-id pub-id-type="doi">10.1109/Humanoids53995.2022.10000154</pub-id>
</mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>A. W.</given-names>
</name>
<name>
<surname>Queiroz</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Sebo</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2025</year>). &#x201c;<article-title>Balancing user control and perceived robot social agency through the design of end-user robot programming interfaces</article-title>,&#x201d; in <source>Proceedings of the 2025 ACM/IEEE international conference on human-robot interaction</source> (<publisher-name>IEEE Press</publisher-name>), <fpage>899</fpage>&#x2013;<lpage>908</lpage>.</mixed-citation>
</ref>
</ref-list>
</back>
</article>