<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Robot. AI</journal-id>
<journal-title>Frontiers in Robotics and AI</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Robot. AI</abbrev-journal-title>
<issn pub-type="epub">2296-9144</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1534060</article-id>
<article-id pub-id-type="doi">10.3389/frobt.2025.1534060</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Robotics and AI</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>A projection-based approach for clarifying interaction partners in human-robot communication</article-title>
<alt-title alt-title-type="left-running-head">Sone et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frobt.2025.1534060">10.3389/frobt.2025.1534060</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Sone</surname>
<given-names>Suguru</given-names>
</name>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kishi</surname>
<given-names>Tsubasa</given-names>
</name>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Ikeda</surname>
<given-names>Tetsushi</given-names>
</name>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2886018/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
</contrib-group>
<aff>
<institution>Graduate School of Information Sciences</institution>, <institution>Hiroshima City University</institution>, <addr-line>Hiroshima</addr-line>, <country>Japan</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2580157/overview">Paul Bremner</ext-link>, University of the West of England, United Kingdom</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2928624/overview">Michael Schiffmann</ext-link>, Technical University of Cologne, Germany</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2947252/overview">Ana M&#xfc;ller</ext-link>, Technical University of Cologne, Germany</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Tetsushi Ikeda, <email>ikeda@hiroshima-cu.ac.jp</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>27</day>
<month>03</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>12</volume>
<elocation-id>1534060</elocation-id>
<history>
<date date-type="received">
<day>25</day>
<month>11</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>27</day>
<month>02</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2025 Sone, Kishi and Ikeda.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Sone, Kishi and Ikeda</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Robots provide a variety of services in daily life spaces, making human-robot interaction essential. This research proposes a new projection-based method for non-humanoid robots to engage with people. While significant research has explored the use of human-like gestures in humanoid robots to initiate interaction, applying such approaches to non-humanoid robots is challenging in conveying the sense that the robot is addressing the person directly. In this study, we introduce a method where a projector mounted on the robot illuminates the area around both the robot and the partners it is addressing, enhancing the interaction clarity and participation. Experiments conducted in two scenarios demonstrated that the proposed method effectively conveyed the feeling of being directly addressed by the robot and fostered a sense of easy participation, even for those not actively participating.</p>
</abstract>
<kwd-group>
<kwd>guide robots</kwd>
<kwd>human-robot interaction</kwd>
<kwd>projection-based communication</kwd>
<kwd>service robots</kwd>
<kwd>social robotics</kwd>
</kwd-group>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Human-Robot Interaction</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>Robots are expected to coexist with humans and provide services that support human daily life. Research has advanced in various applications, such as exhibit guidance in museums (<xref ref-type="bibr" rid="B5">Burgard et al., 1998</xref>; <xref ref-type="bibr" rid="B32">Shiomi et al., 2006</xref>; <xref ref-type="bibr" rid="B13">Iio et al., 2019</xref>; <xref ref-type="bibr" rid="B27">Rosa et al., 2023</xref>), delivering packages (D. <xref ref-type="bibr" rid="B21">Lee et al., 2021</xref>), and information provision in settings like shopping malls (<xref ref-type="bibr" rid="B14">Kanda et al., 2009</xref>) and airports (<xref ref-type="bibr" rid="B38">Triebel et al., 2016</xref>). The ability to address target partners and initiate interaction is essential for robots operating in human-shared environments, and it remains a prominent area of study (<xref ref-type="bibr" rid="B3">Avelino et al., 2021</xref>).</p>
<p>Initiating conversations with specific partners in the presence of multiple people is a challenging task for robots. In human communication, we rely on cues such as standing position, body posture, pointing gestures, facial expressions, and eye contact to indicate to others that we are addressing them directly. Many studies have proposed methods for controlling humanoid robots to replicate such human behaviors (<xref ref-type="bibr" rid="B28">Saad et al., 2019</xref>; <xref ref-type="bibr" rid="B13">Iio et al., 2019</xref>). The importance of designing a coordinated combination of multiple modalities, such as gaze and body movement, has been highlighted (<xref ref-type="bibr" rid="B39">V&#xe1;zquez et al., 2017</xref>; <xref ref-type="bibr" rid="B2">Arai et al., 2019</xref>). However, applying these human-like methods to non-humanoid robots presents challenges. To address this issue, studies have examined methods for non-humanoid robots to convey their focus, such as using body orientation (<xref ref-type="bibr" rid="B30">Satake et al., 2013</xref>) and gaze direction on displays (<xref ref-type="bibr" rid="B15">Karreman et al., 2013</xref>). Nonetheless, accurately conveying the sense that the robot is directly addressing a specific partner or group remains difficult, especially when multiple people are present.</p>
<p>This research proposes a new method in which a non-humanoid robot, which lacks the ability to use gaze or gestures like human- or animal-like robots, uses projection to clearly convey that it is directly addressing multiple parties. Specifically, the robot projects a light field onto the ground that encompasses both the robot and the intended partners, making it explicit whom the robot is addressing. This method is advantageous in providing unambiguous communication, even when the robot is interacting with multiple people, and it is applicable to robots without human-like bodies. This paper builds upon the method proposed by <xref ref-type="bibr" rid="B33">Sone et al. (2022)</xref> and validates it through two new experiments conducted on a new projection robot.</p>
<p>The contributions of this research are as follows:<list list-type="simple">
<list-item>
<p>1. We propose a method for a robot to clearly convey its intended interaction partners in environments with multiple people, using an onboard projector to visually highlight them. The proposed method assumes that the robot already knows the positions of the partners it intends to address.</p>
</list-item>
<list-item>
<p>2. We validate the proposed method through two scenarios: a guidance task where the robot sequentially addresses multiple individuals, and an interaction experiment involving multiple partners.</p>
</list-item>
<list-item>
<p>3. In both experiments, subjective evaluations indicated that participants felt the robot was directly addressing them compared to the baseline method, and in Experiment 2, participants reported greater ease in engaging in conversation, with both effects being statistically significant (p &#x3c; 0.05).</p>
</list-item>
</list>
</p>
<p>The remainder of this paper is organized as follows: <xref ref-type="sec" rid="s2">Section 2</xref> reviews related work, <xref ref-type="sec" rid="s3">Section 3</xref> describes the proposed method, <xref ref-type="sec" rid="s4">Sections 4</xref> and <xref ref-type="sec" rid="s5">5</xref> describe the two validation experiments, and <xref ref-type="sec" rid="s6">Section 6</xref> presents a discussion of the results. Finally, <xref ref-type="sec" rid="s7">Section 7</xref> concludes the paper.</p>
</sec>
<sec id="s2">
<title>2 Related work</title>
<sec id="s2-1">
<title>2.1 Research on clarifying whom robots are addressing</title>
<p>Numerous methods have been investigated to enable robots to directly address their intended partners and initiate conversations. Behavioral theories regarding interpersonal distance and spatial positioning in human-human interaction have been proposed and later extended to human-robot interaction. <xref ref-type="bibr" rid="B11">Hall (1966)</xref> categorized interpersonal distances in human-human interactions and introduced the concept of proxemics. <xref ref-type="bibr" rid="B18">Kendon (1990)</xref> expanded on this by considering not only distance but also spatial formations, proposing that people in public conversations adjust their positions to form specific spatial arrangements. These insights into human conversational dynamics have been leveraged in designing robots that engage effectively in human-robot interactions (<xref ref-type="bibr" rid="B42">Yamaoka et al., 2010</xref>). One approach, proposed and experimentally validated by <xref ref-type="bibr" rid="B30">Satake et al. (2013)</xref>, involves a robot initiating interaction by first approaching a person at an appropriate social distance. They identified that a common cause of unsuccessful interactions is the person&#x2019;s failure to notice the robot&#x2019;s intention to start a conversation. To address this, the authors emphasized the need for the robot to clearly and unambiguously signal its intent. <xref ref-type="bibr" rid="B17">Kato et al. (2015)</xref> observed natural human behavior in approaching others, focusing on the use of body orientation and gaze, and implemented these behaviors in a mobile robot to evaluate their effectiveness. Similarly, <xref ref-type="bibr" rid="B43">Yang et al. (2020)</xref> confirmed that mimicking human approach behaviors is an effective strategy for managing a robot&#x2019;s movements when approaching a group of people.</p>
<p>When initiating conversation, the importance of using multiple modalities to communicate to people that the robot is attempting to address them directly has been widely recognized. For instance, <xref ref-type="bibr" rid="B28">Saad et al. (2019)</xref> demonstrated that when a robot greets a partner entering through a doorway with gestures or vocal cues to attract attention, the number of people who respond to the robot increases, while the number of unresponsive partners decreases. <xref ref-type="bibr" rid="B34">Strait et al. (2014)</xref> examined the effects of different modalities when a robot speaks to a person to provide advice, while <xref ref-type="bibr" rid="B12">Hoque et al. (2012)</xref> developed a method for recognizing a person&#x2019;s facial orientation and gaze, allowing the robot to use gaze behavior to signal its intent to engage in conversation. In this way, research has advanced techniques for engaging people through a combination of human-like modalities (<xref ref-type="bibr" rid="B39">V&#xe1;zquez et al., 2017</xref>).</p>
<p>Robot interaction with multiple people has been widely studied, exploring aspects such as gestures, gaze behaviors, and turn-taking management. <xref ref-type="bibr" rid="B26">Rifinski et al. (2021)</xref> examined how a robot&#x2019;s responsive gestures impact human-human interaction in multi-party settings. Their findings indicate that gaze and leaning gestures enhance interpersonal evaluation, leading to improved perceptions of conversation partners. Similarly, <xref ref-type="bibr" rid="B31">Shintani et al. (2024)</xref> analyzed the impact of a robot&#x2019;s gaze control on the dynamics of multi-party conversations and personality expression. Their study experimentally validated how a humanoid robot can reproduce human-like gaze behavior by considering three key factors: conversational roles, turn-taking, and gaze aversion. Regarding turn-taking, <xref ref-type="bibr" rid="B44">&#x17b;arkowski (2019)</xref> investigated how the social robot EMYS facilitates conversational flow in group interactions. Their study demonstrated that a robot&#x2019;s effective management of speaking turns significantly enhances dialogue fluency. These studies highlight the growing interest in multi-party human-robot interaction and provide insights into key design considerations for robots engaging with multiple individuals in various social and conversational contexts.</p>
<p>These studies primarily focus on methods for humanoid robots to interact with people using various modalities, which may not be directly applicable to robots with non-human-like bodies. For non-humanoid robots, many interaction methods have been studied (<xref ref-type="bibr" rid="B6">Cha et al., 2018</xref>) using various means, such as gesture (<xref ref-type="bibr" rid="B25">Press and Erel, 2022</xref>), light (<xref ref-type="bibr" rid="B7">Cha et al., 2017</xref>), and augmented reality (<xref ref-type="bibr" rid="B40">Walker et al., 2018</xref>). To indicate to the surrounding partners whom the robot is addressing, existing methods have mainly relied on the robot&#x2019;s body orientation and gaze direction, often displayed on a screen. <xref ref-type="bibr" rid="B1">Althaus et al. (2004)</xref> proposed a method in which a robot orients itself toward the center of a group when moving with multiple people. <xref ref-type="bibr" rid="B19">Kuzuoka et al. (2010)</xref> found that appropriate control of the robot&#x2019;s torso and body orientation can achieve a positional relationship conducive to human conversation. <xref ref-type="bibr" rid="B16">Karreman et al. (2015)</xref> investigated the impact of body orientation when guiding partners through an exhibit. More recently, <xref ref-type="bibr" rid="B36">Takagi et al. (2023)</xref> examined the effects of the robot&#x2019;s body orientation in multi-person conversations.</p>
<p>However, these methods have struggled to convey a clear sense that the robot is directly addressing specific partners nearby. In human-robot interaction, establishing &#x201c;which person the robot is directly addressing&#x201d; is essential for effective communication, as it forms the common ground necessary for interaction (<xref ref-type="bibr" rid="B8">Clark and Brennan, 1991</xref>). Unlike humans, robots face challenges in flexibly constructing such common ground. To address this issue, this study proposes a method where the robot uses a mounted projector to clearly indicate the intended addressee. This approach leverages the concept of physical co-presence, as discussed by Clark, to establish common ground through spatial referencing. In human-human interactions, gestures and eye gaze are commonly used to make spatial references to dialogue partners and objects, facilitating mutual understanding (<xref ref-type="bibr" rid="B4">Bangerter, 2004</xref>). Similarly, our method employs &#x201c;projection on the ground&#x201d; to identify the person being addressed, serving as a form of spatial referencing. This enables the robot to explicitly share the spatial reference of the intended addressee, especially in scenarios where traditional gestures are not feasible for the robot.</p>
</sec>
<sec id="s2-2">
<title>2.2 Research on projection robots</title>
<p>Recent advancements in the miniaturization and brightness of projectors have led to research on their use as interfaces for robots (<xref ref-type="bibr" rid="B35">Suzuki et al., 2022</xref>), with projection-based methods being classified as a type of Augmented Reality (AR)-based approach. To date, fundamental functionalities have been proposed for mounting projectors on robots to provide easily viewable projections from robots. J.-H. <xref ref-type="bibr" rid="B22">Lee (2007)</xref> proposed a method for presenting information to partners in the environment without location constraints by mounting a projector on a pan-tilt actuator on a robot. <xref ref-type="bibr" rid="B10">Donner et al. (2013)</xref> utilized a projector-equipped robot to guide partners, incorporating image distortion correction and self-localization capabilities. Additionally, various interfaces combining robots and projectors have been explored. <xref ref-type="bibr" rid="B23">Machino et al. (2006)</xref> proposed an efficient method for facilitating cooperative work by remote partners through projections from a robot. <xref ref-type="bibr" rid="B29">Saegusa (2017)</xref> developed a gait rehabilitation system using a mobile robot that projects the optimal positions for foot placement. <xref ref-type="bibr" rid="B37">Tamai et al. (2021)</xref> proposed a method that integrates movement and projection, using projection to guide a person&#x2019;s standing position during the guidance process.</p>
<p>Several studies have also explored methods for robots to communicate their future behaviors to nearby partners using projectors, with the aim of achieving safe coexistence in daily environments. <xref ref-type="bibr" rid="B24">Matsumaru (2008)</xref> proposed a method in which a robot uses a projector to display its future location by projecting information on movement speed and direction onto the floor. <xref ref-type="bibr" rid="B9">Coovert et al. (2014)</xref> examined the clarity and confidence level of pedestrians in understanding a robot&#x2019;s intended direction when it projected arrows indicating its movement path onto the ground. <xref ref-type="bibr" rid="B41">Watanabe et al. (2015)</xref> introduced a wheelchair robot that projects its intended travel route, emphasizing the importance of an autonomous wheelchair sharing route information with both surrounding pedestrians and passengers.</p>
<p>However, these studies have only proposed methods for using projection to convey information from a robot to people, without addressing how to clarify the specific partner to whom the robot is speaking. In contrast, this study proposes a method in which the robot uses projection to envelop the intended conversation partner in light, clarifying whom it is addressing. This approach demonstrates an interface that utilizes projection to signal the start of interaction.</p>
</sec>
</sec>
<sec id="s3">
<title>3 Using projection to clarify addressed partners</title>
<p>In this section, we propose a method to clearly indicate the partners to whom the robot is speaking by using a projector mounted on the robot. In the proposed method, the projection envelops both the robot and the intended conversation partner&#x2019;s feet, clarifying the interaction partner and simultaneously enhancing the sense of participation in the interaction with the robot.</p>
<p>This study is conducted under the assumption that the robot can recognize the positions and postures of surrounding individuals, identify the people it intends to address, and approach them. This study specifically focuses on the phase in which the robot communicates with these partners, aiming to clearly convey to the surrounding people whom the robot is addressing.</p>
<sec id="s3-1">
<title>3.1 Problems with robots starting to talk to partners</title>
<p>
<xref ref-type="fig" rid="F1">Figure 1A</xref> illustrates a non-humanoid robot attempting to speak to the partner on the right side of the figure. In this scenario, neither the intended conversation partner nor the other nearby partners can clearly understand whom the robot is addressing, resulting in unsuccessful dialogue initiation. Moreover, when addressing multiple people in such a situation, it is often unclear who is actively participating in the conversation. Therefore, a robot must be able to clarify its intended conversation partner and identify the partners engaged in the interaction.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Difficulty faced by non-humanoid robots in clearly addressing individuals in the presence of others. <bold>(A)</bold> When the robot addresses a person, surrounding individuals cannot clearly identify whom the robot is speaking to, leading to confusion. <bold>(B)</bold> In the proposed method, the robot uses projection to clearly identify the individual it is addressing.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g001.tif"/>
</fig>
<p>Methods utilizing the robot&#x2019;s body orientation and gaze direction on a display have been proposed to clarify the partners to whom a non-humanoid robot is speaking (<xref ref-type="bibr" rid="B15">Karreman et al., 2013</xref>). However, it remains challenging to clearly indicate the specific people the robot is addressing among surrounding partners. This study aims to address this issue by employing a projector, enabling the robot to clearly identify its intended conversation partners and share information effectively with nearby people (<xref ref-type="fig" rid="F1">Figure 1B</xref>).</p>
</sec>
<sec id="s3-2">
<title>3.2 Using projection to clarify who a robot talks to</title>
<p>We propose a method in which a robot clearly indicates the area encompassing the partners it is interacting with by projecting an image onto the ground. Compared to the display method commonly used by robots coexisting with humans, ground projection offers the advantage of being easily visible from a wide range of directions, allowing the robot to clearly indicate multiple target partners simultaneously. Additionally, it is intuitively easy to understand, as it illuminates the area directly beneath each person&#x2019;s feet.</p>
<p>For each individual the robot addresses, it calculates an ellipse centered at the midpoint between the positions of the robot and the individual, projecting this ellipse onto the ground. <xref ref-type="fig" rid="F2">Figure 2</xref> illustrates an example of the positional relationship between the projected image and the people surrounding the robot. In the figure, p1 and p2 are the two individuals on the left, whom the robot is attempting to address, while p3 is not a target of the conversation. The white circle on the line connecting the center of gravity of the robot and individuals p1 and p2 in <xref ref-type="fig" rid="F2">Figure 2</xref> represents the midpoint between them. The ellipse encompassing the robot and the individual is centered on this midpoint, with the robot and the individual positioned at the foci of the ellipse. However, as the ellipse is projected within the range of the projector mounted on the robot, parts of the ellipse, such as the area behind the robot, may be truncated depending on the projector&#x2019;s capabilities. This projection encompasses only the intended conversation partner(s), inviting them to participate and reinforcing the sensation that the robot is directly addressing them.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Positional relationship between the robot, the people being addressed, and the projected ellipse.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g002.tif"/>
</fig>
</sec>
</sec>
<sec id="s4">
<title>4 Experiment 1</title>
<p>To confirm the effectiveness of the proposed method in situations where a robot interacts with people, we tested it on a task in which a robot sequentially addressed multiple partners, asking each to move in turn.</p>
<sec id="s4-1">
<title>4.1 Task</title>
<p>In Experiment 1, we simulated a scenario in which the robot acted as a guide to manage facility entry, such as by restricting access and guiding partners in an orderly manner. The guiding robot&#x2019;s task was to request that people advance in a single line as they were permitted entry into the facility (<xref ref-type="fig" rid="F3">Figure 3</xref>). We evaluated the clarity and comfort of the instructions provided by the robot during this guidance.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Guidance task by the robot, showing the robot instructing the first two individuals to proceed with entry.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g003.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="F4">Figure 4</xref> illustrates the experimental setup. The robot stood in front of three partners aligned in a row and used voice commands to instruct one or two partners on the right side of the figure to move to the right. The partner in the center was the primary subject, while the two partners standing 1 m away from the subject were experimenters who consistently followed the robot&#x2019;s instructions. When the robot addressed one or two partners on the right with the command, &#x201c;Please move to the left,&#x201d; it was necessary for the center participant to understand accurately whether they were included in the group instructed to move.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Experimental setup (Experiment 1).</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g004.tif"/>
</fig>
</sec>
<sec id="s4-2">
<title>4.2 Experimental setup</title>
<sec id="s4-2-1">
<title>4.2.1 Robot</title>
<p>
<xref ref-type="fig" rid="F5">Figure 5</xref> shows the projection robot used in the experiment. A projector (Optoma W340UST) was mounted on a mobile cart (T-frog Project i-Cart mini) for projection. The robot rotates at that location and turns its body to face the person to whom it is talking. To clarify the robot&#x2019;s frontal orientation, an illustration of the robot&#x2019;s face was displayed on the PC display on the robot. <xref ref-type="fig" rid="F6">Figure 6</xref> shows the projection range of the projector on the robot.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Projection robot used in the experiments.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g005.tif"/>
</fig>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>The extent of the projection range from the robot.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g006.tif"/>
</fig>
</sec>
<sec id="s4-2-2">
<title>4.2.2 People tracking system</title>
<p>We installed three LiDAR sensors (Hokuyo UTM-30LX) around the experimental environment to measure the positions of individuals within the area. To reliably measure the center of gravity of each person, the sensors were positioned at a height of 120 cm. The measurement process consisted of two steps: people detection and tracking. In the detection step, candidate individuals were identified through background subtraction and clustering. The system then detected an entity matching a typical person&#x2019;s size and calculated its center of gravity. In the tracking step, a particle filter was applied to estimate each individual&#x2019;s trajectory, producing a smoothed position at a rate of ten updates per second.</p>
</sec>
<sec id="s4-2-3">
<title>4.2.3 Robot behavior</title>
<p>The robot detects a person&#x2019;s location using the people tracking system and automatically executes a predetermined action to present the addressee via projection and body orientation. When the robot produces predefined speech utterances, the timing is manually triggered by the experimenter pressing a button.</p>
</sec>
</sec>
<sec id="s4-3">
<title>4.3 System configuration</title>
<p>
<xref ref-type="fig" rid="F7">Figure 7</xref> illustrates the system configuration. The mobile cart estimates its own position and orientation using on-board range sensors and environmental map data, enabling it to move to a specified location and orientation. The robot control PC receives the positions of surrounding individuals from the human behavior measurement system, sends control commands to the cart to orient the robot&#x2019;s body toward the designated individual, and generates a projection image on the floor, which is sent to the robot&#x2019;s projector for display. For speech control, in this experiment, the experimenter used a remote control to trigger default speech, which was played through a speaker on the projector.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>System configuration of the robot system.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g007.tif"/>
</fig>
</sec>
<sec id="s4-4">
<title>4.4 Conditions</title>
<p>A within-subject experimental design was used to investigate the effect of modality on the clarity with which the robot indicates the intended addressee (see <xref ref-type="table" rid="T1">Table 1</xref>). The independent variable was the modality used to indicate the intended addressee (Projection vs. Orientation). Each participant experienced both conditions in a counterbalanced order. The dependent variables included participant movement, subjective ratings on a seven-point Likert scale, and scores from the User Experience Questionnaire (UEQ; <xref ref-type="bibr" rid="B20">Laugwitz et al., 2008</xref>).</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Experiment design summary (experiment 1).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th colspan="3" align="left">Independent variables</th>
</tr>
<tr>
<th align="left">Factor</th>
<th align="center">Level 1</th>
<th align="center">Level 2</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Modality</td>
<td align="center">Projection and orientation (proposed)</td>
<td align="center">Orientation</td>
</tr>
<tr>
<th colspan="3" align="left">Dependent variables</th>
</tr>
<tr>
<th align="left">Type</th>
<th colspan="2" align="left">Measures</th>
</tr>
<tr>
<td align="left">Objective</td>
<td colspan="2" align="left">Position</td>
</tr>
<tr>
<td align="left">Subjective (7-point Likert scale)</td>
<td colspan="2" align="left">Clarity of understanding<break/>Willingness to be guided by the robot</td>
</tr>
<tr>
<td align="left">Subjective (UEQ)</td>
<td colspan="2" align="left">Pragmatic quality (Attractiveness, Perspicuity, Efficiency, Dependability)</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Projection-based condition: In addition to using body orientation, the robot indicated the person being addressed through projection, as described in the method proposed in <xref ref-type="sec" rid="s3">Section 3</xref>.</p>
<p>Orientation-based condition: The robot indicated the person being addressed using only body orientation, while the projection displayed a fixed circular pattern centered on the robot.</p>
<p>The robot was tasked with addressing partners in a scenario where three people stood in a line in front of it (<xref ref-type="fig" rid="F3">Figure 3</xref>). Guidance was provided in two distinct scenarios for each condition:</p>
<p>Scenario 1: The robot first instructed the first two partners to move, then, after a short interval, instructed the remaining two to move.</p>
<p>Scenario 2: The robot first instructed the first two partners to move, and then, after a short interval, instructed the remaining partner to move.</p>
<p>
<xref ref-type="fig" rid="F8">Figure 8</xref> shows the behavior of the robot under each condition and scenario in Experiment 1. In this setup, the first and third partners in the line were experimenters who consistently followed the robot&#x2019;s instructions&#x2014;moving when instructed and remaining stationary otherwise. The primary subject stood as the second person in the lineup, and was required to move in response to the first instruction in Scenario 1 and to the second instruction in Scenario 2.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Conditions and scenarios in Experiment 1.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g008.tif"/>
</fig>
<p>In both conditions, before speaking, the robot oriented itself toward the center of gravity of a single partner when addressing one person, and toward the midpoint between the positions of two partners when addressing two people.</p>
</sec>
<sec id="s4-5">
<title>4.5 Hypothesis</title>
<p>In situations where the robot is addressing specific partners around it, combining projection with body orientation is expected to enhance clarity in conveying who is being spoken to. Based on this, we formulated the following hypothesis:</p>
<p>
<statement content-type="hypothesis" id="Hypothesis_1">
<label>Hypothesis 1</label>
<p>By clearly indicating the partner to whom the robot is speaking using the proposed method, recipients will understand the robot&#x2019;s instructions more clearly and accurately than if only body orientation were used.</p>
</statement>
</p>
</sec>
<sec id="s4-6">
<title>4.6 Measurements</title>
<p>Experimental participants completed a questionnaire to rate the clarity of understanding whom the robot was addressing and their willingness to be guided by the robot. Ratings were given on a seven-point scale, with 7 indicating &#x2018;very easy to understand&#x2019; and 1 indicating &#x2018;very difficult to understand.&#x2019; The human behavior measurement system recorded the participants&#x2019; positions to verify whether they moved as instructed by the robot.</p>
<p>To evaluate the user experience of the robot service using projection, we also conducted a survey with the User Experience Questionnaire (UEQ) (<xref ref-type="bibr" rid="B20">Laugwitz et al., 2008</xref>). The questionnaire assessed six key aspects of user experience. In this experiment, participants responded to questions related to attractiveness, perspicuity, efficiency, and dependability aspects in the Japanese version of the UEQ.</p>
<p>Comparisons between conditions in the questionnaire-based evaluations were conducted using Wilcoxon&#x2019;s signed-rank test. For the UEQ-based evaluations, comparisons between conditions were performed using Welch&#x2019;s t-test, which accounts for unequal variances between two populations. In both tests, the significance level (&#x3b1;) was set to 0.05, which means that the results with p &#x3c; 0.05 were considered statistically significant.</p>
</sec>
<sec id="s4-7">
<title>4.7 Participants</title>
<p>A total of 22 participants (1 woman and 21 men; average age: 23.1) took part in our experiment. The study was conducted from 19 December 2022, to 6 March 2023 at Hiroshima City University in a controlled laboratory environment. All participants were university students with a background in information science.</p>
<p>The study protocol was approved by the Ethics Committee of Hiroshima City University, Japan, and all participants provided written informed consent before participating in the study. They participated in both Experiment 1 and Experiment 2 sequentially, with each session lasting approximately 15 min and a 10-min break in between. Participants received monetary compensation for their participation.</p>
</sec>
<sec id="s4-8">
<title>4.8 Procedures</title>
<p>Participants were informed that the robot would use projections and body movements to deliver spoken instructions. Then they experienced how the robot behaves using both conditions in advance. Participants were told that the robot&#x2019;s speech would include a greeting at the start of the session, followed by the instruction, &#x2018;Please proceed to the left toward us.&#x2019; Additionally, participants were instructed to move to a designated position on the left near the robot when prompted and to remain there once they arrived. During each experiment, the participant stood at the center of a line of three people positioned in front of the robot.</p>
<p>The robot initiated the session with a voice greeting and guided participants through two scenarios in each condition. After completing the movements in each condition, participants filled out a questionnaire.</p>
</sec>
<sec id="s4-9">
<title>4.9 Results</title>
<p>
<xref ref-type="fig" rid="F9">Figure 9</xref> shows the percentage of participants who correctly followed the robot&#x2019;s instructions. In the projection-based condition, 93.8% of participants correctly interpreted and followed the robot&#x2019;s instructions. In contrast, only 37.5% of participants correctly understood and responded to the instructions in the orientation-only condition, where the robot used only body orientation. In this latter condition, the robot failed to effectively convey its instructions, resulting in many participants acting contrary to the robot&#x2019;s guidance.</p>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>Proportion of participants who correctly followed the robot&#x2019;s instructions.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g009.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="F10">Figure 10</xref> shows the questionnaire evaluations. In terms of clarity regarding whom the robot was addressing, the proposed method scored significantly higher than the orientation-only condition, where the person was indicated solely by the robot&#x2019;s orientation. A Wilcoxon signed-rank test confirmed a significant difference (V &#x3d; 0, Z &#x3d; 3.85, p &#x3c; 0.05, p &#x3d; 0.0001). The effect size, Cliff&#x2019;s delta, was <inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.868</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, indicating a large effect. These results suggest that the use of projection significantly improved the clarity of the robot&#x2019;s addressee identification. Similarly, the projection-based condition also scored significantly higher than the orientation-only condition in terms of participants&#x2019; willingness to be guided by the robot. A Wilcoxon signed-rank test confirmed a significant difference (V &#x3d; 0, Z &#x3d; 3.73, p &#x3c; 0.05, p &#x3d; 0.0002). The effect size, Cliff&#x2019;s delta, was <inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.770</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, again indicating a large effect. These findings suggest a strong preference for the proposed method in guiding participants.</p>
<fig id="F10" position="float">
<label>FIGURE 10</label>
<caption>
<p>Questionnaire responses from participants in Experiment 1. <bold>(A)</bold> Clarity of the addressed person. <bold>(B)</bold> Willingness to be guided by the robot. (&#x2a; indicates p &#x3c; 0.05 and &#x2a;&#x2a; indicates p &#x3c; 0.01).</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g010.tif"/>
</fig>
<p>The User Experience Questionnaire (UEQ) was used to evaluate participants&#x2019; perceptions across four scales: Attractiveness, Perspicuity, Efficiency, and Dependability, with the results visualized in <xref ref-type="fig" rid="F11">Figure 11</xref>. The reliability of each scale, measured by Cronbach&#x2019;s alpha, is summarized in <xref ref-type="table" rid="T2">Table 2</xref>. A Welch&#x2019;s t-test was conducted for each scale, with Cohen&#x2019;s <inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> used to assess effect sizes. The results showed that the proposed method scored significantly higher than the orientation-only condition in all four scales: Attractiveness (p &#x3d; 0.0003, <inline-formula id="inf4">
<mml:math id="m4">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1.29</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), Perspicuity (p &#x3c; 0.0001, <inline-formula id="inf5">
<mml:math id="m5">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1.30</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), Efficiency (p &#x3d; 0.0014, <inline-formula id="inf6">
<mml:math id="m6">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1.04</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), and Dependability (p &#x3d; 0.0001, <inline-formula id="inf7">
<mml:math id="m7">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1.23</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), all showing large effects. These findings indicate that the projection-based method strongly enhanced participants&#x2019; user experience, particularly in terms of clarity (Perspicuity) and overall appeal (Attractiveness), which exhibited the largest effects.</p>
<fig id="F11" position="float">
<label>FIGURE 11</label>
<caption>
<p>Summary of participants&#x2019; evaluations from the User Experience Questionnaire (UEQ) in Experiment 1 (&#x2a; indicates p &#x3c; 0.05 and &#x2a;&#x2a; indicates p &#x3c; 0.01).</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g011.tif"/>
</fig>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Reliability analysis of UEQ scales: Cronbach&#x2019;s alpha values (experiment 1).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">UEQ scale</th>
<th align="left">Projection</th>
<th align="left">Orientation</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Attractiveness</td>
<td align="left">0.901</td>
<td align="left">0.929</td>
</tr>
<tr>
<td align="left">Perspicuity</td>
<td align="left">0.808</td>
<td align="left">0.598&#x2a;</td>
</tr>
<tr>
<td align="left">Efficiency</td>
<td align="left">0.808</td>
<td align="left">0.800</td>
</tr>
<tr>
<td align="left">Dependability</td>
<td align="left">0.698&#x2a;</td>
<td align="left">0.823</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>&#x2a; Results of scales that need to be interpreted with caution.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>Overall, in the projection-based condition, participants found the robot easier to understand, and more respondents expressed a preference to be guided by the robot.</p>
</sec>
</sec>
<sec id="s5">
<title>5 Experiment 2</title>
<p>In Experiment 1, we evaluated whether the proposed method effectively indicates whether the robot is directly addressing a specific individual. In Experiment 2, we focused on a multi-person dialogue scenario, assessing the impressions of those who did not actively participate in the conversation. In situations where we are conversing with a robot, maintaining the sense that the robot is addressing you personally can enhance the feeling of inclusion in the conversation, which is essential for smooth communication. In this section, we examined the effect of projection in a scenario where two people ask a guide robot for directions, testing whether projection can effectively convey that the robot is addressing both partners. Additionally, we assessed the impact of projection on the impression of individuals who were present but not actively participating.</p>
<p>The same equipment used in Experiment 1 was employed to measure the behaviors of both the robot and the participants. Experiment 2 was conducted with the same participants from Experiment 1 and adhered to the same ethical procedures.</p>
<sec id="s5-1">
<title>5.1 Task and environment</title>
<p>The experiment simulated a scenario in which two people visit a commercial facility together, with one individual asking a guide robot for directions to their destination (<xref ref-type="fig" rid="F12">Figure 12</xref>). In this setup, one of the two individuals was the participant, while the other was the experimenter. The experimenter directed the participant to approach the guide robot from the right side of the figure, then stopped at a predetermined position, greeted the robot, and asked for directions to the destination. The conversation between the experimenter (E) and the robot (R) followed a fixed set of predetermined dialogue, and an example of this dialogue is shown below. After the interaction, participants were asked to rate the extent to which they felt the robot was speaking to them.</p>
<disp-quote>
<p>E: Excuse me. Could you tell me the way to the student room?</p>
</disp-quote>
<disp-quote>
<p>R: The student room, correct? First, please exit this room, take the elevator, and go down to the 4th floor.</p>
</disp-quote>
<disp-quote>
<p>E: Where is the elevator?</p>
</disp-quote>
<disp-quote>
<p>R: The elevator is located to the left after you exit this room. After you get off the elevator, proceed down the connecting corridor, and you&#x2019;ll find the student room to your right.</p>
</disp-quote>
<disp-quote>
<p>E: Thank you very much.</p>
</disp-quote>
<disp-quote>
<p>R: You&#x2019;re welcome. Please feel free to ask if you need any further assistance.</p>
</disp-quote>
<fig id="F12" position="float">
<label>FIGURE 12</label>
<caption>
<p>Experimental setup (Experiment 2).</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g012.tif"/>
</fig>
</sec>
<sec id="s5-2">
<title>5.2 Conditions</title>
<p>Experiment 2 employed a within-subject experimental design, identical to that of Experiment 1, to investigate the effect of modality on multi-party interaction (see <xref ref-type="table" rid="T3">Table 3</xref>). The independent variable was the modality used to indicate the intended addressee (Projection vs. Orientation), and each participant experienced both conditions in a counterbalanced order.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Experiment design summary (experiment 2).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th colspan="3" align="left">Independent variables</th>
</tr>
<tr>
<th align="left">Factor</th>
<th align="center">Level 1</th>
<th align="center">Level 2</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Modality</td>
<td align="center">Projection and orientation (proposed)</td>
<td align="center">Orientation</td>
</tr>
<tr>
<th colspan="3" align="left">Dependent variables</th>
</tr>
<tr>
<th align="left">Type</th>
<th colspan="2" align="left">Measures</th>
</tr>
<tr>
<td align="left">Objective</td>
<td colspan="2" align="left">Position</td>
</tr>
<tr>
<td align="left">Subjective (7-point Likert scale)</td>
<td colspan="2" align="left">Feeling of participation<break/>Feeling of being noticed<break/>Clarity of understanding<break/>Feeling of ease in speaking</td>
</tr>
<tr>
<td align="left">Subjective (UEQ)</td>
<td colspan="2" align="left">Pragmatic quality (Attractiveness, Perspicuity, Efficiency, Dependability)<break/>Hedonic quality (Stimulation, Novelty)</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>As shown in <xref ref-type="table" rid="T3">Table 3</xref>, the dependent variables differed slightly from those in Experiment 1. While both experiments included participant movement and subjective ratings on a seven-point Likert scale, Experiment 2 specifically assessed participants&#x2019; sense of engagement in the conversation through additional questionnaire items. Additionally, Experiment 2 used the User Experience Questionnaire (UEQ) to evaluate all subscales, including Hedonic Quality, whereas Experiment 1 primarily focused on Pragmatic Quality.</p>
<p>In the scenario where two partners, the experimenter and the participant, approached the projection-equipped robot and the participant began asking questions, the robot conducted the conversation under the same two conditions as in Experiment 1. In both conditions, the robot was oriented toward the midpoint between the two partners, as measured by the human behavior measurement system. When the participants moved, the robot adjusted its orientation to follow the midpoint of their new positions.</p>
<p>The robot began projecting once the participant started speaking. In Condition A (the proposed method), the projection was aligned with the measured positions of both partners and adjusted to follow any changes in their standing positions. In Condition B, the projection displayed a fixed pattern that did not adjust to the partners&#x2019; positions. <xref ref-type="fig" rid="F13">Figure 13</xref> shows the behavior of the robot under each condition and scenario in Experiment 2.</p>
<fig id="F13" position="float">
<label>FIGURE 13</label>
<caption>
<p>Conditions and the robot&#x2019;s behavior in Experiment 2.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g013.tif"/>
</fig>
</sec>
<sec id="s5-3">
<title>5.3 Hypothesis</title>
<p>In a situation where multiple people interact with a robot and only one person is conversing with the robot, it is expected that by using projection to indicate that the robot is addressing both partners, even the person who is not speaking will feel as though the robot is talking to them, creating a sense of participation in the conversation. We hypothesize that in Condition A, where the robot uses projection to address both people, compared to Condition B, the sense of participation will be enhanced for individuals who are present but not actively participating.</p>
<p>
<statement content-type="hypothesis" id="Hypothesis_2">
<label>Hypothesis 2</label>
<p>In Condition A, where the robot uses projection to engage both partners, the sense of participation for individuals who are not actively participating in the conversation will be enhanced compared to Condition B, where the robot only faces both people.</p>
</statement>
</p>
<p>
<statement content-type="hypothesis" id="Hypothesis_3">
<label>Hypothesis 3</label>
<p>In Condition A, the sense that the robot is directly addressing the partner will be stronger compared to Condition B.</p>
</statement>
</p>
<p>
<statement content-type="hypothesis" id="Hypothesis_4">
<label>Hypothesis 4</label>
<p>In Condition A, partners will feel it is easier to talk to the robot compared to Condition B.</p>
</statement>
</p>
</sec>
<sec id="s5-4">
<title>5.4 Measurements</title>
<p>After each conversation in both conditions, participants completed a questionnaire to rate their engagement in the dialogue, their perception of the robot&#x2019;s awareness of them, their sense of being directly addressed by the robot, and their comfort level when interacting with the robot. As in Experiment 1, ratings were provided on a seven-point scale. An evaluation using the User Experience Questionnaire (UEQ) was also conducted. In this experiment, participants responded to all the questionnaire items, which assessed attractiveness, perspicuity, efficiency, dependability, stimulation, and novelty. Between-condition comparisons were conducted using the same statistical tests applied in Experiment 1. Comparisons between conditions in the questionnaire-based evaluations were conducted using Wilcoxon&#x2019;s signed-rank test. For the UEQ-based evaluations, comparisons between conditions were performed using Welch&#x2019;s t-test, which accounts for unequal variances between two populations. In both tests, the significance level (&#x3b1;) was set to 0.05, which means that the results with p &#x3c; 0.05 were considered statistically significant. The human behavior measurement system recorded participants&#x2019; positions to verify their responses to the robot&#x2019;s cues.</p>
</sec>
<sec id="s5-5">
<title>5.5 Participants</title>
<p>Participants from Experiment 1 also took part in Experiment 2. Thus, a total of 22 participants (1 woman and 21 men; average age: 23.1) participated in both experiments. The study protocol was approved by the Ethics Committee of Hiroshima City University, Japan, and all participants provided written informed consent before participation.</p>
</sec>
<sec id="s5-6">
<title>5.6 Procedures</title>
<p>Each experiment involved one participant and one experimenter. Participants were informed that the robot could provide directions verbally, engage in simple conversation, and occasionally use projection while speaking. They were also told that the experimenter and the participant knew each other and were together in a two-person situation heading toward a destination. Then they experienced how the robot behaves using both conditions in advance. From the initial position, the experimenter and the participant approached the robot, with the experimenter stopping at a predetermined position. The experimenter then engaged in a conversation with the robot, asking about the destination, as illustrated in <xref ref-type="fig" rid="F13">Figure 13</xref>. Once the conversation concluded, the experimenter informed the participant that it marked the end of the interaction with the robot. The participant then completed a questionnaire.</p>
</sec>
<sec id="s5-7">
<title>5.7 Results</title>
<p>
<xref ref-type="fig" rid="F14">Figure 14</xref> presents the impression ratings of the conversation with the robot. Both the median and mean values for the projection-based condition were higher than those for the orientation-only condition regarding the sense of participation in the dialogue (Q1). However, a Wilcoxon signed-rank test did not indicate statistical significance (V &#x3d; 31.5, Z &#x3d; 1.92, p &#x3d; 0.055, <inline-formula id="inf8">
<mml:math id="m8">
<mml:mrow>
<mml:mtext>Cliff</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
<mml:mi mathvariant="normal">s</mml:mi>
<mml:mtext>&#x2009;delta</mml:mtext>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.433</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), though the effect size suggested a moderate effect.</p>
<fig id="F14" position="float">
<label>FIGURE 14</label>
<caption>
<p>Questionnaire responses from participants in Experiment 2 (&#x2a; indicates p &#x3c; 0.05 and &#x2a;&#x2a; indicates p &#x3c; 0.01). <bold>(A)</bold> Feeling of participation in the conversation. <bold>(B)</bold> Feeling of being noticed in the conversation. <bold>(C)</bold> Clarity of the addressed person. <bold>(D)</bold> Feeling of ease in speaking to the robot.</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g014.tif"/>
</fig>
<p>In contrast, the projection-based condition significantly outperformed the orientation-only condition in the other three aspects: feeling of being noticed (Q2), clarity of understanding (Q3), and feeling of ease in speaking (Q4). A Wilcoxon signed-rank test confirmed significant differences for Q2 (V &#x3d; 0, Z &#x3d; 3.62, p &#x3c; 0.05, p &#x3d; 0.0003, Cliff&#x2019;s delta &#x3d; <inline-formula id="inf9">
<mml:math id="m9">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.853</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), Q3 (V &#x3d; 0, Z &#x3d; 3.51, p &#x3c; 0.05, p &#x3d; 0.0004, Cliff&#x2019;s delta &#x3d; <inline-formula id="inf10">
<mml:math id="m10">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.703</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), and Q4 (V &#x3d; 4, Z &#x3d; 3.34, p &#x3c; 0.05, p &#x3d; 0.0009, Cliff&#x2019;s delta &#x3d; <inline-formula id="inf11">
<mml:math id="m11">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.550</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), all indicating large effect sizes.</p>
<p>The User Experience Questionnaire (UEQ) was used to evaluate participants&#x2019; perceptions across six scales: Attractiveness, Perspicuity, Efficiency, Dependability, Stimulation, and Novelty, with the results visualized in <xref ref-type="fig" rid="F15">Figure 15</xref>. The reliability of each scale, measured by Cronbach&#x2019;s alpha, is summarized in <xref ref-type="table" rid="T4">Table 4</xref>. A Welch&#x2019;s t-test was conducted for each scale, with Cohen&#x2019;s <inline-formula id="inf12">
<mml:math id="m12">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> used to assess effect sizes. The results showed that the proposed method scored significantly higher than the orientation-only condition in Attractiveness (p &#x3d; 0.002, <inline-formula id="inf13">
<mml:math id="m13">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1.14</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), Perspicuity (p &#x3d; 0.039, <inline-formula id="inf14">
<mml:math id="m14">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.63</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), Dependability (p &#x3d; 0.024, <inline-formula id="inf15">
<mml:math id="m15">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.61</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), Stimulation (p &#x3d; 0.0005, <inline-formula id="inf16">
<mml:math id="m16">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1.08</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), and Novelty (p &#x3d; 0.004, <inline-formula id="inf17">
<mml:math id="m17">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.93</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), all showing moderate to large effects. Although Efficiency did not reach statistical significance (p &#x3d; 0.230, <inline-formula id="inf18">
<mml:math id="m18">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.38</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), the effect size suggests a potential trend favoring the proposed method.</p>
<fig id="F15" position="float">
<label>FIGURE 15</label>
<caption>
<p>Summary of participants&#x2019; evaluations from the User Experience Questionnaire (UEQ) in Experiment 2 (&#x2a; indicates p &#x3c; 0.05 and &#x2a;&#x2a; indicates p &#x3c; 0.01).</p>
</caption>
<graphic xlink:href="frobt-12-1534060-g015.tif"/>
</fig>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Reliability analysis of UEQ scales: Cronbach&#x2019;s alpha values (experiment 2).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">UEQ scale</th>
<th align="left">Projection</th>
<th align="left">Orientation</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Attractiveness</td>
<td align="left">0.887</td>
<td align="left">0.943</td>
</tr>
<tr>
<td align="left">Perspicuity</td>
<td align="left">0.844</td>
<td align="left">0.673&#x2a;</td>
</tr>
<tr>
<td align="left">Efficiency</td>
<td align="left">0.562&#x2a;</td>
<td align="left">0.783</td>
</tr>
<tr>
<td align="left">Dependability</td>
<td align="left">0.637&#x2a;</td>
<td align="left">0.793</td>
</tr>
<tr>
<td align="left">Stimulation</td>
<td align="left">0.915</td>
<td align="left">0.788</td>
</tr>
<tr>
<td align="left">Novelty</td>
<td align="left">0.906</td>
<td align="left">0.865</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>&#x2a;Results of scales that need to be interpreted with caution.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
</sec>
<sec sec-type="discussion" id="s6">
<title>6 Discussion</title>
<p>The primary contribution of this study is the proposal of a novel method for robots to clearly communicate their intended interaction partners through the use of projection. We introduced a projection-based approach that delineates the area around the robot and the partners it is addressing, enabling the robot to better convey its intention to interact. For successful human-robot interaction, it is crucial for robots to engage naturally with people and to sustain a feeling that the interaction is directed toward them. While significant research has focused on replicating human behaviors using humanoid robots, non-humanoid robots&#x2014;such as delivery and security robots that are increasingly deployed in human environments&#x2014;face unique challenges in initiating dialogue and signaling their intention to engage with people.</p>
<p>In Experiment 1, we examined whether projection could help a robot clearly indicate which partners in its vicinity it was addressing when making requests. Compared to using body orientation alone, the projection-based method significantly improved the clarity with which participants could identify whom the robot was addressing (p &#x3d; 0.0001, <inline-formula id="inf19">
<mml:math id="m19">
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.868</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), effectively outlining the robot&#x2019;s interaction range. Additionally, participants favored the projection-based guidance method (p &#x3d; 0.0002, <inline-formula id="inf20">
<mml:math id="m20">
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.770</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), likely because the projection&#x2019;s clarity in highlighting intended interaction partners enhanced their perception of the robot&#x2019;s guidance.</p>
<p>Experiment 2 evaluated the proposed method in a scenario where multiple partners asked a guide robot for directions. In this experiment, we tested whether the projection-based approach could effectively enhance the sense of participation for individuals who were not actively participating. Results showed an increase in the sense of participation (p &#x3d; 0.055, moderate effect), with significant improvements in the feeling that the robot was addressing both partners (p &#x3d; 0.0004, <inline-formula id="inf21">
<mml:math id="m21">
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.703</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) and in the ease with which participants felt they could speak to the robot (p &#x3d; 0.0009, <inline-formula id="inf22">
<mml:math id="m22">
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.550</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). This effect likely arises from the projection encompassing both partners alongside the robot, creating a shared perception of a three-way conversation.</p>
<p>The proposed method, which uses projection to clarify whom the robot is addressing and to enhance participants&#x2019; sense of involvement, holds promise as an approach for facilitating smooth interactions with non-humanoid robots. These robots often lack the capability to perform human-like gestures, such as hand or foot movements, facial expressions, or eye contact. While this study focused on tasks involving verbal communication, projection-based interfaces may prove beneficial for a range of other tasks as well. Many mobile robots assisting in daily life are equipped with displays; however, such displays are difficult to view unless the observer is positioned directly in front of them. In contrast, projector-based projections are visible from a wider range of angles, allowing shared access to the projected information. Leveraging this capacity for information delivery through projection may help robots perform tasks more effectively in everyday environments.</p>
<p>The robot used in this experiment displayed a simple face illustration on its screen to enhance the recognizability of its front, rather than to convey facial expressions or gaze direction. Therefore, while it remains uncertain whether our findings are applicable to robots without facial displays, they are likely transferable to robots with a clearly defined frontal orientation. <xref ref-type="bibr" rid="B39">V&#xe1;zquez et al. (2017)</xref> investigated the effects of body orientation and gaze in group conversations using a robot capable of expressing facial expressions and gaze through back projection. In contrast, our robot&#x2019;s face illustration served only to indicate body orientation. Thus, our findings are likely relevant to robots that do not rely on eye gaze information in the same way humans do.</p>
<p>In the experiments conducted in this study, the robot&#x2019;s behavior was explained to the participants before the experiment began, and they had the opportunity to observe and interact with the robot. As a result, we have not examined how individuals unfamiliar with the robot would evaluate its behavior. Investigating how first-time users perceive and evaluate the robot remains an important direction for future research.</p>
<p>The visibility of projection-based interfaces varies depending on lighting conditions. While the experiments in this study were conducted indoors, where the projection was clearly visible, outdoor visibility may be limited with current equipment. The effectiveness of projection in bright environments, such as outdoor settings, depends on the capabilities of the projector. Future advancements in projection technology, such as laser projectors, may enhance visibility and enable more effective use in bright environments.</p>
<p>Further research challenges include sharing projected information with surrounding partners when obstacles are present between the robot and the person being addressed, or when the surrounding area is densely populated. In such crowded environments, it may be necessary to effectively combine projection with other modalities, such as robot motion control and auditory cues, to improve the effectiveness of the robot&#x2019;s communication. Additionally, this study assumes that the robot can recognize and approach its intended interaction partners. Future work will consider integrating our approach with other methods currently under investigation for enabling robots to approach partners they intend to address. Verifying these comprehensive tasks remains a subject for future research.</p>
<p>This study has several limitations. One limitation is that the participant sample in this study was skewed toward male students with information science backgrounds, which may limit the generalizability of our findings. Prior research suggests that familiarity with technology and gender differences can influence perceptions of robots, potentially affecting user expectations and interaction preferences. Consequently, the impressions and evaluations in this study may not fully represent a more diverse population. Future studies should aim for a more balanced sample in terms of gender and academic background to enhance the breadth of user perspectives.</p>
<p>Another limitation of this study is that the interaction between the robot and humans is not entirely natural. In Experiment 1, we evaluated the robot&#x2019;s interface in a scenario where it guided individuals in a queue into a store. However, since the study was conducted in a laboratory rather than an actual store, certain artificial constraints were introduced, such as requiring participants to wait at a predetermined location. These constraints were necessary to control experimental conditions but may limit generalizability to real-world environments. In Experiment 2, a scripted conversation between the experimenter and the robot was used, as the robot lacks the ability to respond dynamically to human utterances. Participants, who were not actively engaged in the conversation, were unaware that the dialogue was scripted during the interaction. After the conversation, they evaluated their own sense of participation and the extent to which they felt the robot was addressing them directly. While this controlled setting allowed us to assess the impact of the proposed method, it has not yet been tested in more flexible, natural conversations.</p>
</sec>
<sec sec-type="conclusion" id="s7">
<title>7 Conclusion</title>
<p>We proposed a projection-based method to enable robots to clearly communicate with their intended conversation partners. This method allows the robot to indicate who is participating in the dialogue by projecting an image on the ground that encompasses both the robot and the intended conversation partner. We evaluated this approach through two guidance tasks. Compared to the conventional method, where the robot merely orients its body toward the interlocutor, the projection-based method did not significantly enhance the sense of dialogue participation for non-speaking participants. However, it did lead to a significant improvement in the sense that the robot was addressing them directly and increased the ease with which they felt they could engage with the robot. We believe that robots employing various modalities, such as projection, movement, body direction, and auditory cues, to effectively convey their awareness and intentions can enhance human-robot interaction, particularly in everyday environments where robots coexist with humans. Research on effective information presentation methods by robots in daily life contexts remains an essential area of study.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s8">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="ethics-statement" id="s9">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Ethics Committee of Hiroshima City University, Japan. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="s10">
<title>Author contributions</title>
<p>SS: Conceptualization, Data curation, Investigation, Software, Visualization, Writing&#x2013;original draft. TK: Data curation, Methodology, Software, Visualization, Writing&#x2013;review and editing. TI: Writing&#x2013;review and editing, Conceptualization, Formal Analysis, Project administration, Visualization, Writing&#x2013;original draft.</p>
</sec>
<sec sec-type="funding-information" id="s11">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. This work was supported by the internal research funding of Hiroshima City University.</p>
</sec>
<sec sec-type="COI-statement" id="s12">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s13">
<title>Generative AI statement</title>
<p>The author(s) declare that no Generative AI was used in the creation of this manuscript.</p>
</sec>
<sec sec-type="disclaimer" id="s14">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Althaus</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Kanda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Miyashita</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Christensen</surname>
<given-names>H. I.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Navigation for human-robot interaction tasks</article-title>. <source>IEEE Int. Conf. Robotics Automation</source> <volume>2004</volume> (<issue>2</issue>), <fpage>1894</fpage>&#x2013;<lpage>1900</lpage>. <pub-id pub-id-type="doi">10.1109/robot.2004.1308100</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Arai</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Kimoto</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Iio</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Shimohara</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Matsumura</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Shiomi</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>How can robot&#x2019;s gaze ratio and body direction show an awareness of priority to the people with whom it is interacting?</article-title> <source>IEEE Robotics Automation Lett.</source> <volume>4</volume> (<issue>4</issue>), <fpage>3798</fpage>&#x2013;<lpage>3805</lpage>. <pub-id pub-id-type="doi">10.1109/lra.2019.2929992</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Avelino</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Garcia-Marques</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Ventura</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Bernardino</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Break the ice: a survey on socially aware engagement for human&#x2013;robot first encounters</article-title>. <source>Int. J. Soc. Robotics</source> <volume>13</volume> (<issue>8</issue>), <fpage>1851</fpage>&#x2013;<lpage>1877</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-020-00720-2</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bangerter</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Using pointing and describing to achieve joint focus of attention in dialogue</article-title>. <source>Psychol. Sci.</source> <volume>15</volume> (<issue>6</issue>), <fpage>415</fpage>&#x2013;<lpage>419</lpage>. <pub-id pub-id-type="doi">10.1111/j.0956-7976.2004.00694.x</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Burgard</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Cremers</surname>
<given-names>A. B.</given-names>
</name>
<name>
<surname>Fox</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>H&#xe4;hnel</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Lakemeyer</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Schulz</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>1998</year>). &#x201c;<article-title>The interactive museum tour-guide robot</article-title>,&#x201d; in <source>15th national conf. On innovative applications of artificial intelligence (AAAI)</source>, <fpage>11</fpage>&#x2013;<lpage>18</lpage>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="http://www.aaai.org/Papers/AAAI/1998/AAAI98-002.pdf">http://www.aaai.org/Papers/AAAI/1998/AAAI98-002.pdf</ext-link>.</comment>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cha</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Fong</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Mataric</surname>
<given-names>M. J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>A survey of nonverbal signaling methods for non-humanoid robots</article-title>. <source>Found. Trends Robotics</source> <volume>6</volume> (<issue>4</issue>), <fpage>211</fpage>&#x2013;<lpage>323</lpage>. <pub-id pub-id-type="doi">10.1561/2300000057</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cha</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Trehon</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Wathieu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Wagner</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Shukla</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mataric</surname>
<given-names>M. J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>ModLight: designing a modular light signaling tool for human-robot interaction</article-title>. <source>Proc. IEEE Int. Conf. Robotics Automation (ICRA)</source>, <fpage>1654</fpage>&#x2013;<lpage>1661</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA.2017.7989195</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Clark</surname>
<given-names>H. H.</given-names>
</name>
<name>
<surname>Brennan</surname>
<given-names>S. E.</given-names>
</name>
</person-group> (<year>1991</year>). &#x201c;<article-title>Grounding in communication</article-title>,&#x201d; in <source>Perspectives on socially shared cognition</source>. Editors <person-group person-group-type="editor">
<name>
<surname>Resnick</surname>
<given-names>L. B.</given-names>
</name>
<name>
<surname>Levine</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Teasley</surname>
<given-names>S. D.</given-names>
</name>
</person-group> (<publisher-name>American Psychological Association</publisher-name>), <fpage>127</fpage>&#x2013;<lpage>149</lpage>. <pub-id pub-id-type="doi">10.1037/10096-006</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Coovert</surname>
<given-names>M. D.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Shindev</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Spatial augmented reality as a method for a mobile robot to communicate intended movement</article-title>. <source>Comput. Hum. Behav.</source> <volume>34</volume>, <fpage>241</fpage>&#x2013;<lpage>248</lpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2014.02.001</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Donner</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Himstedt</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hellbach</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Boehme</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Awakening history: preparing a museum tour guide robot for augmenting exhibits</article-title>. <source>Proc. Eur. Conf. Mob. Robots (ECMR)</source>, <fpage>337</fpage>&#x2013;<lpage>342</lpage>. <pub-id pub-id-type="doi">10.1109/ECMR.2013.6698864</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Hall</surname>
<given-names>E. T.</given-names>
</name>
</person-group> (<year>1966</year>). <source>The hidden dimension</source>. <publisher-loc>Garden City, NY</publisher-loc>: <publisher-name>Doubleday</publisher-name>.</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hoque</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Das</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Onuki</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Kobayashi</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Kuno</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>An integrated approach of attention control of target human by nonverbal behaviors of robots in different viewing situations</article-title>. <source>Proc. Int. Conf. Intelligent Robots Syst. (IROS)</source>, <fpage>1399</fpage>&#x2013;<lpage>1406</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2012.6385480</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Iio</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Satake</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kanda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hayashi</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ferreri</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Hagita</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Human-like guide robot that proactively explains exhibits</article-title>. <source>Int. J. Soc. Robotics</source> <volume>12</volume>, <fpage>549</fpage>&#x2013;<lpage>566</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00587-y</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kanda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Shiomi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Miyashita</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Hagita</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>An affective guide robot in a shopping mall</article-title>. <source>Proc. 4th ACM/IEEE Int. Conf. Hum. Robot Interact. - HRI &#x2019;09</source> <fpage>173</fpage>. <pub-id pub-id-type="doi">10.1145/1514095.1514127</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Karreman</surname>
<given-names>D. E.</given-names>
</name>
<name>
<surname>Bradford</surname>
<given-names>G. U. S.</given-names>
</name>
<name>
<surname>Van Dijk</surname>
<given-names>E. M. A. G.</given-names>
</name>
<name>
<surname>Lohse</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Evers</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Picking favorites: the influence of robot eye-gaze on interactions with multiple users</article-title>. <source>Proc. IEEE Int. Conf. Intelligent Robots Syst. (IROS)</source>, <fpage>123</fpage>&#x2013;<lpage>128</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2013.6696342</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Karreman</surname>
<given-names>D. E.</given-names>
</name>
<name>
<surname>van Dijk</surname>
<given-names>E. M. A. G.</given-names>
</name>
<name>
<surname>Evers</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>How can a tour guide robot influence visitors&#x2019; engagement, orientation and group formations?</article-title> <source>Proc. Fourth Int. Symp. New Front. Human-Robot Interact.</source>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kato</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Kanda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>May I help you? design of human-like polite approaching behavior</article-title>. <source>Proc. ACM/IEEE Int. Conf. Human-Robot Interact. (HRI)</source>, <fpage>35</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1145/2696454.2696463</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kendon</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>1990</year>). &#x201c;<article-title>Spatial organization in social encounters: the F-formation system</article-title>,&#x201d; in <source>Conducting interaction: patterns of behavior in focused encounters</source> (<publisher-name>Cambridge University Press</publisher-name>), <fpage>209</fpage>&#x2013;<lpage>238</lpage>.</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kuzuoka</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Suzuki</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yamashita</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yamazaki</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Reconfiguring spatial formation arrangement by robot body orientation</article-title>. <source>ACM/IEEE Int. Conf. Human-Robot Interact. (HRI)</source>, <fpage>285</fpage>&#x2013;<lpage>292</lpage>. <pub-id pub-id-type="doi">10.1109/HRI.2010.5453182</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Laugwitz</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Held</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Schrepp</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Construction and evaluation of a user experience questionnaire</article-title>. <source>Lect. Notes Comput. Sci. Incl. Subser. Lect. Notes Artif. Intell. Lect. Notes Bioinforma.</source> <fpage>63</fpage>&#x2013;<lpage>76</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-540-89350-9_6</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Shim</surname>
<given-names>D. H.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Assistive delivery robot application for real-world postal services</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>141981</fpage>&#x2013;<lpage>141998</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2021.3120618</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>J.-H.</given-names>
</name>
</person-group> (<year>2007</year>). &#x201c;<article-title>Human centered ubiquitous display in intelligent space</article-title>,&#x201d; in <source>Proc. Annual conf. Of the IEEE industrial electronics society (IECON)</source>, <fpage>22</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.1109/IECON.2007.4459955</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Machino</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Iwaki</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kawata</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Yanagihara</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Nanjo</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Shimokura</surname>
<given-names>K.-i.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Remote-collaboration system using mobile robot with camera and projector</article-title>. <source>Proc. IEEE Int. Conf. Robotics Automation (ICRA)</source> <volume>6</volume>, <fpage>4063</fpage>&#x2013;<lpage>4068</lpage>. <pub-id pub-id-type="doi">10.1109/ROBOT.2006.1642326</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Matsumaru</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Experimental examination in simulated interactive situation between people and mobile robot with preliminary-announcement and indication function of upcoming operation</article-title>. <source>Proc. IEEE Int. Conf. Robotics Automation (ICRA)</source>, <fpage>3487</fpage>&#x2013;<lpage>3494</lpage>. <pub-id pub-id-type="doi">10.1109/ROBOT.2008.4543744</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Press</surname>
<given-names>V. S.</given-names>
</name>
<name>
<surname>Erel</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Designing non-verbal humorous gestures for a non-humanoid robot</article-title>. <source>CHI Conf. Hum. Factors Comput. Syst. Ext. Abstr.</source>, <fpage>1</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1145/3491101.3519924</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rifinski</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Erel</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Feiner</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hoffman</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Zuckerman</surname>
<given-names>O.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Human-human-robot interaction: robotic object&#x2019;s responsive gestures improve interpersonal evaluation in human interaction</article-title>. <source>Human-Computer Interact.</source> <volume>36</volume> (<issue>4</issue>), <fpage>333</fpage>&#x2013;<lpage>359</lpage>. <pub-id pub-id-type="doi">10.1080/07370024.2020.1719839</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rosa</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Randazzo</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Landini</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Bernagozzi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sacco</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Piccinino</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Tour guide robot: a 5G-enabled robot museum guide</article-title>. <source>Front. Robotics AI</source> <volume>10</volume> (<issue>January</issue>), <fpage>1323675</fpage>&#x2013;<lpage>1323718</lpage>. <pub-id pub-id-type="doi">10.3389/frobt.2023.1323675</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saad</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Broekens</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Neerincx</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Hindriks</surname>
<given-names>K. V.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Enthusiastic robots make better contact</article-title>. <source>Proc. Int. Conf. Intelligent Robots Syst. (IROS)</source>, <fpage>1094</fpage>&#x2013;<lpage>1100</lpage>. <pub-id pub-id-type="doi">10.1109/IROS40897.2019.8967950</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saegusa</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Inclusive human-robot interaction for gait rehabilitation and wheel-chair exercises</article-title>. <source>Proc. IEEE Int. Conf. Robotics Biomimetics (ROBIO)</source>, <fpage>514</fpage>&#x2013;<lpage>519</lpage>. <pub-id pub-id-type="doi">10.1109/ROBIO.2017.8324468</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Satake</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kanda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Glas</surname>
<given-names>D. F.</given-names>
</name>
<name>
<surname>Imai</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Hagita</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>A robot that approaches pedestrians</article-title>. <source>IEEE Trans. Robotics</source> <volume>29</volume> (<issue>2</issue>), <fpage>508</fpage>&#x2013;<lpage>524</lpage>. <pub-id pub-id-type="doi">10.1109/TRO.2012.2226387</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shintani</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ishi</surname>
<given-names>C. T.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Gaze modeling in multi-party dialogues and extraversion expression through gaze aversion control</article-title>. <source>Adv. Robot.</source> <volume>38</volume> (<issue>19&#x2013;20</issue>), <fpage>1470</fpage>&#x2013;<lpage>1485</lpage>. <pub-id pub-id-type="doi">10.1080/01691864.2024.2394538</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shiomi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kanda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Hagita</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Interactive humanoid robots for a science museum</article-title>. <source>Proc. ACM Conf. Human-Robot Interact. (HRI)</source>, <fpage>305</fpage>&#x2013;<lpage>312</lpage>. <pub-id pub-id-type="doi">10.1145/1121241.1121293</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sone</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ikeda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Iwaki</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Clarification of the people the robot is talking to using projection</article-title>. <source>Proc. Annu. Conf. Soc. Instrum. Control Eng. (SICE)</source>, <fpage>755</fpage>&#x2013;<lpage>760</lpage>. <pub-id pub-id-type="doi">10.23919/SICE56594.2022.9905817</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Strait</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Canning</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Scheutz</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Let Me Tell You! Investigating the effects of robot communication strategies in advice-giving situations based on robot appearance, interaction modality and distance</article-title>. <source>Proc. ACM/IEEE Int. Conf. Human-Robot Interact. (HRI)</source>, <fpage>479</fpage>&#x2013;<lpage>486</lpage>. <pub-id pub-id-type="doi">10.1145/2559636.2559670</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Suzuki</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Karim</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Xia</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hedayati</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Marquardt</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Augmented reality and robotics: a survey and taxonomy for AR-enhanced human-robot interaction and robotic interfaces</article-title>. <source>CHI Conf. Hum. Factors Comput. Syst.</source>, <fpage>1</fpage>&#x2013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1145/3491102.3517719</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Takagi</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Sakamoto</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ichikawa</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Takeuchi</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Effects of robots&#x2019; &#x201c;body torque&#x201d; on participation and sustaining multi-person conversations</article-title>. <source>Proc. IEEE Int. Conf. Robot Hum. Interact. Commun. (RO-MAN)</source>, <fpage>38</fpage>&#x2013;<lpage>43</lpage>. <pub-id pub-id-type="doi">10.1109/RO-MAN57019.2023.10309596</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tamai</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ono</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yoshida</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ikeda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Iwaki</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Guiding a person through combined robotic and projection movements</article-title>. <source>Int. J. Soc. Robotics</source> <volume>14</volume>, <fpage>515</fpage>&#x2013;<lpage>528</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-021-00798-2</pub-id>
</citation>
</ref>
<ref id="B38">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Triebel</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Arras</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Alami</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Beyer</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Breuers</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Chatila</surname>
<given-names>R.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). &#x201c;<article-title>SPENCER: a socially aware service robot for passenger guidance and help in busy airports</article-title>,&#x201d; in <source>Springer tracts in advanced robotics</source>. <source>Field and service robotics</source>. Editors <person-group person-group-type="editor">
<name>
<surname>Wettergreen</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Barfoot</surname>
<given-names>T.</given-names>
</name>
</person-group> (<publisher-name>Springer</publisher-name>), <volume>113</volume>, <fpage>607</fpage>&#x2013;<lpage>622</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-27702-8_40</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>V&#xe1;zquez</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Carter</surname>
<given-names>E. J.</given-names>
</name>
<name>
<surname>McDorman</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Forlizzi</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Steinfeld</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hudson</surname>
<given-names>S. E.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Towards robot autonomy in group conversations: understanding the effects of body orientation and gaze</article-title>. <source>ACM/IEEE Int. Conf. Human-Robot Interact.</source>, <fpage>42</fpage>&#x2013;<lpage>52</lpage>. <pub-id pub-id-type="doi">10.1145/2909824.3020207</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Walker</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hedayati</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Szafir</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Communicating robot motion intent with augmented reality</article-title>. <source>Proc. ACM/IEEE Int. Conf. Human-Robot Interact. (HRI)</source>, <fpage>316</fpage>&#x2013;<lpage>324</lpage>. <pub-id pub-id-type="doi">10.1145/3171221.3171253</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Watanabe</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ikeda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Morales</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Shinozawa</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Miyashita</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hagita</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Communicating robotic navigational intentions</article-title>. <source>IEEE/RSJ Int. Conf. Intelligent Robots Syst. (IROS)</source>, <fpage>5763</fpage>&#x2013;<lpage>5769</lpage>. <pub-id pub-id-type="doi">10.1109/IROS.2015.7354195</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yamaoka</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Kanda</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ishiguro</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Hagita</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>A model of proximity control for information-presenting robots</article-title>. <source>IEEE Trans. Robotics</source> <volume>26</volume> (<issue>1</issue>), <fpage>187</fpage>&#x2013;<lpage>195</lpage>. <pub-id pub-id-type="doi">10.1109/TRO.2009.2035747</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Yin</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Bj&#xf6;rkman</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Peters</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Impact of trajectory generation methods on viewer perception of robot approaching group behaviors</article-title>. <source>Proc. IEEE Int. Conf. Robot Hum. Interact. Commun. (RO-MAN)</source>, <fpage>509</fpage>&#x2013;<lpage>516</lpage>. <pub-id pub-id-type="doi">10.1109/RO-MAN47096.2020.9223584</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>&#x17b;arkowski</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Multi-party turn-taking in repeated human&#x2013;robot interactions: an interdisciplinary evaluation</article-title>. <source>Int. J. Soc. Robotics</source> <volume>11</volume> (<issue>5</issue>), <fpage>693</fpage>&#x2013;<lpage>707</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-019-00603-1</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>