<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="discussion" dtd-version="1.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Robot. AI</journal-id>
<journal-title-group>
<journal-title>Frontiers in Robotics and AI</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Robot. AI</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-9144</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1776097</article-id>
<article-id pub-id-type="doi">10.3389/frobt.2026.1776097</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Opinion</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Robots and AI are not one moral category: why the distinction matters for ethical and conscious systems</article-title>
<alt-title alt-title-type="left-running-head">K&#xfc;&#xe7;&#xfc;kuncular</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frobt.2026.1776097">10.3389/frobt.2026.1776097</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>K&#xfc;&#xe7;&#xfc;kuncular</surname>
<given-names>Ahmet</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3201019"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<institution>Near East University, Faculty of Economics and Administrative Sciences</institution>, <city>Nicosia</city>, <country country="CY">Cyprus</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Ahmet K&#xfc;&#xe7;&#xfc;kuncular, <email xlink:href="mailto:ahmet@kucukuncular.com">ahmet@kucukuncular.com</email>
</corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-20">
<day>20</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>13</volume>
<elocation-id>1776097</elocation-id>
<history>
<date date-type="received">
<day>26</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>22</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>12</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 K&#xfc;&#xe7;&#xfc;kuncular.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>K&#xfc;&#xe7;&#xfc;kuncular</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-20">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<kwd-group>
<kwd>AI ethics</kwd>
<kwd>artificial consciousness</kwd>
<kwd>embodied artificial intelligence</kwd>
<kwd>meaningful human control</kwd>
<kwd>moral agency</kwd>
<kwd>moral appearance</kwd>
<kwd>robot ethics</kwd>
<kwd>sociotechnical systems</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="0"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="25"/>
<page-count count="00"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Computational Intelligence in Robotics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>Introduction</title>
<p>Calls that pair ethical and conscious AI with ethical and conscious robots may feel natural. Many contemporary robots use machine learning, and many AI systems are described in agentive terms. Yet the pairing can hide a conceptual shortcut. It quietly suggests that AI ethics and robot ethics are the same moral question applied to different shells. My claim in this opinion piece is modest but consequential: treating robotics and AI as a single moral category encourages avoidable category mistakes about where moral agency sits, where harms arise, how responsibility is attributed, and what consciousness claims could plausibly mean in deployed systems.</p>
<p>The overlap is real but not identity. Robotics is best understood as the engineering of embodied artefacts that sense and act in the physical world. AI is best understood as a family of computational techniques that can be embedded in many artefacts, including robots, but also in disembodied services such as decision support tools, recommender systems, and conversational agents (<xref ref-type="bibr" rid="B17">Riesen, 2025</xref>). The distinction defended here is not offered as a new ethical theory. It functions as a scoping rule for interdisciplinary work. It helps prevent recurring errors in evaluation, especially the tendency to import the ethical agenda of disembodied algorithmic systems into contexts where physical presence and bodily interaction are decisive, or to import debates about the moral status of social robots into contexts where there is no body, no situated action, and no human robot relationship (<xref ref-type="bibr" rid="B15">Moon et al., 2021</xref>; <xref ref-type="bibr" rid="B21">Torras, 2024</xref>).</p>
<p>This matters for research on ethical and conscious systems. Ethical performance is not only about internal decision rules; it is also about pathways of influence, constraint, and harm in real settings (<xref ref-type="bibr" rid="B14">Mittelstadt et al., 2016</xref>; <xref ref-type="bibr" rid="B19">Santoni De Sio and Van Den Hoven, 2018</xref>). Consciousness claims, if they ever become technically serious, will still intersect with embodiment, user perception, and accountability in ways that differ sharply between robots and software agents (<xref ref-type="bibr" rid="B9">Dehaene et al., 2017</xref>; <xref ref-type="bibr" rid="B11">Gray et al., 2007</xref>). Accordingly, I proceed in three steps. First, I separate overlap from equivalence by distinguishing computational cores from embodied systems. Second, I show why embodiment changes ethical evaluation by altering harm profiles, moral appearance, and responsibility pathways. Third, I translate this distinction into a practical discipline for research communication, so that authors, reviewers, and governance oriented readers can assess claims about ethical and conscious systems without conflating the relevant object of evaluation.</p>
<sec id="s1-1">
<title>Overlap without equivalence</title>
<p>A useful starting point is to separate the computational core from the embodied system. A robot may include AI modules, but it also includes sensors, actuators, safety interlocks, mechanical design, and a deployment environment. Conversely, many AI systems have no body at all, yet still shape behaviour through information, ranking, and gatekeeping (<xref ref-type="bibr" rid="B14">Mittelstadt et al., 2016</xref>). The ethical object is therefore rarely the AI model or the robot platform in isolation. It is the sociotechnical arrangement as a whole, including design choices, organisational incentives, user practices, and regulation (<xref ref-type="bibr" rid="B17">Riesen, 2025</xref>; <xref ref-type="bibr" rid="B22">Vallor and Vierkant, 2024</xref>). This point is familiar within sociotechnical and responsible robotics approaches, but my present emphasis is that the presence or absence of embodiment is not a minor implementation detail. It alters which ethical questions are primary, and which evidence would be relevant when assessing agency and consciousness claims.</p>
<p>Philosophical work on artificial agency helps clarify why the boundary matters. <xref ref-type="bibr" rid="B10">Floridi and Sanders (2004)</xref> argue that we can evaluate artificial agents at different levels of abstraction, without assuming that the artefact is a humanlike moral agent. That lens is valuable, but the choice of level is not free of engineering reality. Embodiment expands a system&#x2019;s causal footprint into the physical domain. A robot can collide, restrain, touch, obstruct, or physically shepherd. Even when its intelligence is modest, its body can create ethical stakes that look closer to product safety, bodily autonomy, and coercion than to statistical bias in classification (<xref ref-type="bibr" rid="B15">Moon et al., 2021</xref>; <xref ref-type="bibr" rid="B21">Torras, 2024</xref>). In contrast, disembodied AI can generate ethically significant effects without any physical presence, through epistemic authority, persuasive interaction, ranking power, or institutional gatekeeping. Treating these cases as if they raised the same ethical problems as embodied robots risks misspecifying both harms and responsibilities, and it encourages over general claims about agency or consciousness that are not supported by the relevant interaction context (<xref ref-type="bibr" rid="B14">Mittelstadt et al., 2016</xref>).</p>
<p>This motivates a discipline for research claims. When one asks whether a system is ethical, one ought to specify which of at least three targets one means: (1) ethical reasoning competence, meaning the quality of internal deliberation or value alignment (<xref ref-type="bibr" rid="B16">Moor, 2006</xref>), (2) ethical behaviour in context, meaning the observed effects of actions and interactions in a setting (<xref ref-type="bibr" rid="B15">Moon et al., 2021</xref>), (3) ethical governance, meaning how responsibility, oversight, and accountability are structured around the system (<xref ref-type="bibr" rid="B19">Santoni De Sio and Van Den Hoven, 2018</xref>). The proposed AI versus robot distinction sharpens this discipline by forcing an explicit answer to a prior question: what is the system under evaluation, a disembodied computational service, an embodied artefact, or a wider sociotechnical arrangement in which embodiment plays a constitutive role. Without this clarification, ethical appraisal can slide between targets and levels of abstraction, producing apparent disagreement that is in fact a mismatch of evaluative objects.</p>
<p>Machine ethics has long noted that systems can be ethical impact agents without being full moral agents (<xref ref-type="bibr" rid="B10">Floridi and Sanders, 2004</xref>; <xref ref-type="bibr" rid="B16">Moor, 2006</xref>). Robots, by being physically active and often socially present, tend to become ethical impact agents by default, even before we settle questions about moral agency. Disembodied AI systems can also become ethical impact agents by default, but typically through different pathways, such as differential access to opportunities, behavioural steering, or delegations of authority within organisations. Recognising overlap without equivalence makes these pathways easier to separate analytically, and it clarifies why collapsing AI ethics and robot ethics can create different category mistakes in different contexts.</p>
</sec>
<sec id="s1-2">
<title>Different harm profiles</title>
<p>Much of the modern ethical AI agenda grew around algorithmic mediation: fairness, accountability, transparency, privacy, and downstream social impacts of automated decisions (see, for example, <xref ref-type="bibr" rid="B12">Jobin et al., 2019</xref>; <xref ref-type="bibr" rid="B14">Mittelstadt et al., 2016</xref>). These concerns remain relevant when AI is embedded in robots. A care robot that allocates attention, flags risk, or prioritises tasks can reproduce bias just as a disembodied triage system can (<xref ref-type="bibr" rid="B20">Sharkey and Sharkey, 2012</xref>). Yet robot ethics adds dimensions that are easy to miss when everything sits in one basket. This section therefore isolates what embodiment changes in ethical evaluation, and why that change should alter research design, evidence standards, and governance expectations.</p>
<p>First is kinetic risk and bodily autonomy. The moral difference between a classifier and a mobile robot is not merely that one moves. Movement changes the kinds of harm that are salient, the time horizons of safety, and the evidential standards we should demand. Failure in a recommender system is often informational or distributive. Failure in an embodied robot can be immediate and bodily. This shifts ethical evaluation towards verification, fail safe design, and forms of human control that support timely intervention (<xref ref-type="bibr" rid="B19">Santoni De Sio and Van Den Hoven, 2018</xref>; <xref ref-type="bibr" rid="B23">Verhagen et al., 2024</xref>). This is also a methodological point: what counts as adequate assurance differs by domain. In disembodied AI, evaluation often prioritises representativeness, error disparities, contestability, and <italic>post hoc</italic> explanation. In robotics, assurance must additionally address mechanical reliability, hazard analysis, safe stopping, and the conditions under which human override is practically possible, not merely nominal.</p>
<p>Second is corporeal social influence. Robots have physical presence, can occupy space, can touch, and can create a sense of copresence. That changes what manipulation, consent, and vulnerability look like. Importantly, some of these concerns arise regardless of how advanced the underlying AI is (<xref ref-type="bibr" rid="B15">Moon et al., 2021</xref>). The ethical difficulty can be driven by embodiment itself, not by the sophistication of the model. A useful contrast is that disembodied AI frequently influences through informational pathways, such as ranking, recommendation, nudging, or institutional gatekeeping, whereas robots can additionally influence through spatial positioning, proximity, and touch, which can render consent more ambiguous and refusal more difficult in practice.</p>
<p>A practical way to keep both tracks visible is to separate harms into two spaces: (a) informational and institutional harms, such as bias, opacity, privacy loss, unequal access, and power asymmetries (<xref ref-type="bibr" rid="B12">Jobin et al., 2019</xref>; <xref ref-type="bibr" rid="B14">Mittelstadt et al., 2016</xref>), and (b) kinetic and relational harms, such as bodily safety, unwanted touch, spatial coercion, dependency, deception through social cues, and erosion of skills or relationships (<xref ref-type="bibr" rid="B15">Moon et al., 2021</xref>; <xref ref-type="bibr" rid="B20">Sharkey and Sharkey, 2012</xref>; <xref ref-type="bibr" rid="B21">Torras, 2024</xref>). The analytic value of this separation is not taxonomic elegance but governance clarity: it helps specify which harms are plausible in a given deployment and which forms of evidence, testing, and oversight are proportionate.</p>
<p>The second space is where category mistakes become costly. If we treat robotics as applied AI, we may overweight what is easiest to measure in software and underweight what is hardest but decisive in embodied interaction. For interdisciplinary audiences, this is the key practical implication of insisting on the distinction: it realigns what reviewers and policy assessors should ask for. A robot may satisfy common AI ethics expectations while still being ethically unacceptable due to interaction-level risk, and a disembodied system may satisfy safety-oriented criteria while remaining ethically unacceptable due to institutional harms. Treating these as one evaluative basket blurs that difference and weakens accountability.</p>
<p>A related issue is norm compliance. There is growing interest in robots that learn and enact social norms. But norms are not automatically ethical, and encoding them can amplify bias, paternalism, and politically entrenched expectations. Recent critique catalogues multiple ways norm compliant robots can reinforce problematic norms and induce harmful norm change (<xref ref-type="bibr" rid="B8">Coggins and Steinert, 2023</xref>). That critique lands differently in robotics than in disembodied AI, because robots enact norms through physical presence and behaviour that users experience as interpersonal. This again illustrates why embodiment changes the moral interface: the same norm encoded in software may be experienced as bureaucratic exclusion, while enacted by a robot it may be experienced as interpersonal correction, pressure, or even coercion.</p>
</sec>
<sec id="s1-3">
<title>Moral status and moral appearance</title>
<p>The pairing of conscious AI and conscious robots also raises a sharper philosophical question. Are these morally the same claim? If conscious means phenomenal consciousness, meaning there is subjective experience, then moral patiency plausibly depends on experience rather than on a chassis. A conscious disembodied system would be a moral patient in the same broad sense as a conscious robot, even if it lacked a body. That is why scientific discussions emphasise the need to clarify what functions and architectures would count as evidence for consciousness claims, rather than relying on surface behaviour (<xref ref-type="bibr" rid="B9">Dehaene et al., 2017</xref>). This paper therefore distinguishes two separable issues that are often conflated when AI and robots are discussed together: the metaphysical question of whether a system is conscious, and the practical question of how consciousness like claims will be interpreted and operationalised in real deployments.</p>
<p>Robots, however, introduce a second phenomenon that cannot be ignored: moral appearance. Humans infer mind from cues, and these inferences shape moral judgement. Mind perception research suggests people organise these inferences along dimensions such as experience and agency, and these perceptions predict moral responses (<xref ref-type="bibr" rid="B11">Gray et al., 2007</xref>). In human robot interaction, anthropomorphism and perceived intelligence are measurable constructs that affect trust, likeability, and perceived safety (<xref ref-type="bibr" rid="B3">Bartneck et al., 2009</xref>). Recent work also operationalises perceived moral patiency of social robots, showing that people can attribute morally relevant vulnerability to robots in systematic ways (<xref ref-type="bibr" rid="B2">Banks and Bowman, 2023</xref>). Disembodied AI can also generate moral appearance, particularly through linguistic fluency, conversational framing, and the presentation of confident outputs, but the cues are narrower and the interaction is typically mediated through screens and institutional workflows rather than co present behaviour. The distinction matters because the evidential basis for mind attribution, and the channels through which users become vulnerable to manipulation or deference, differ across these contexts.</p>
<p>This creates an asymmetry that is ethically important even before we resolve the metaphysical and ontological questions. A robot can be treated as if it were conscious because embodiment supplies social cues such as gaze, rhythm, proximity, and touch. <xref ref-type="bibr" rid="B6">Coeckelbergh (2010a)</xref>, <xref ref-type="bibr" rid="B7">Coeckelbergh (2010b)</xref> argues that moral consideration can be shaped by social relations and moral appearances, not only by hidden mental properties. At the same time, the robot rights debate has prompted warnings about mistaking human projections for genuine moral status, and about the political and legal implications of granting rights language to artefacts (<xref ref-type="bibr" rid="B4">Birhane et al., 2024</xref>). A parallel warning applies to disembodied AI, where agency-like language and consciousness rhetoric can encourage misplaced deference to outputs, over-trust in system competence, or the diffusion of responsibility within organisations. The category mistake differs, but the governance risk remains; moralised narratives can substitute for clear accountability.</p>
<p>Empirically, the consequences are no longer speculative. If people judge violence against robots as morally charged, that changes how we should think about deployment, user training, and acceptable design affordances. Recent experimental work suggests that people&#x2019;s moral judgements about harming robots can be measured and meaningfully vary with context (<xref ref-type="bibr" rid="B1">Archer et al., 2025</xref>). Related studies show that both anthropomorphising and dehumanising tendencies can shape moral and social responses to robots, which matters for accountability and user protection (<xref ref-type="bibr" rid="B24">Wieringa et al., 2025</xref>). These findings strengthen the practical case for separating moral status from moral treatment: even if no credible evidence for robot consciousness exists, predictable human responses generate ethically relevant duties regarding design, disclosure, and the prevention of manipulation and dependency.</p>
<p>So the moral landscape has at least two layers: (i) moral status claims, meaning whether there is consciousness and therefore potential moral patiency (<xref ref-type="bibr" rid="B9">Dehaene et al., 2017</xref>), and (ii) moral treatment dynamics, meaning how humans will treat the system as minded, and what duties arise from predictable human responses, including risks of attachment, deference, and manipulation (<xref ref-type="bibr" rid="B2">Banks and Bowman, 2023</xref>; <xref ref-type="bibr" rid="B6">Coeckelbergh, 2010a</xref>; <xref ref-type="bibr" rid="B24">Wieringa et al., 2025</xref>). The key point is that these layers invite different evidential standards. Moral status claims require unusually stringent justification. Moral treatment dynamics can be assessed empirically through interaction studies and deployment evidence, without presupposing consciousness.</p>
<p>Robots intensify the second layer. Disembodied AI can also elicit social responses, but embodiment amplifies and diversifies the channels of influence. This is why ethical and conscious robots are not simply ethical and conscious AI with a body. The body is part of the moral interface. The practical consequence is that ethical appraisal should not treat &#x201c;consciousness like&#x201d; impressions as interchangeable across domains. A robot&#x2019;s embodied cues can generate moral appearance that demands design and governance responses even in the absence of consciousness, whereas disembodied AI more often generates moral appearance through epistemic authority and linguistic performance, demanding different safeguards, disclosure norms, and accountability structures.</p>
</sec>
<sec id="s1-4">
<title>Agency and responsibility in embodied systems</title>
<p>Ethical systems research often slides between two senses of agency: causal agency, meaning the system makes things happen, and moral agency, meaning the system can be held responsible in a normative sense (<xref ref-type="bibr" rid="B10">Floridi and Sanders, 2004</xref>; <xref ref-type="bibr" rid="B22">Vallor and Vierkant, 2024</xref>). Taxonomies of machine ethics make room for machines that have ethical impact without being full ethical agents (<xref ref-type="bibr" rid="B16">Moor, 2006</xref>). Robots strain responsibility practices because their behaviour is situated, adaptive, and sometimes learned, creating well known responsibility gaps when outcomes are not reasonably foreseeable by designers or operators (<xref ref-type="bibr" rid="B13">Matthias, 2004</xref>). This section clarifies why embodiment makes that slide more consequential, and why responsibility attribution cannot be repaired by treating ethics as an internal module alone.</p>
<p>Adding an ethical reasoning module does not close this gap. A robot can deliberate well and still be embedded in a pipeline of incentives, training regimes, user pressures, and physical constraints that distribute control. Frameworks for meaningful human control aim to preserve accountability through design requirements that connect human reasons, oversight, and system behaviour (<xref ref-type="bibr" rid="B19">Santoni De Sio and Van Den Hoven, 2018</xref>). More recent conceptual work shows that meaningful human control is not a single simple requirement but a family of interpretations that shift across domains and governance goals, making operationalisation and measurement central research problems (<xref ref-type="bibr" rid="B18">Robbins, 2024</xref>; <xref ref-type="bibr" rid="B23">Verhagen et al., 2024</xref>). This is precisely where the AI versus robot distinction yields practical leverage. Disembodied systems often distribute responsibility through institutions and data pipelines, whereas embodied robots additionally distribute it through physical coupling, real time constraints, and interaction dynamics that can make oversight fragile. Treating these cases as equivalent invites either over attribution of responsibility to the artefact, or under specification of the human and organisational conditions required for accountable deployment.</p>
<p>Embodied cognition perspectives underline why this mapping is not optional. If cognition is tightly coupled to action and environment, then ethical behaviour will also be tightly coupled to environment (<xref ref-type="bibr" rid="B25">Wilson, 2002</xref>). The extended mind tradition likewise emphasises that cognitive processes can be distributed across agent and world, which is a useful warning against locating ethical competence in an internal module alone (<xref ref-type="bibr" rid="B5">Clark and Chalmers, 1998</xref>). In robots, this coupling is literal. Treating robotics as merely applied AI risks under specifying the physical and organisational conditions under which ethical behaviour can be expected. The point is not to deny sociotechnical continuity, but to ensure that evaluation and governance track the full control loop: sensors, actuation, bodies, spaces, and organisational incentives. Without that, responsibility gap discussions risk becoming abstract, while the most decisive sources of harm and accountability failure remain located in deployment conditions rather than in models.</p>
</sec>
</sec>
<sec id="s2">
<title>Discussion: keeping the baskets distinct without splitting the field</title>
<p>Separating robotics and AI conceptually may not require separating communities or journals. It does, however, require resisting a hidden equivalence. For research on ethical and conscious systems, I propose a simple reporting norm that would reduce conceptual slippage while still encouraging integrated work. Every contribution should state, explicitly, what its target of ethical analysis is and where embodiment enters the story (<xref ref-type="bibr" rid="B17">Riesen, 2025</xref>; <xref ref-type="bibr" rid="B21">Torras, 2024</xref>).</p>
<p>Concretely, authors could answer four questions at the outset, so that subsequent claims about ethics, consciousness, and responsibility remain anchored to stable evaluative objects.<list list-type="alpha-upper">
<list-item>
<p>What is the system boundary: model, robot platform, or sociotechnical deployment (<xref ref-type="bibr" rid="B17">Riesen, 2025</xref>).</p>
</list-item>
<list-item>
<p>What is the embodiment level: disembodied service, screen based agent, mobile robot, or physically interactive robot (<xref ref-type="bibr" rid="B15">Moon et al., 2021</xref>).</p>
</list-item>
<list-item>
<p>What is the ethical target: reasoning competence, behaviour in context, or governance and accountability (<xref ref-type="bibr" rid="B16">Moor, 2006</xref>; <xref ref-type="bibr" rid="B19">Santoni De Sio and Van Den Hoven, 2018</xref>).</p>
</list-item>
<list-item>
<p>What is the consciousness claim, if any: access consciousness, phenomenal consciousness, or human perceived mindedness (<xref ref-type="bibr" rid="B9">Dehaene et al., 2017</xref>; <xref ref-type="bibr" rid="B11">Gray et al., 2007</xref>).</p>
</list-item>
</list>
</p>
<p>This norm is intentionally lightweight. It does not demand that every paper cover every target. It asks only that authors specify what they are and are not claiming, and which kinds of evidence would be relevant given the system boundary and embodiment level. At the same time, this norm is not bureaucratic; it is a way of aligning methods with moral risk. For instance, introspective self control mechanisms evaluated only through decision traces can miss their primary ethical function in robots, namely, regulating physical action under uncertainty and under constraint (<xref ref-type="bibr" rid="B19">Santoni De Sio and Van Den Hoven, 2018</xref>; <xref ref-type="bibr" rid="B23">Verhagen et al., 2024</xref>). Similarly, work on visual self perception can mean very different things in an embodied robot, where self perception supports safe action and bodily boundaries, versus in disembodied AI, where self modelling may primarily concern epistemic confidence and decision calibration (<xref ref-type="bibr" rid="B17">Riesen, 2025</xref>; <xref ref-type="bibr" rid="B25">Wilson, 2002</xref>). In review terms, the four questions provide a simple checklist: are the evaluation methods appropriate to the system&#x2019;s dominant risk space, and are the governance implications proportional to the kind of harm and responsibility distribution that is plausible in deployment. In policy terms, the same clarifications reduce the temptation to generalise from a narrow benchmark or laboratory study to broad claims about ethical alignment or consciousness.</p>
<p>To return to the motivating question: are ethical and conscious AI and ethical and conscious robots morally the same? Philosophy suggests a split answer. Moral patiency, if grounded in conscious experience, is in principle independent of embodiment (<xref ref-type="bibr" rid="B9">Dehaene et al., 2017</xref>). Moral practice, responsibility, and influence are deeply shaped by embodiment and social relation, including moral appearances and predictable human projections (<xref ref-type="bibr" rid="B6">Coeckelbergh, 2010a</xref>; <xref ref-type="bibr" rid="B11">Gray et al., 2007</xref>; <xref ref-type="bibr" rid="B15">Moon et al., 2021</xref>). Treating them as the same question risks building consciousness sounding narratives for systems whose primary ethical risks lie in bodies and settings, while also overlooking that disembodied systems could, in principle, raise serious moral status questions if consciousness ever becomes a defensible empirical claim. The distinction defended here therefore does not split the field. It makes the field easier to govern, by ensuring that ethical evaluation and consciousness talk remain accountable to the specific interaction context, harm profile, and responsibility pathway at stake.</p>
<p>Keeping two baskets in view is not pedantry. It is a condition for building systems that are ethically aligned and ethically governable. A research topic that jointly considers ethical and conscious AI and robots is valuable. My suggestion here is simply that the joint framing should be accompanied by a minimal discipline of specification, so that interdisciplinary work avoids category mistakes, and so that disagreements can be traced to genuine normative differences rather than to shifting system boundaries, embodiment levels, or targets of ethical appraisal.</p>
</sec>
</body>
<back>
<sec sec-type="author-contributions" id="s3">
<title>Author contributions</title>
<p>AK: Validation, Formal Analysis, Project administration, Methodology, Supervision, Data curation, Conceptualization, Software, Writing &#x2013; original draft, Investigation, Funding acquisition, Visualization, Resources, Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="COI-statement" id="s5">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s6">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s7">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Archer</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wilks</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sommer</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Evaluating the morality of violence against robots</article-title>. <source>Sci. Rep.</source> <volume>15</volume> (<issue>1</issue>), <fpage>42846</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-025-24875-y</pub-id>
<pub-id pub-id-type="pmid">41315423</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Banks</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Bowman</surname>
<given-names>N. D.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Perceived moral patiency of social robots: explication and scale development</article-title>. <source>Int. J. Soc. Robotics</source> <volume>15</volume> (<issue>1</issue>), <fpage>101</fpage>&#x2013;<lpage>113</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-022-00950-6</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bartneck</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Kuli&#x107;</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Croft</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Zoghbi</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Measurement instruments for the anthropomorphism, animacy, likeability, perceived intelligence, and perceived safety of robots</article-title>. <source>Int. J. Soc. Robotics</source> <volume>1</volume> (<issue>1</issue>), <fpage>71</fpage>&#x2013;<lpage>81</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-008-0001-3</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Birhane</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Van Dijk</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Pasquale</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Debunking robot rights metaphysically, ethically, and legally</article-title>. <source>First Monday</source> <volume>29</volume> (<issue>4</issue>). <pub-id pub-id-type="doi">10.5210/fm.v29i4.13628</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Clark</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Chalmers</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>The extended mind</article-title>. <source>Analysis</source> <volume>58</volume> (<issue>1</issue>), <fpage>7</fpage>&#x2013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.1093/analys/58.1.7</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Coeckelbergh</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2010a</year>). <article-title>Moral appearances: emotions, robots, and human morality</article-title>. <source>Ethics Inf. Technol.</source> <volume>12</volume> (<issue>3</issue>), <fpage>235</fpage>&#x2013;<lpage>241</lpage>. <pub-id pub-id-type="doi">10.1007/s10676-010-9221-y</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Coeckelbergh</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2010b</year>). <article-title>Robot rights? Towards a social-relational justification of moral consideration</article-title>. <source>Ethics Inf. Technol.</source> <volume>12</volume> (<issue>3</issue>), <fpage>209</fpage>&#x2013;<lpage>221</lpage>. <pub-id pub-id-type="doi">10.1007/s10676-010-9235-5</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Coggins</surname>
<given-names>T. N.</given-names>
</name>
<name>
<surname>Steinert</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The seven troubles with norm-compliant robots</article-title>. <source>Ethics Inf. Technol.</source> <volume>25</volume> (<issue>2</issue>), <fpage>29</fpage>. <pub-id pub-id-type="doi">10.1007/s10676-023-09701-1</pub-id>
<pub-id pub-id-type="pmid">37123285</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dehaene</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lau</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Kouider</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>What is consciousness, and could machines have it?</article-title> <source>Science</source> <volume>358</volume> (<issue>6362</issue>), <fpage>486</fpage>&#x2013;<lpage>492</lpage>. <pub-id pub-id-type="doi">10.1126/science.aan8871</pub-id>
<pub-id pub-id-type="pmid">29074769</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Floridi</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Sanders</surname>
<given-names>J. W.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>On the morality of artificial agents</article-title>. <source>Minds Mach.</source> <volume>14</volume> (<issue>3</issue>), <fpage>349</fpage>&#x2013;<lpage>379</lpage>. <pub-id pub-id-type="doi">10.1023/B:MIND.0000035461.63578.9d</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gray</surname>
<given-names>H. M.</given-names>
</name>
<name>
<surname>Gray</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Wegner</surname>
<given-names>D. M.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Dimensions of mind perception</article-title>. <source>Science</source> <volume>315</volume> (<issue>5812</issue>), <fpage>619</fpage>. <pub-id pub-id-type="doi">10.1126/science.1134475</pub-id>
<pub-id pub-id-type="pmid">17272713</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jobin</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ienca</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Vayena</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>The global landscape of AI ethics guidelines</article-title>. <source>Nat. Mach. Intell.</source> <volume>1</volume> (<issue>9</issue>), <fpage>389</fpage>&#x2013;<lpage>399</lpage>. <pub-id pub-id-type="doi">10.1038/s42256-019-0088-2</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Matthias</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>The responsibility gap: ascribing responsibility for the actions of learning automata</article-title>. <source>Ethics Inf. Technol.</source> <volume>6</volume> (<issue>3</issue>), <fpage>175</fpage>&#x2013;<lpage>183</lpage>. <pub-id pub-id-type="doi">10.1007/s10676-004-3422-1</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mittelstadt</surname>
<given-names>B. D.</given-names>
</name>
<name>
<surname>Allo</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Taddeo</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wachter</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Floridi</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>The ethics of algorithms: mapping the debate</article-title>. <source>Big Data and Soc.</source> <volume>3</volume> (<issue>2</issue>), <fpage>2053951716679679</fpage>. <pub-id pub-id-type="doi">10.1177/2053951716679679</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moon</surname>
<given-names>Aj.</given-names>
</name>
<name>
<surname>Rismani</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Van Der Loos</surname>
<given-names>H. F. M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Ethics of corporeal, co-present robots as agents of influence: a review</article-title>. <source>Curr. Robot. Rep.</source> <volume>2</volume> (<issue>2</issue>), <fpage>223</fpage>&#x2013;<lpage>229</lpage>. <pub-id pub-id-type="doi">10.1007/s43154-021-00053-6</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moor</surname>
<given-names>J. H.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>The nature, importance, and difficulty of machine ethics</article-title>. <source>IEEE Intell. Syst.</source> <volume>21</volume> (<issue>4</issue>), <fpage>18</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1109/MIS.2006.80</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Riesen</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>A sociotechnological-system approach to AI ethics</article-title>. <source>AI and Soc.</source> <volume>40</volume> (<issue>8</issue>), <fpage>6231</fpage>&#x2013;<lpage>6243</lpage>. <pub-id pub-id-type="doi">10.1007/s00146-025-02335-5</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Robbins</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>The many meanings of meaningful human control</article-title>. <source>AI Ethics</source> <volume>4</volume> (<issue>4</issue>), <fpage>1377</fpage>&#x2013;<lpage>1388</lpage>. <pub-id pub-id-type="doi">10.1007/s43681-023-00320-6</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Santoni De Sio</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Van Den Hoven</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Meaningful human control over autonomous systems: a philosophical account</article-title>. <source>Front. Robotics AI</source> <volume>5</volume>, <fpage>15</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2018.00015</pub-id>
<pub-id pub-id-type="pmid">33500902</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sharkey</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sharkey</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Granny and the robots: ethical issues in robot care for the elderly</article-title>. <source>Ethics Inf. Technol.</source> <volume>14</volume> (<issue>1</issue>), <fpage>27</fpage>&#x2013;<lpage>40</lpage>. <pub-id pub-id-type="doi">10.1007/s10676-010-9234-6</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Torras</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Ethics of social robotics: individual and societal concerns and opportunities</article-title>. <source>Annu. Rev. Control, Robotics, Aut. Syst.</source> <volume>7</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>18</lpage>. <pub-id pub-id-type="doi">10.1146/annurev-control-062023-082238</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vallor</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Vierkant</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Find the gap: AI, responsible agency and vulnerability</article-title>. <source>Minds Mach.</source> <volume>34</volume> (<issue>3</issue>), <fpage>20</fpage>. <pub-id pub-id-type="doi">10.1007/s11023-024-09674-0</pub-id>
<pub-id pub-id-type="pmid">38855350</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Verhagen</surname>
<given-names>R. S.</given-names>
</name>
<name>
<surname>Neerincx</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Tielman</surname>
<given-names>M. L.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Meaningful human control and variable autonomy in human-robot teams for firefighting</article-title>. <source>Front. Robotics AI</source> <volume>11</volume>, <fpage>1323980</fpage>. <pub-id pub-id-type="doi">10.3389/frobt.2024.1323980</pub-id>
<pub-id pub-id-type="pmid">38361604</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wieringa</surname>
<given-names>M. S.</given-names>
</name>
<name>
<surname>Daalmans</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>M&#xfc;ller</surname>
<given-names>B. C. N.</given-names>
</name>
<name>
<surname>Bijlstra</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Bosse</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Exploring laypeople&#x2019;s moral considerations towards social robots</article-title>. <source>Int. J. Soc. Robotics</source> <volume>17</volume> (<issue>11</issue>), <fpage>2375</fpage>&#x2013;<lpage>2389</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-024-01195-1</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wilson</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Six views of embodied cognition</article-title>. <source>Psychonomic Bull. and Rev.</source> <volume>9</volume> (<issue>4</issue>), <fpage>625</fpage>&#x2013;<lpage>636</lpage>. <pub-id pub-id-type="doi">10.3758/BF03196322</pub-id>
<pub-id pub-id-type="pmid">12613670</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2330340/overview">Mustansar Ghazanfar</ext-link>, University of East London, United Kingdom</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/134043/overview">Antonio Chella</ext-link>, University of Palermo, Italy</p>
</fn>
</fn-group>
</back>
</article>