<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="review-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Virtual Real.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Virtual Reality</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Virtual Real.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2673-4192</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1753188</article-id>
<article-id pub-id-type="doi">10.3389/frvir.2026.1753188</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Enhancing user authentication and security in metaverse: a review of innovative techniques and the role of zero-trust models</article-title>
<alt-title alt-title-type="left-running-head">Ethiraj and Ellapan</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frvir.2026.1753188">10.3389/frvir.2026.1753188</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Ethiraj</surname>
<given-names>Jeevalatha</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<uri xlink:href="https://loop.frontiersin.org/people/3292098"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Ellapan</surname>
<given-names>Vijayan</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3292172"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<institution>School of Computer Science Engineering and Information Systems, Vellore Institute of Technology</institution>, <city>Vellore</city>, <state>Tamil Nadu</state>, <country country="IN">India</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Vijayan Ellapan, <email xlink:href="mailto:evijayan@vit.ac.in">evijayan@vit.ac.in</email>
</corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-18">
<day>18</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>7</volume>
<elocation-id>1753188</elocation-id>
<history>
<date date-type="received">
<day>24</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>08</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>21</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Ethiraj and Ellapan.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Ethiraj and Ellapan</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-18">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Metaverse is a paradigm where people can interact in virtual environments using current technologies. The Metaverse is being advanced by artificial intelligence (AI) as it develops. These technologies have the power to significantly change how people communicate and live. Examples of these integrations include automating repetitive operations, creating customized user experiences based on their preferences and behaviors, and freeing up resources for more intricate and creative endeavors. The roles of artificial intelligence (AI) and other cutting-edge technologies in the Metaverse, such as the Internet of Things (IoT), virtual reality (VR), augmented reality (AR), extended reality (XR), and natural language processing (NLP), are comprehensively reviewed in this study. A systematic literature review was used to successfully achieve the goals of this study. In addition to addressing the research topics, this systematic evaluation of the literature aims to improve the understanding of security and privacy issues in the Metaverse and to suggest solutions. The Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) guidelines were used in this work to ensure transparency. Despite being hailed as a promising new technology, early experiences in Metaverse suggest that security procedures need to be strengthened and issues addressed to fulfill its promises. Hence, this work attempts to address Metaverse Security issues by leveraging zero-trust principles where Zero Trust Architecture (ZTA) frameworks for Metaverse are examined with the aim of enhancing security in the applications of metaverse.</p>
</abstract>
<kwd-group>
<kwd>augmented reality</kwd>
<kwd>extended reality</kwd>
<kwd>metaverse</kwd>
<kwd>user authentication</kwd>
<kwd>virtual reality</kwd>
<kwd>zero-trust security</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research is funded by the Vellore Institute of Technology, Vellore, India.</funding-statement>
</funding-group>
<counts>
<fig-count count="7"/>
<table-count count="3"/>
<equation-count count="0"/>
<ref-count count="62"/>
<page-count count="16"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Virtual Reality and Human Behaviour</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Metaverse is a virtualized world that blends real and digital worlds, offering new venues for social interactions to users. Metaverse is capable of integrating cloud computing, IoT and AI for accomplishing tasks that normally are human dependent. Examples include comprehending speeches or analyzing images for informed decisions. The term Metaverse includes &#x201c;meta&#x201d; meaning beyond and &#x201c;verse&#x201d; representing a condensed form of cosmos. This term was first used by Neal Stephenson in his science fiction novel &#x201c;Snow Crash&#x201d; (1992) (<xref ref-type="bibr" rid="B27">Huynh-The et al., 2023</xref>) for digital avatars interacting with virtual environments. His concept was later expanded by Mark Zuckerberg when he named Facebook as Meta, a sign of progress towards Metaverse constructions. It is possible for people to realize immersive experiences while interacting with Metaverse&#x2019;s virtual locations. It is possible for Metaverse to revolutionize multiple domains including consumer products, healthcare, education, and finance where certain supporting technologies can contribute to the realization of this vision (<xref ref-type="bibr" rid="B36">Moztarzadeh et al., 2023</xref>). The overall technological ecology supporting Metaverse is shown in <xref ref-type="fig" rid="F1">Figure 1</xref>.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Overall technological ecology supporting metaverse.</p>
</caption>
<graphic xlink:href="frvir-07-1753188-g001.tif">
<alt-text content-type="machine-generated">Diagram illustrating XR (Extended Reality) at the center, connected to AR, VR, and various digital domains including Web 3.0, blockchain, financial systems, mobile platforms, entertainment, IoT, 5G, artificial intelligence, social networks, and metaverse.</alt-text>
</graphic>
</fig>
<p>Businesses using Metaverse can offer customers unique product experiences while Blockchains ensure security supporting themes for decentralized finance or creative economies (<xref ref-type="bibr" rid="B23">Gupta et al., 2022</xref>). Metaverse can also include mobile applications along with entertainment platforms (OTTs). The combination of IoT and AI can assist Metaverse in creating individualized experiences (<xref ref-type="bibr" rid="B54">Wang H. et al., 2023</xref>) using 5G/6G connections with low latencies as the binding force of Metaverse elements. AI can influence user interactions with Metaverse and offer more natural and intuitive interactions, adaptable interfaces, and personalized content recommendations. VR experiences can examine user preferences and behaviors in virtual worlds to draw conclusions, for example, AI in VR applications can detect user movements/gazes, enhancing naturalness and intuitiveness of their interactions. AI can also improve immersive VR experiences by simulating sound or lighting changes in response to user preferences and activities. The rendering process of 3D objects involves usages of lights/shadows, textures and colors processed by rendering engines which generate tangible realistic illusions (<xref ref-type="bibr" rid="B40">Ponchio and Hormann, 2008</xref>). Metaverse functions can be used to combine AR with physical environments for tangible representations of the internet. They can provide consumers with graphics, video streams, and realistic holographic experiences. Users can interact with Pok&#xe9;mon like characters by superimpositions even on hand phone cameras. AR applications can be labeled as marker-based or markerless. Image recognition employs marker-based AR to locate markers (items that launch AR applications). Markerless displays do not require markers and allow users to select the digital material to be presented. 
These apps are instances of location-based augmented reality, providing information depending on the user&#x2019;s location (<xref ref-type="bibr" rid="B9">Carotenuto et al., 2020</xref>). Markerless AR applications use device cameras, GPS, compasses, and accelerometers to collect environmental data. Object detections are the mainstay of AR applications which enable computers to recognize and find things of interest in images or videos (<xref ref-type="bibr" rid="B59">Yong et al., 2021</xref>). Intelligent and responsive Metaverse applications (<xref ref-type="bibr" rid="B44">Reiners et al., 2021</xref>) can also be created by combining AI with XR. Examples Metaverse applications in this usage include connecting remote workspaces, virtual healthcare, and immersive entertainments. XR usages offer consumers smooth transitions between virtual and real worlds by linking these two realms. XR includes both AR and VR (<xref ref-type="bibr" rid="B50">Venkatesan et al., 2021</xref>). These technologies are all part of building Metaverse visual environments. Avatars and followers are expected to represent human users in Metaverse. IoT that connects networked devices for exchange of information is another technology that can be proliferated in Metaverse applications. AI and IoT have the potential to fundamentally transform Metaverse interactions into seamless and intuitive experiences. AI can be used to examine and assess integration of virtual and physical worlds utilizing devices. Application developers can provide intelligent and responsive Metaverse applications for better user experiences (<xref ref-type="bibr" rid="B21">Ghosh et al., 2018</xref>). Though, Metaverse offer a variety of benefits to both individuals and businesses by combining AR/VR/XR technologies with AI, but has to be safeguarded against risks such as frauds, identity thefts, and data breaches, necessitating strong privacy, identity management, and asset protection procedures. 
Securing digital identities, preventing unauthorized tracking, managing virtual economies, and addressing AI/IoT risks are some of the major challenges that arise as Metaverse data collection grows (biometrics, movements), and vulnerabilities emerge from integrated technologies (VR, AR, XR, AI, IoT).</p>
<sec id="s1-1">
<label>1.1</label>
<title>Challenges faced by metaverse applications</title>
<p>Metaverse is unquestionably built on AR/VR/XR technologies which enable 3D virtual environments for user interactions. Initial Metaverse applications were built on web 3.0. Though, social networks were expected to be the first to implement Metaverse applications, the efforts faced constrained from technological infrastructures, interoperability and social/ethical considerations. The technologies used are still in their early stages and their ability to create smooth virtual environments are hampered by the absence of common standards. Currently, a number of infrastructure and technical problems are impeding the development of a fully functional and immersive metaverse. In a shared environment, current infrastructure finds it difficult to accommodate a large number of concurrent users without sacrificing speed. A trade-off exists between the processing power needed for realistic rendering and real-time interaction and guaranteeing a seamless user experience for numerous participants. One major challenge is the requirement for high-bandwidth data transfer and low-latency communication for real-time 3D settings, particularly in places with poor internet connectivity. Hence, immersion and accessibility are compromised. High-end immersive experiences require expensive equipment, such virtual reality headsets, which are too costly for the average person and hinder general adoption. Because the metaverse requires a massive amount of processing power&#x2014;possibly 1,000 times present usage&#x2014;the energy-intensive nature of enabling technologies like GPUs and blockchain networks raises significant environmental and ecological issues. Real-time generative object recognition and large-capacity 360-degree visual data processing are challenging jobs for existing systems. The evolution of the Metaverse is riddled with philosophical and practical contradictions. 
This leads to a basic conflict between safeguarding user security and privacy and improving the customized user experience. Regarding the societal impact, the metaverse has the potential to improve business, healthcare, and education, but it also runs the risk of escalating already-existing social injustices (a &#x201c;digital divide&#x201d;), encouraging inactive lifestyles, and causing mental health problems if there is less in-person connection. Massive volumes of sensitive user data, including physiological and behavioral information, must be collected due to the metaverse&#x2019;s immersive nature. The following are significant security issues in the metaverse:<list list-type="bullet">
<list-item>
<p>
<bold>Data Privacy &#x26; Surveillance:</bold> The extensive collection of biometric, behavioral, and personal data (such as eye movement and facial structure) poses serious privacy risks.</p>
</list-item>
<list-item>
<p>
<bold>Identity &#x26; Digital Assets:</bold> New security measures for virtual ownerships are required due to the significant risks associated with identity fraud and theft of digital assets (virtual properties, currencies).</p>
</list-item>
<list-item>
<p>
<bold>Device Vulnerabilities:</bold> IoT devices in the metaverse are susceptible to device hijacks, hacking, and infections.</p>
</list-item>
<list-item>
<p>
<bold>Third-Party Tracking:</bold> Due to device connectivity, user controls may be compromised by extensive cross-app and third-party tracking.</p>
</list-item>
<list-item>
<p>
<bold>AI &#x26; IoT Risks:</bold> When AI and IoT are combined, new attack surfaces may be created.</p>
</list-item>
</list>
</p>
<p>Therefore, security concerns impact not only traditional IT but also the integrity of virtual marketplaces, digital autonomy, and societal stability due to the convergence of virtual and physical realities in the metaverse. Therefore, these issues necessitate all-encompassing strategies like as privacy by design, user education, and new legislative frameworks, where it is imperative to incorporate privacy from the beginning into the foundations of metaverse systems. In order to regulate transactions, data, and privacy by including security at every level, from networks and devices to software and user interactions, people must also become more knowledgeable about risks and safe practices. Zero Trust Architecture (ZTA), which employs strict access restrictions and continuous verifications, is a workable solution for metaverse security. ZTA operates under the principle of &#x201c;never trust, always verify,&#x201d; as opposed to traditional security models that permit broad access after a preliminary verification.</p>
</sec>
<sec id="s1-2">
<label>1.2</label>
<title>Research methodology</title>
<p>The Literature reviews integrate bodies of existing information on certain topics or areas of interest, identify significant biases and knowledge gaps in literature for suggesting possible avenues for future studies. Several academic databases, including ACM Digital Library, MDPI, Science Direct, IEEE Xplore and Google Scholar were searched in this work to conduct a comprehensive review. Defined keywords were used in search strings for clear outcomes. Since there were no participants in this systematic literature review, neither ethical approval nor responder permission were obtained prior to study&#x2019;s start. Relevant resources were gathered using multi-step, meticulous searches. The original search criteria were &#x201c;AI in Metaverse&#x201d;, &#x201c;Metaverse Security&#x201d;, &#x201c;ZTA&#x201d;, &#x201c;Metaverse Security and Privacy Issues&#x201d;, &#x201c;Metaverse Security Solutions&#x201d; and &#x201c;challenges of AI in Metaverse&#x201d;, which were crafted to yield a wide variety of pertinent publications based on search phrases. Optimization of the search results were achieved by including relevant terms in the search queries for paper between 2018 and 2023 for selection. This work employed an iterative process comprising of systematic collections, screening, analysis, and evaluation of relevant research articles about metaverse for providing impartial answers to research questions of privacy, security, and with prospective solutions. After that, the distribution of 149 publications regarding the application of AI in the Metaverse throughout the previously specified period was gathered and analysed.<list list-type="simple">
<list-item>
<label>&#x27a2;</label>
<p>
<bold>Sampling:</bold> The literature review contained only relevant articles from the literature search that satisfied predefined inclusion and exclusion criteria. The publications were gathered from internet scientific sources using relevant keywords associated with the research topic. After that, the publications underwent recurrent assessments and screenings using recognized techniques to determine their suitability for the research field.</p>
</list-item>
<list-item>
<label>&#x27a2;</label>
<p>
<bold>Screening:</bold> After the study articles have been collected from the digital databases, they must be properly examined. This is done to filter out information that might not be pertinent to the subject matter. The search process, which involved regularly applying inclusion and exclusion criteria to the articles, was consistently relied upon.</p>
</list-item>
<list-item>
<label>&#x27a2;</label>
<p>
<bold>Inclusion Criteria:</bold> One essential part of the screening procedure is the inclusion criteria. The foundation for tackling the research issue will be the elements that are desired or have already been recognized. The elements of the inclusion criteria used in this work were:</p>
<list list-type="simple">
<list-item>
<p>&#x25CB; Papers related to the search keywords and published between 2018 and 2023 only were selected</p>
</list-item>
<list-item>
<p>&#x25CB; Only English-language publications were examined</p>
</list-item>
</list>
</list-item>
<list-item>
<label>&#x27a2;</label>
<p>
<bold>Exclusion Criteria:</bold> The second essential step in the screening process is the exclusion criteria, which form the basis for answering stated objectives or research questions. This process ensured that the search was limited to studies pertinent to answering the research topic. The criteria used were:</p>
<list list-type="simple">
<list-item>
<p>&#x25CB; The publications in question that did not fit the definition of scholarly research articles.</p>
</list-item>
<list-item>
<p>&#x25CB; The article copies of research articles that were already published.</p>
</list-item>
</list>
</list-item>
<list-item>
<label>&#x27a2;</label>
<p>
<bold>Application of Prisma 2020 in Data Collection:</bold> Relevant data and results directly related to the subject for this investigation were acquired using PRISMA 2020 flow diagram (Refer <xref ref-type="fig" rid="F2">Figure 2</xref>). The following steps and techniques were used to extract pertinent publications from literature review:</p>
<list list-type="bullet">
<list-item>
<p>
<bold>Step 1:</bold> Searching digital libraries was the first step where the phrases &#x201c;AI in Metaverse&#x201d;, &#x201c;Metaverse Security&#x201d;, &#x201c;ZTA&#x201d;, &#x201c;Metaverse Security and Privacy Issues&#x201d;, &#x201c;Metaverse Security Solutions&#x201d; and &#x201c;challenges of AI in Metaverse&#x201d;, were used. All articles passed through a preliminary screening procedure to match keywords and were selected for additional examination on satisfying the selection criteria. A total of 149 items were acquired during this phase. Thirty papers from ACM Digital Library, twelve from MDPI, forty-two from Science Direct, thirty-one from IEEE Xplore and thirty-four from Google Scholar.</p>
</list-item>
<list-item>
<p>
<bold>Step 2:</bold> This work considered the presence of similarity in findings and then implemented a procedure to eliminate duplicate publications. Resemblance in titles were deemed as duplicates leading to their removal where 36 articles remained.</p>
</list-item>
<list-item>
<p>
<bold>Step 3:</bold> An additional filtration criterion was implemented, including a combination of predetermined conditions such as newspapers, demos, extended abstracts, posters, and survey papers. Following the application of these exclusion criteria, the resulting number of articles collected was 63. Though the number may seem small, these are the most relevant to the research questions.</p>
</list-item>
</list>
</list-item>
</list>
</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Prisma diagram.</p>
</caption>
<graphic xlink:href="frvir-07-1753188-g002.tif">
<alt-text content-type="machine-generated">PRISMA flow diagram illustrating the identification, screening, eligibility, and inclusion process for new studies, detailing sources, exclusions, and final counts, with no studies found via other methods and sixty-three included studies.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s1-3">
<label>1.3</label>
<title>The scope of this work</title>
<p>The possibilities of using new technologies in the evolution of Metaverse are being examined by the scientific community (<xref ref-type="bibr" rid="B61">Zhao et al., 2022</xref>). AI and Metaverse integrations are crucial for a number of reasons:<list list-type="order">
<list-item>
<p>AI can improve user experiences by offering realistic and captivating virtual worlds, personalized content, and even intelligent virtual assistants that help users navigate the Metaverse;</p>
</list-item>
<list-item>
<p>AI can support accessibility and inclusion in the Metaverse by providing features like voice recognition, text-to-speech, and language translations;</p>
</list-item>
<list-item>
<p>AI can analyze and provide insightful information from data, improving users&#x2019; Metaverse experiences;</p>
</list-item>
<list-item>
<p>Increase productivity by automating Metaverse operations, such as content creation and moderation for human moderators; and</p>
</list-item>
<list-item>
<p>Support Security and privacy by providing intelligent threat detection, preventions, identity verifications, and authentications.</p>
</list-item>
</list>
</p>
<p>This study&#x2019;s contributions consist of:<list list-type="order">
<list-item>
<p>Surveying usage of AI in Metaverse technologies.</p>
</list-item>
<list-item>
<p>Listing functions of AI in NLP, XR, and computer vision.</p>
</list-item>
<list-item>
<p>Detailing on integrated technologies like blockchain, IoT, VR, AR, and XR with AI.</p>
</list-item>
<list-item>
<p>Detailing integrations between AI and Metaverse applications.</p>
</list-item>
<list-item>
<p>Suggesting ZTA for improved Metaverse security.</p>
</list-item>
<list-item>
<p>Solutions to challenges faced in Metaverse security.</p>
</list-item>
</list>
</p>
<p>This paper is structured as follows: <xref ref-type="sec" rid="s1">Section 1</xref> is the introduction, followed by a review of related literature in <xref ref-type="sec" rid="s2">Section 2</xref>. <xref ref-type="sec" rid="s3">Section 3</xref> details security in the metaverse with suggestions for implementing a ZTA framework for Metaverse security, while <xref ref-type="sec" rid="s4">Section 4</xref> presents results and discussions where solutions are provided. This paper concludes with future scope in <xref ref-type="sec" rid="s5">Section 5</xref>.</p>
</sec>
</sec>
<sec id="s2">
<label>2</label>
<title>Review of related literature</title>
<p>The Metaverse implies persistent, cohesive networks of 3D virtual worlds that might be the mainstay of online experiences. The concepts, which were previously limited to science fiction and video games, are currently being researched in an effort to transform every sector of the economy, i.e., banking, healthcare, consumer products and education. Metaverse is a virtual world made up of various landscapes, NPCs, and player characters (avatars) that combine digital and real components. AI technologies can be employed to create a Metaverse environment. Scenes depict a range of virtual environments, including a campus (<xref ref-type="bibr" rid="B18">Duan et al., 2021</xref>) and a museum (<xref ref-type="bibr" rid="B5">Beer, 2015</xref>). NPCs are necessary to give the gaming world a more realistic feel despite not being within the player&#x2019;s control (<xref ref-type="bibr" rid="B56">Warpefelt, 2015</xref>). Metaverse visual constructions embrace different technologies including AR, VR, and XR. These systems utilize a see-through display, either optical or video to capture visual data. Mesh models can rebuild structures with intricate surface patterns, like statues and paintings. For example, metropolitan buildings were rebuilt using mesh models and segmentation algorithms. Indoor spaces were also recreated as virtual worlds using the mesh approach (<xref ref-type="bibr" rid="B49">Tao et al., 2019</xref>). <xref ref-type="fig" rid="F3">Figure 3</xref> depicts AI branches employed for Metaverse.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Integration of AIs with metaverse.</p>
</caption>
<graphic xlink:href="frvir-07-1753188-g003.tif">
<alt-text content-type="machine-generated">Infographic summarizing the integration of artificial intelligence in the metaverse, showing applications like avatar creation, virtual world construction, and user interactivity, relevant AI branches such as computer vision and natural language processing, integrated technologies including virtual and augmented reality, blockchain, and digital twins, and highlights the importance of AI for enhanced user experience, access, security, data insights, and efficiency. Top and bottom sections display images of digital and physical worlds, emphasizing the connection between virtual and real environments.</alt-text>
</graphic>
</fig>
<p>Computer vision in AI enables computers to comprehend, extract, and use pertinent data that influences decisions from visual inputs, including digital images and videos. Computer vision gives robots the ability to see and understand, while AI gives them the ability to reason. Metaverse object detections based on AI can be generic like automobiles and people or specific like detecting faces, markers, and texts. Text detection techniques in XR have been found to be easily adaptable to Metaverse applications (<xref ref-type="bibr" rid="B48">Stotko et al., 2019</xref>). Facial recognitions are processes of identifying human faces by enclosing them in bounding boxes for recognitions. Facial detections have been studied extensively and reliable facial recognition settings in XR applications were suggested by (<xref ref-type="bibr" rid="B29">Kojic et al., 2020</xref>) NLP helps chatbots and virtual assistants to better interpret and respond to human inputs, resulting in more engaging interactions. They can be used in virtual environments to process and evaluate vast amounts of textual data (such as user reviews or social media posts). NLP can be used in Metaverse (see <xref ref-type="fig" rid="F4">Figure 4</xref>) for a variety of purposes. When people interact with Metaverse virtual environments, NLP can help them be more natural and intuitive. In hybrid societies, AI could be useful for learning to interact with people from different ages and backgrounds (<xref ref-type="bibr" rid="B8">C et al., 2022</xref>). Machine Translation applications are important when people from different nations interact amongst themselves. Any individual need not know a new as AI can automatically transform contents users&#x2019; languages, i.e., native language to a foreign language establishing proper communications when they write and speak in their native languages The knowledge acquired by training models can be used to improve machine translation performances. 
The work in (<xref ref-type="bibr" rid="B26">Hujon et al., 2023</xref>) examined transfer learning for translations of English-Khasi language pairs. Conversions of Texts-to-Images/Images-to-Texts enable creation of realistic images from textual descriptions. The initial phase entails language modelling and extractions of significant features to assist creation of visually realistic and semantically consistent target images in the second stage. One popular use of this technique is image captioning. The simplest methods involve extracting visual attributes from images or videos and feeding them into recurrent networks, which generated words one after the other. DALLE-2, a text-to-image generative model comprising decoders that generated images conditioned on image embeds, was unveiled by OpenAI in (<xref ref-type="bibr" rid="B42">Ramesh et al., 2022</xref>). Meta developed models that could assess text, images, and audio (multi-modal systems) for promoting AR in Metaverse. For Example, Data2vec (<xref ref-type="bibr" rid="B2">Ba et al., 2022</xref>), trained on three separate modalities was used for a variety of purposes including speech recognitions in audio samples; classifying image contents, analysing emotions and proofreading texts for grammar. Large language models (LLM) have greatly enhanced the potential of NLP. Large amounts of textual data, typically several terabytes in size, are utilized to train these models. GPT-3 (<xref ref-type="bibr" rid="B30">Leippold, 2023</xref>), the third iteration of OpenAI GPT language model with 175 billion ML parameters was one of the most amazing LLM. The application was modified to handle text summarization, answering questions, and language translations. IoT can help users interact more physically with AR/VR virtual environments. 
For Example, in healthcare, a body suit including sensors or medical devices attached to the user&#x2019;s body can monitor patient&#x2019;s status including health issues resulting in virtual reactions (<xref ref-type="bibr" rid="B39">Pereira et al., 2021</xref>). The results of the study in (<xref ref-type="bibr" rid="B16">Deveci et al., 2023</xref>) demonstrated that, due to its wide range of applications and practical opportunities, public transportation is the best place to integrate Metaverse into traffic management for safety. E-commerce experiences can also be improved by integrating IoT to monitor user&#x2019;s mobility in virtual changing rooms. IoT enable machines or individuals in far-off places to use or access physical or digital products (<xref ref-type="bibr" rid="B41">Promwongsa et al., 2021</xref>). IoT data can facilitate data exchanges and link the virtual and real worlds providing AR/VR contexts and situational awareness for tangible objects (<xref ref-type="bibr" rid="B32">Lu et al., 2021</xref>). For example, AR devices might initiate cyber-physical processes in response to events in the actual world or respond to the user&#x2019;s finger motions. Digital Twins (DT) is considered a vital component of the Metaverse, serving as a portal to VR services. DT is digital depictions of real-world items that can be used to connect systems, procedures, and operational assets to the outside world. This includes activities such as monitoring, displaying, assessing, and predicting. DT employs 3D models of intricate systems to create accurate, functional, and structured replicas of reality. DT can operate independently and carry out a range of duties, including commercial modification and technical instruction. DT or digital representations of actual states of specific physical objects are formed when real and virtual worlds merge due to IoT (<xref ref-type="bibr" rid="B35">Minerva et al., 2020</xref>). 
DT can provide virtual environments that are linked to physical workplaces, allowing remote maintenance. Urban planners and builders can use DT to implement development plans and identify potential urban projects in the future (<xref ref-type="bibr" rid="B1">Atlam and Wills, 2020</xref>). Developers and service providers can employ DT to create virtual models of operations and machinery that AI can remotely inspect (<xref ref-type="bibr" rid="B43">Rathore et al., 2021</xref>). 3D virtual worlds customized by avatars can give consumers authentic and engaging experiences. Virtual sceneries are created in Metaverse and presented to users for real immersive experiences through the use of computer vision, graphics, and visualization technologies (<xref ref-type="bibr" rid="B60">Zawish et al., 2024</xref>). Modeling tools and engines (like Unity3D or Unreal Engine) can be combined to create a virtual world. The process of creating scenes has been expedited to near-real time using animations. Scene identifications are a must for applications to connect and collaborate effectively in Metaverse. Deep learning (DL) approaches have been widely used in scene recognitions where Convolutional Neural Networks (CNNs) have been examined extensively (<xref ref-type="bibr" rid="B53">Wang et al., 2021</xref>). CNNs have also been used to shorten the prediction time of complicated scenes and extract low-level information. Fusion techniques which combine scene and object information with CNN recognition have also been suggested (<xref ref-type="bibr" rid="B46">Seong et al., 2020</xref>). SimpleNet and AlexNet were CNNs utilized in (<xref ref-type="bibr" rid="B15">Deng et al., 2021</xref>) to automatically recognize virtual scenes. The model was tested on virtual-world scenarios after being trained on Scene15 dataset. Avatars are virtual representations of Metaverse users that enable communication with AI agents or other users (<xref ref-type="bibr" rid="B14">Davis et al., 2009</xref>). 
Despite their physical boundaries, Metaverse Avatars allow users to engage with others in the real world. XR devices can generate these worlds and interact with objects making virtual and augmented realities become more accurate and lifelike. <xref ref-type="fig" rid="F4">Figure 4</xref> shows the procedures for involving Avatar in Metaverse.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Avatar involvement in metaverse.</p>
</caption>
<graphic xlink:href="frvir-07-1753188-g004.tif">
<alt-text content-type="machine-generated">Diagram illustrating the process of immersion creation in virtual environments, showing visual world construction elements like scene, NPC, and avatar, user interaction elements such as mobile input, feedback, and telepresence, with arrows connecting an image of a person using virtual reality on the left to a digital world on the right.</alt-text>
</graphic>
</fig>
<p>Facial reconstructions are essential steps in creating avatars. Traditionally, 3D Morphable Models (<xref ref-type="bibr" rid="B7">Booth et al., 2016</xref>) based on the Principal Component Analysis algorithm were used for this process; however, the data quality limited their ability to capture facial details. DL techniques like Generative Adversarial Network (GAN) have achieved a higher degree of realism. The work in (<xref ref-type="bibr" rid="B47">Shi et al., 2019</xref>) introduced an application that used facial scans instead of 2D images to create autonomous 3D avatars (<xref ref-type="bibr" rid="B10">Chal&#xe1;s et al., 2017</xref>). Microsoft introduced &#x201c;Rodin&#x201d;, 3D generative model that employs diffusions to create 3D digital avatars with neural radiances (<xref ref-type="bibr" rid="B55">Wang T. et al., 2023</xref>). VR Chat, a VR multiplayer game, enables users to create, customize, and personalize virtual worlds in which human-like avatars can impersonate people. In VR, facial detections are common, whereas word recognitions are prevalent in AR. Producing intricate and realistic 3D material based on real-world items and settings have become popular in VR and AR. One State-Of-The-Art algorithm utilized in the early stages of DL development was Faster R-CNN, a two-stage detector (<xref ref-type="bibr" rid="B45">Ren et al., 2017</xref>). Outstanding detection skills were shown by Yolo series and Single Shot Detector detectors in a wide range of classes and situations. These detectors have been effectively exploited by AR (<xref ref-type="bibr" rid="B3">Bahri et al., 2019</xref>). More sophisticated uses of AR object identifications aim to add 3D representations where users manipulate and change 3D virtual objects by attaching them to actual objects. AR item identification could improve and engage Metaverse&#x2019;s 3D environment. 
Text detection techniques have been used in the creation of Metaverse and have been thoroughly studied in XR (<xref ref-type="bibr" rid="B34">Mangiarua et al., 2020</xref>). A practical client-server framework for real-time capturing of static 3D scenes and multi-user exploration was developed in (<xref ref-type="bibr" rid="B48">Stotko et al., 2019</xref>). The differential framework for recording freestyle content in (<xref ref-type="bibr" rid="B33">Ma et al., 2021</xref>) encouraged development of lifelike &#x201c;virtual things&#x201d;. Recent DL-based algorithms have dramatically enhanced performance in datasets pertaining to urban driving that are intended for self-driving cars (<xref ref-type="bibr" rid="B12">Chen et al., 2018</xref>). Reliability of recognition contexts, including XR applications, has been proved by numerous studies on facial detections (<xref ref-type="bibr" rid="B31">Lu et al., 2020</xref>). XR is used for object detections in Metaverse. Avatars that are able to communicate with one other are used to represent users in Metaverse. First, intelligent devices make it easier to see and interact with the Metaverse, even if more complex rendering calculations are required. For example, real-time interactions, synchronizations, and complex reconstructions and renderings make multi-player interactions more challenging. Second, in order to access a variety of Metaverse services, users need to synchronize and navigate between the digital and physical worlds in different Metaverse zones. Lastly, multi-player interaction in a complex, large-scale scenario requires adaptive design and optimization of multidimensional network resources (<xref ref-type="bibr" rid="B25">Huang et al., 2022</xref>). <xref ref-type="fig" rid="F5">Figure 5</xref> displays Metaverse Interaction Components.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Metaverse interaction components.</p>
</caption>
<graphic xlink:href="frvir-07-1753188-g005.tif">
<alt-text content-type="machine-generated">Infographic illustrating four aspects of virtual or augmented reality interaction: user interaction (navigation, contact, editing), perception channels (vision, touch, taste, brain signal), channels of feedback (visual, auditory, tactile), and collaborative interaction (communication, synchronization, collaborative editing), each represented with relevant images and a pastel gradient background.</alt-text>
</graphic>
</fig>
<p>The work in (<xref ref-type="bibr" rid="B51">Vernaza et al., 2012</xref>) presented an interactive approach for connecting smart wearables, while (<xref ref-type="bibr" rid="B58">Wei et al., 2015</xref>) designed user interfaces that allowed customizations of characters in virtual worlds. The research community is becoming interested in user behavior analysis in Metaverse. Well-known clustering techniques can be utilized to comprehend the text written in a variety of virtual worlds, as well as avatar behaviors in virtual settings (<xref ref-type="bibr" rid="B6">Bello-Orgaz and Camacho, 2013</xref>). The study in (<xref ref-type="bibr" rid="B4">Barin et al., 2017</xref>) used first-person VR headsets to study accident rates in high-performance drone racing as Metaverse enables users to interact with other animated non-human objects.</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Security aspects of metaverse</title>
<p>Metaverse security entails protecting against new and amplified digital threats like identity thefts, data breaches, frauds, and virtual asset thefts. Digital identity thefts, frauds, and duplicate accounts result from the complexity of verifying actual people behind avatars. VR/AR systems&#x2019; continuous collection of biometrics, movements, and audio data pose serious privacy threats, raising security concerns in Metaverse. Studies focussing on Metaverse security issues raise alarms, but fail to provide probable solutions, making development of appropriate authentication techniques for metaverse an impending need. Though there are multiple options for enhancing security in Metaverse, authentications using strong techniques like ZTA are pertinent as metaverse applications involve many parties. The drawbacks of conventional security measures are the basis for the recommendation for ZTA deployments. Businesses that have been protected for decades by conventional security methods must now combat sophisticated attacks that elude both exterior and perimeter defenses (organizational internal networks). Data safety approaches are altered by ZTA&#x2019;s views that all accesses are potentially harmful (data-centric security). Self-Sovereign Identity (SSI), AI anomaly detection, and granular access controls (Policy Decisions/Enforcement Points) for avatars, assets, and transactions map to metaverse components that follow the &#x201c;never trust, always verify&#x201d; tenets. This guarantees that all digital interactions in virtual worlds&#x2014;from the movement of avatars to the execution of smart contracts&#x2014;are verified and approved. Using underlying presumptions and methods, the key differences between ZTA and conventional security procedures are highlighted (see <xref ref-type="table" rid="T1">Table 1</xref>).</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Key distinctions between ZTA and traditional security procedures.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Description</th>
<th align="center">Traditional security procedures</th>
<th align="center">ZTA</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Core concepts</td>
<td align="left">The network is the main focus of traditional security. Strong perimeter defences are essential. Intrusion prevention systems and firewalls separate trustworthy internal networks from untrusted external ones. Users frequently enjoy broad access and implicit trust within, devices</td>
<td align="left">User, devices, and applications requesting access undergo stringent verifications and equal considerations for all access requests irrespective of being internal or external to the network</td>
</tr>
<tr>
<td align="center">Network perimeters</td>
<td align="left">Based on established boundaries (device protection)</td>
<td align="left">Removes security barriers (data-centric security)</td>
</tr>
<tr>
<td align="center">Philosophy of access</td>
<td align="left">Conventional security procedures are like a moated castle. Attackers are kept out with the biggest obstacle being crossing the perimeter. Users can access systems without verification once inside resulting in blind areas where insider threats can do significant harm before being discovered. Lateral movements give attackers unrestricted movements, elevated privileges, and access to private information</td>
<td align="left">Model of constant verification as ZTA scrutinizes all access attempts regardless of sources. All requests of users for access are considered potentially dangerous (evaluations) and risk assessments, device checks, and ongoing identity verifications follow, reducing dangers associated with compromised credentials</td>
</tr>
<tr>
<td align="center">Technical mechanisms for data protection</td>
<td align="left">Perimeter-oriented traditional security procedures rely heavily on controls (firewalls using IP addresses and protocols) to prevent unwanted connections where IPS analyses traffic patterns to prevent known threats. External attacks are prevented but internal threats remain. The internal network is divided into distinct sections via network segmentation. This lessens the effect of security events. Strong trust is frequently produced in each region by traditional division, limiting control over particular workloads</td>
<td align="left">Micro segmentation (sets of rules for access in each segment) improves network security by establishing distinct zones for particular workloads or applications. Attackers can&#x2019;t access other systems if they manage to break one. Extra tests are executed in switching between micro segments. Identity and access management (IAM) and multi-factor authentication (MFA) verify user identities. Permissions are modified by dynamic access controls in response to real-time checks. This covers elements like suspicious activity or odd login timings</td>
</tr>
<tr>
<td align="center">Security targets</td>
<td align="left">Aim to protect data hosting infrastructures using physical security and access controls on servers. The concept is that the contents of the container are kept safe by fastening it. When data remains in fixed locations, this works well. However, it has trouble in modern settings where data is frequently sent. Data in readable formats is easily accessible to attackers with minimal security. Uniform security policies are likewise used by these models. The same level of protection that applies to general communications may also apply to sensitive financial details resulting on over/under protections of data</td>
<td align="left">Discovers and classifies data to identify sensitive information based on sensitivity and commercial effects, views data as assets that require protection. Risk levels and handling requirements are assigned by classification systems. Contextual access policies consider user&#x2019;s identity, device security, location, and the sensitivity of the requested data. Example finance manager accessing payroll after office hours, needs additional authorization</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Thus, in ZTA designs, data encryption is essential, both in transit and at rest and protects data. Attackers cannot read encrypted data without the decryption keys, even if they manage to access it. Hence, this work suggests ZTA paradigm which encompasses features like multiple authentications, data validations, audit trails, blockchain provenance, fine-grained access controls, privilege management user/entity behavior monitoring, trust and reputation management with punitive actions against irresponsible users and entities for metaverse security. <xref ref-type="fig" rid="F6">Figure 6</xref> shows the suggested Quadra layer ZTA model.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Suggested Quadra layer ZTA model.</p>
</caption>
<graphic xlink:href="frvir-07-1753188-g006.tif">
<alt-text content-type="machine-generated">Infographic outlining a multi-level identity and access management framework for the metaverse. It progresses from verified users or entities (Level Zero) through stages of identity establishment, authentication, access management, and metaverse participation, supported by trust and reputation management using blockchain and federated machine learning. Each stage includes distinct IDs, access policies, rendered environments, and compliance engines, displayed with concise icons and clear process flows.</alt-text>
</graphic>
</fig>
<p>
<bold>Level 0 - Verification and Registration</bold>: The anonymity provided by social networks is one of the main obstacles to reining in inappropriate user conduct. Fake news and misleading information are also commonly disseminated by bots. Therefore, it is suggested that user/entity verification be a requirement of the ZTA paradigm for Metaverse. Only pre-verified people or businesses will be able to register for Metaverse, which will lessen the difficulties of current platforms. A few user accounts now have the &#x201c;blue tick&#x201d; signal added by Twitter, signifying that the individual is authentic and verified. This prevented imposters from making undue profits and guaranteed that creators of genuine content were given credit for their efforts. Therefore, there is justification for requiring all users and entities to verify their identities and antecedents before granting them access to the public services and infrastructure of Metaverse (<xref ref-type="bibr" rid="B37">Nguyen et al., 2022</xref>). Twitter&#x2019;s identity verification process is easily automated, despite being a human process. In many nations, identity verification services created by governments using their citizen register or other databases are available to third-party service providers for a price. Financial service providers regularly employ Aadhaar (unique ID) identity verification APIs to meet their KYC (Know-Your-Customer) obligations in India. It makes sense to assume that enabling verified individuals to register for Metaverse and authenticating user identities will stop any unfavorable situations that frequently occur on social networking sites.</p>
<p>
<bold>Level 1 - Identity Managements and Authentications</bold>: This step handles identities through authentications and completes the preliminary authentication process prior to allowing users to access the Metaverse. Users may choose to use DT or avatars to access particular services or to publicly display their identities. Thus, identity management operations are responsible for mapping a user&#x2019;s avatar or digital twin and securely preserving it. Thus, the privacy of the user is safeguarded. Having a trustworthy third party map the verified user&#x2019;s true identity with her digital twin without the platform being aware of the mapping is another method for offering trustworthy identity management in the Metaverse (<xref ref-type="bibr" rid="B11">Chang et al., 2022</xref>). A multi-party request and consensus protocol involving the user, platform, other entities, and the trustworthy third-party identity management service can be used to decode the authentic identity if necessary.</p>
<p>
<bold>Level 2 - Access Managements</bold>: The access management layer is in charge of guaranteeing smooth access to the Metaverse and the AR/VR/XR rendering layer. This layer contains access regulations for each entity in the Metaverse, allowing for fine-grained access control at the entity level. The privilege lists, which provide escalating permission levels based on the entity&#x2019;s reputation, are linked to each entity&#x2019;s access policies (<xref ref-type="bibr" rid="B62">Zheng et al., 1998</xref>). Raising privilege levels is meant to promote constructive Metaverse user conduct. Access control rules and privilege lists are provided to Level-3 metaverse sentinels, who use them to continuously assess user/entity behavior and respond appropriately in accordance with pre-established consent and access/privilege levels. Platform-level access control lists are updated to restrict user or entity access to particular Metaverse regions.</p>
<p>
<bold>Level 3 - Control, Trust and Reputation Management</bold>: The rules and regulations engine, which produces a large number of policy/rules enforcer agents, stores the codified access and privilege policies, rules, and regulations. Permission to access particular sites or content, as well as to provide other users access to their private spaces, is recorded by the consent engine. Together with policy/rules enforcement agents, metaverse sentinels constantly watch over interactions and transactions between a wide range of entities, including business-to-business, business-to-consumer, and consumer-to-consumer, to spot any anomalies or infractions by any of the entities. Trust and reputation scores are updated based on the blockchain&#x2019;s tracking of all violations. This promotes the development and deployment of decentralized applications (dApps) while maintaining the security and caliber of data. Access permissions could be removed or demoted to lesser tiers if violations are committed repeatedly. Over time, reputation scores will be calculated, and entities with high scores will be granted access to a variety of more sophisticated features (<xref ref-type="bibr" rid="B22">Goel et al., 2022</xref>). Additionally, best-fit partners, potential violators, and other information will be found via federated machine learning. Another approach to guaranteeing data validation in this layer is the use of Decentralized Oracle Networks (DONs), which offer reliable and genuine off-chain real-world data that can be easily combined with on-chain data within Metaverse. This will guarantee data security and quality while facilitating the creation and deployment of innovative decentralized apps (dApps). Thus, the primary elements of the suggested ZTA paradigm for Metaverse are as follows:<list list-type="bullet">
<list-item>
<p>To establish virtual identities while protecting user privacy, Metaverse makes use of reputable third-party identity management services.</p>
</list-item>
<list-item>
<p>Check and validate identity&#x2019;s accountability and provenance.</p>
</list-item>
<list-item>
<p>Users have total control over their privacy settings and how other people engage with them, including the option to limit who can view their information in Metaverse.</p>
</list-item>
<list-item>
<p>DONs make it possible to merge trustworthy off-chain data with on-chain data.</p>
</list-item>
<list-item>
<p>Control access rights and privileges by maintaining the trust and reputation of all entities in Metaverse (<xref ref-type="bibr" rid="B24">Han and Kim, 2021</xref>).</p>
</list-item>
<list-item>
<p>Expel users from Metaverse based on serious violations, resulting in permanent revocation of access (<xref ref-type="bibr" rid="B17">Dissanayake, 2019</xref>).</p>
</list-item>
</list>
</p>
<p>Monitor user interactions using sentinels to identify and isolate malicious or undesirable behavior that violates rules and regulations.</p>
</sec>
<sec id="s4">
<label>4</label>
<title>Result and discussions</title>
<p>This section discusses Metaverse elements/applications from multiple angles with probable solutions for challenges and issues. The recommendations are meant to assist academics and practitioners in their exploration of AI inside Metaverse, in order to promote additional advancements and a more captivating and immersive VR. Metaverse provides users with state-of-the-art, offline and online multimodal interaction services. Real-time data flow and multi-user interaction across different Metaverse zones are the biggest challenges to guaranteeing immersive experiences. The basic threats to individual rights, autonomy, justice, and trust are at the center of contemporary technology&#x2019;s consequences. Ethical implications address the broader moral obligations and societal repercussions of technology:<list list-type="order">
<list-item>
<p>Biases in training data may be inadvertently reinforced and amplified by AI systems, leading to discriminatory outcomes in important domains such as jobs, loans, and the legal system;</p>
</list-item>
<list-item>
<p>Determining who is responsible when an autonomous system causes harm or makes a bad decision (e.g., in self-driving cars or healthcare diagnostics) raises complicated questions about moral and legal accountability; and</p>
</list-item>
<list-item>
<p>Many advanced AI models operate as &#x201c;black boxes,&#x201d; making it challenging for humans to understand how they arrive at their conclusions. This lack of openness undermines confidence and makes it difficult to spot and correct biases or mistakes;</p>
</list-item>
<list-item>
<p>Concerns regarding broad job displacements and the necessity of upskilling and retraining programs to achieve equitable workforce transitions are raised by rapid automations employing AI.</p>
</list-item>
<list-item>
<p>A basic human right is the ability to manage one&#x2019;s personal data. These issues can be resolved by human-centric approaches that prioritize human wellbeing and values in technology via:</p>
</list-item>
</list>
</p>
<list list-type="bullet">
<list-item>
<p>
<bold>User Empowerment and Autonomy:</bold> Systems should be developed to enhance human abilities and decision-making rather than diminish or replace human agency. Users must have meaningful choices and controls over their data and interactions.</p>
</list-item>
<list-item>
<p>
<bold>Trust and User Understanding:</bold> Building trust requires open communication about the capabilities and limitations of technology. Transparency and explainability are crucial for people to feel at ease with technology, especially in sensitive fields like healthcare.</p>
</list-item>
<list-item>
<p>
<bold>Prioritizing Human Values:</bold> Human-centric design advocates incorporating moral principles (fairness, nondiscrimination, safety) into the core architecture of technology from the beginning rather than as an afterthought.</p>
</list-item>
<list-item>
<p>
<bold>Diverse perspectives and inclusivity:</bold> Ensuring that technology is developed with a range of user groups in mind produces equitable outcomes and helps prevent the marginalization or disadvantage of specific communities.</p>
</list-item>
</list>
<p>Therefore, to make sure that new technologies enhance rather than reduce human capacities and wellbeing, a human-centric approach is crucial. Scalability (the ability to handle increasing numbers of users and data without compromising performance), Latency (data processing or reaction delays), interoperability (smooth communications between devices and systems) and real-world deployment are connected obstacles that pose practical concerns of security in technical systems (IoT, Clouds, and Web3), heterogeneity, and integration. In order to balance complexity and performance, edge computing, standardized protocols, middleware, and hybrid architectures are often used to address these issues. Data overloads, device resource constraints, and cloud/network bottlenecks are examples of scalability issues that can be resolved by incorporating distributed systems, load balancing, micro services, and edge/fog computing. Complex processing, data center distances, and network congestion are examples of latency issues that can be resolved by employing efficient network protocols (5G) and edge computing nearer to sources. The use of heterogeneous devices (IoT), vendor lock-ins, data formats, and different protocols (MQTT, CoAP, etc.) can all lead to interoperability problems, which can be resolved by utilizing middleware, APIs, protocol translations, and standardized protocols like oneM2M. Issues including heterogeneity, data integrity, device management, physical and cyber environmental concerns, and regulatory compliance arise when the metaverse moves from pilot to large-scale operations. Robust testing, adaptive procedures, hybrid architectures, and security by design (AI/Blockchain) are some of the answers. In virtual environments, user identities are essential for protecting private information and granting safe access. 
PINs and passwords are examples of access control mechanisms that are challenging to use in virtual reality settings and are susceptible to attacks and impersonations (<xref ref-type="bibr" rid="B19">EDO et al., 2022</xref>). Strong encryption, clear user consent, and technical protections like multi-factor authentication (MFA) and liveness detection are all necessary for multi-layered biometric data protection strategies. Strong security measures are essential since biometric data is permanent and cannot be altered if compromised, as explained below:<list list-type="bullet">
<list-item>
<p>
<bold>Encryption and Secure Storage:</bold> Both during transmission (in transit) and storage (at rest), biometric data&#x2014;or, more often, an encrypted template of the data&#x2014;must be encrypted. Secure hardware-based storage, such a device&#x2019;s secure enclave, is superior to centralized cloud storage, which is a common target for extensive data breaches.</p>
</list-item>
<list-item>
<p>
<bold>Multi-Factor Authentication (MFA):</bold> Biometrics should be a part of a multi-factor authentication system rather than the sole method of verification. Combining a fingerprint scan with a secure password or security token can offer a crucial degree of protection.</p>
</list-item>
<list-item>
<p>
<bold>Data Minimization and Retention:</bold> Companies should only collect the bare minimum of data necessary for a specific, well-defined purpose, and then delete it after that objective has been met.</p>
</list-item>
</list>
</p>
<p>The rise of deepfakes and advanced AI raises substantial issues about avatar impersonation in virtual environments. In order to verify that actual, live people are present rather than AI-generated copies, photos, or recordings, anti-spoofing technologies and &#x201c;liveness detection&#x201d; are essential. Deepfake detection algorithms can identify faked footage by looking at differences in metadata, audio quality, and face gestures. Additionally, users can employ built-in precautions on platforms like VRChat to prevent or conceal potentially hazardous or overwhelming avatars and restrict who can clone their avatar. Biometric methods such as head movements and biopotentials like electrocardiogram (ECG) and electroencephalogram (EEG) signals, which are physiological biometrics derived from the body&#x2019;s electrical activity, offer strong security and simplicity. They are highly specific to each person.<list list-type="bullet">
<list-item>
<p>
<bold>Head Motions:</bold> Recognizing head movements is one type of behavioral biometric. Even in cases when specific large-scale implementation specifics are lacking, general advantages and disadvantages can be inferred from its nature.</p>
</list-item>
<list-item>
<p>
<bold>Implicit/Passive Authentication:</bold> This method can be continuously and secretly recorded using existing sensors (such as those in a headset or camera system) and does not require conscious user effort. Susceptible to Variation: Head motions may be affected by weariness, health problems, or even temporary physical discomfort, which could lead to authentication failures.</p>
</list-item>
<list-item>
<p>
<bold>User Convenience:</bold> Easily integrates into the user experience, especially when using virtual reality or hands-free computing. Lack of Standardization: For this less common biometric, reliable performance metrics and consistent techniques are less developed than for fingerprint or facial identification.</p>
</list-item>
<list-item>
<p>
<bold>Difficult to Replicate Remotely:</bold> Because it requires the authorized user&#x2019;s physical presence and live action, spoofing using a simple recording is difficult. Privacy problems: Serious privacy problems regarding monitoring may arise from ongoing activity tracking.</p>
</list-item>
<list-item>
<p>
<bold>Low Hardware Cost (in some circumstances):</bold> It can use widely accessible, off-the-shelf motion sensor or camera hardware. DoS Attack Vulnerability: Systems may be vulnerable to simple behavior modification or denial-of-service (DoS) attacks.</p>
</list-item>
</list>
</p>
<p>Concerns regarding mass surveillance and anonymity are raised by the widespread use of biometric technologies by businesses or governments in public areas. The purposes can be limited by ensuring data is used for specified, legal purposes for which they were obtained, according to laws and best practices. This limits secondary, illegal general surveillance. Strict restrictions on the use of biometric data by imposing laws like GDPR, CCPA, and BIPA, necessitates greater levels of accountability and protection for businesses. If biometric data is misused, it can be prevented by establishing explicit accountability, carrying out privacy impact assessments, and putting in place independent oversight. Though these methods can increase security, they have their own disadvantages and raise apprehensions as detailed below. <xref ref-type="table" rid="T2">Table 2</xref> displays the advantages and disadvantages of Bio-Potentials.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Advantages and disadvantages of bio-potentials.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Benefits</th>
<th align="center">Drawbacks</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">High uniqueness and security: Each person&#x2019;s ECG and EEG signal patterns are very different from one another, providing great security and making them challenging to copy or steal</td>
<td align="left">Specialized hardware needed: Accurate capture necessitates specialized and frequently costly equipment (electrodes, sensors), which restricts broad accessibility</td>
</tr>
<tr>
<td align="left">Liveness detection: Because the signals cease upon death, liveness detection inherently confirms that the user is alive, making simple spoofing extremely challenging</td>
<td align="left">Sensitive to noise and artifacts: Higher error rates might result from signal quality being readily impacted by environmental interference, user movement, or even the quality of the electrode gel</td>
</tr>
<tr>
<td align="left">Internal and stable: Unlike certain other biometrics, it is less vulnerable to extrinsic changes like dirt on a finger, dim lighting, or age</td>
<td align="left">Intra-class variability: Stress, physical activity, and medical issues can all affect a person&#x2019;s signals, which can lead to an increase in false rejection rates</td>
</tr>
<tr>
<td align="left">Potential to improve privacy: Biometric templates can be created in ways that make it impossible to decipher the original signal</td>
<td align="left">Usability and ergonomics: Attaching electrodes can be a laborious and time-consuming process that detracts from user convenience and enjoyment</td>
</tr>
<tr>
<td align="left">Medical benefits: May be able to provide both identification verification and integrated health monitoring</td>
<td align="left">Privacy issues: Because biopotential data contains extremely sensitive health information, there are serious issues with data privacy and regulatory compliance</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Facial recognition and identification algorithms need to differentiate true from artificial faces in Metaverse as people may be represented by avatars. The challenges of occlusions, abrupt posture changes, and variations in illuminations can hamper facial detection in Metaverse. The dynamic states and events that take place in virtual territory must be considered in all DT processes. These can be challenging to control and coordinate, particularly when several people interact with virtual items simultaneously without any lag. Since latencies might deteriorate user experiences, it might be challenging to enable perfect interactions between an endless number of users in virtual environments. DT can stop changes in the real world from being reflected in the digital representations since they are situated at the nexus of the digital and physical worlds through Internet of Things connections (<xref ref-type="bibr" rid="B13">Chen et al., 2022</xref>). The work in (<xref ref-type="bibr" rid="B38">Park and Kim, 2022</xref>) implied that enhanced hardware for Metaverse is needed for tracking, audio, and vision systems, which could be achieved by the usage of AI. However, cutting-edge technology developments, like VR and AR devices, are necessary to fully actuate Metaverse&#x2019;s promise. Even if PCs and cell phones can communicate with Metaverse, they will soon become obsolete. For instance, human avatars need to detect movements of other avatars or objects for appropriate reactions in 3D virtual surroundings. Human avatars also need to be able to comprehend the emotional and psychological traits of other people. Avatar&#x2019;s facial characteristics (<xref ref-type="bibr" rid="B57">Wei et al., 2004</xref>) and micro-expressions (<xref ref-type="bibr" rid="B28">Kocur et al., 2020</xref>) may affect user perception throughout different social interactions in Metaverse. 
Consequently, a number of reconstruction methods have been created to produce incredibly lifelike 3D faces and bodies in order to improve realism. Avatar creations in Metaverse are far more customizable than other components, allowing for the creation of motion features and 3D models. Users frequently have a wide range of options for customizing and changing the appearance of their avatar. Because most video game designers either employ a small number of models or allow players to generate whole Avatars using only a few optional sub-models, including the mouth, eyes, nose, and so on, players&#x2019; avatars seem to be the same. Users can choose virtual clothing to mimic their real-life appearances by scanning their physical characteristics with a number of technologies. Even with significant advancements in realism, design-based avatars still seem absurd. Avatar animation is usually achieved using real-time tracking or user interaction with controllers (<xref ref-type="bibr" rid="B20">Genay et al., 2022</xref>). User engagement alone may not be sufficient to create animations which now require real-time tracking technologies to create convincing ownership illusions using inputs that do not accurately represent users&#x2019; hands or faces or bodies. These technologies provide users more exact control over their avatars by mapping their movements. <xref ref-type="fig" rid="F7">Figure 7</xref> shows examples of real-world avatars, such as Facebook Avatar, which allows users to personalize their social network avatars, Fortnite Memoji, which is a type of AR that allows users to interact with cartoon faces during FaceTime on Apple iOS devices, Facebook avatars and real-world images created with OpenCV are shown in <xref ref-type="fig" rid="F7">Figure 7</xref>.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Avatars from Facebook and real-life image.</p>
</caption>
<graphic xlink:href="frvir-07-1753188-g007.tif">
<alt-text content-type="machine-generated">Three panels on the left show screenshots of creating and customizing a male cartoon avatar in an app, including using the avatar in stickers and setting it as a profile picture. The image on the right displays a portrait of a young child with short dark hair, a neutral facial expression, and a light shirt against a blue background.</alt-text>
</graphic>
</fig>
<p>Various novel authentication techniques have been proposed by researchers to address these concerns. Among these are 3D key block building, head movement-based continuous authentication, multi-attribute authentication by mitigating man-in-the-room attacks (<xref ref-type="bibr" rid="B52">Wang and Gao, 2021</xref>). Suggested systems like VRCAuth are necessary for Metaverse as they permit safe, reliable, and acceptable virtual experience to the users together with the requirements of this upcoming technology. It is made especially for usage in VR environments, where conventional authentication techniques (such as entering passwords) are frequently unfeasible and detrimental to the user experience. VRCAuth system is a unique continuous authentication technique using head movement as a behavioral biometric for VR settings. It has a claimed processing latency of about 0.02&#xa0;s (20 milliseconds), with high levels of accuracy (above 95 percent). The system examines each user&#x2019;s distinct head movement patterns to differentiate between an authorized user and an unauthorized one. The system can reach excellent accuracy (above 95%) in a very short amount of time (0.02&#xa0;s), which makes it appropriate for continuous, real-time monitoring without generating motion sickness or visible delays&#x2014;a crucial component of VR systems. Moreover, VRCAuth is compatible with current VR setups because it does not require further hardware or operating system changes. It uses a variety of machine learning algorithms, including LMT (Logistic Model Tree) and PART (Partial Decision Trees) classifiers to process head motion data and categorize the user&#x2019;s identity. <xref ref-type="table" rid="T3">Table 3</xref> displays comparisons of VRCAuth systems with other biometrics.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Comparison of biometric methods.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Feature</th>
<th align="center">VRCAuth (head movement)</th>
<th align="center">Physiological biometrics (e.g., iris, fingerprint)</th>
<th align="center">Behavioral biometrics (e.g., voice, signature)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Biometric type</td>
<td align="left">Behavioral</td>
<td align="left">Physiological (physical part of the body)</td>
<td align="left">Behavioral (patterns of behavior)</td>
</tr>
<tr>
<td align="center">Environment</td>
<td align="left">Operates within VR/XR environments</td>
<td align="left">Typically used for physical or digital access in the real world</td>
<td align="left">Often used in remote applications like telephone/IVR systems</td>
</tr>
<tr>
<td align="center">User interaction</td>
<td align="left">Passive/continuous; uses existing head movements for ongoing verification without active input</td>
<td align="left">Requires active input (e.g., placing finger on scanner, looking into a camera)</td>
<td align="left">Requires active input (e.g., speaking a phrase, signing a document)</td>
</tr>
<tr>
<td align="center">Accuracy</td>
<td align="left">Average accuracy around 92.34% in one study; less robust across different VR content without model adaptation in early versions</td>
<td align="left">Very high accuracy, often exceeding 99% in controlled environments</td>
<td align="left">Generally, less accurate than physiological methods, but sufficient for specific applications</td>
</tr>
<tr>
<td align="center">Cost/Hardware</td>
<td align="left">Low additional hardware cost in modern VR setups that have built-in eye trackers and IMUs (inertial measurement units)</td>
<td align="left">Can require dedicated, sometimes expensive, scanning hardware</td>
<td align="left">Can use existing hardware like microphones or touchpads, resulting in low deployment costs</td>
</tr>
<tr>
<td align="center">Susceptibility to spoofing</td>
<td align="left">Potentially difficult to spoof as it relies on unique, continuous, and dynamic movement patterns in a virtual space</td>
<td align="left">Susceptible to spoofing via high-quality images or artificial molds, though advanced liveness detection mitigates this</td>
<td align="left">Susceptible to environmental noise in voice recognition</td>
</tr>
<tr>
<td align="center">Privacy concerns</td>
<td align="left">Raises concerns specific to constant monitoring and data collection within the immersive VR environment</td>
<td align="left">Raises general data security and privacy concerns if templates are breached</td>
<td align="left">Similar general data security concerns</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec sec-type="conclusion" id="s5">
<label>5</label>
<title>Conclusion</title>
<p>Despite being heralded as Web 3.0&#x2019;s next big thing, the success of platform companies will be greatly impacted by addressing security, privacy, and trust challenges. Because of several problems with early iterations, the Metaverse&#x2019;s future is uncertain. Any security breach, identity theft, or denial of service in the Metaverse could harm real-world commerce and reputations. Conversely, a favorable metaverse experience could benefit the company&#x2019;s offline operations. Customers should understand the privacy and security protocols used by the platform provider and the property owner. End users require unambiguous guarantees that their privacy will be protected and that they will have complete control over how they use Metaverse platforms and services, as well as how other users or entities use them, in order to fully realize the ideal Metaverse vision. Such concerns could be alleviated by limiting platform registration to verified users only, giving end users fine-grained control over their Metaverse experiences, and putting in place a ZTA paradigm. In addition to data validation utilizing DONs, the proposed ZTA architecture includes identity and access management, control, trust, and reputation management, user verification, and repeated authentication. Trustworthy identity management services from third parties are utilized to generate virtual identities unique to Metaverse while maintaining the anonymity of real identities to improve user privacy and make it possible to combine reliable off-chain data with on-chain data within the Metaverse. DONs are used as an effective technique for data validation. A user may be banned from Metaverse and denied access indefinitely, depending on the seriousness of the offense. It is projected that this method will significantly increase Metaverse&#x2019;s perceived value and aid in user growth.</p>
</sec>
<sec id="s6">
<label>6</label>
<title>Future work</title>
<p>Future models are anticipated to develop, mostly as a result of mergers, acquisitions, and corporate alliances. Technological developments should soon address issues of usability and access, but when developing Metaverse ecosystems, security issues need to be taken into consideration even in the early phases of development. Users&#x2019; confidence in embracing Metaverse will be seriously damaged by security breaches. Online bullying, fake news, hate speeches, and other issues will become common, if Metaverse follows the same user anonymity policy as contemporary social networks. Hence, security needs to be considered in basic design components while developing metaverse apps.</p>
</sec>
</body>
<back>
<sec sec-type="author-contributions" id="s7">
<title>Author contributions</title>
<p>JE: Conceptualization, Writing &#x2013; original draft, Formal Analysis, Writing &#x2013; review and editing, Data curation, Methodology. VE: Validation, Resources, Supervision, Data curation, Investigation, Conceptualization, Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s10">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Atlam</surname>
<given-names>H. F.</given-names>
</name>
<name>
<surname>Wills</surname>
<given-names>G. B.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>IoT security, privacy, safety and ethics</article-title>. <fpage>123</fpage>&#x2013;<lpage>149</lpage>.</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Baevski</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Krcmarik</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Koci</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). &#x201c;<article-title>Data2vec: a general framework for self-supervised learning in speech, vision and language</article-title>,&#x201d; in <source>International conference on machine learning</source>, <fpage>1298</fpage>&#x2013;<lpage>1312</lpage>.</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Bahri</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Krcmarik</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Koci</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Accurate object detection system on HoloLens using YOLO algorithm</article-title>,&#x201d; in <conf-name>2019 International Conference on Control, Artificial Intelligence, Robotics &#x26; Optimization (ICCAIRO)</conf-name>, <conf-loc>Athens, Greece</conf-loc>, <conf-date>08-10 December 2019</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>219</fpage>&#x2013;<lpage>224</lpage>.</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Barin</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Dolgov</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Toups Dugas</surname>
<given-names>P. O.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Understanding dangerous play</article-title>,&#x201d; in <source>Proceedings of the annual symposium on computer-human interaction in play</source> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>485</fpage>&#x2013;<lpage>496</lpage>.</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Beer</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>Virtual museums</article-title>,&#x201d; in <source>Proceedings of the 2015 virtual reality international conference</source> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Bello-Orgaz</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Camacho</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2013</year>). &#x201c;<article-title>Comparative study of text clustering techniques in virtual worlds</article-title>,&#x201d; in <source>Proceedings of the 3rd international conference on web intelligence, mining and semantics</source> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>8</lpage>.</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Booth</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Iero</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Della Corte</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>A 3D morphable model learnt from 10,000 faces</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>, <conf-loc>Las Vegas, NV, USA</conf-loc>, <conf-date>27-30 June 2016</conf-date> (<publisher-name>IEEE</publisher-name>).</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cheng</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>S. Z.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Roadmap toward the metaverse: an AI perspective</article-title>. <source>Innovation</source> <volume>3</volume> (<issue>5</issue>), <fpage>100293</fpage>. <pub-id pub-id-type="doi">10.1016/j.xinn.2022.100293</pub-id>
<pub-id pub-id-type="pmid">36032197</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carotenuto</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Merenda</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Iero</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Della Corte</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Mobile synchronization recovery for ultrasonic indoor positioning</article-title>. <source>Sensors</source> <volume>20</volume> (<issue>3</issue>), <fpage>702</fpage>. <pub-id pub-id-type="doi">10.3390/s20030702</pub-id>
<pub-id pub-id-type="pmid">32012789</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chal&#xe1;s</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Urbanov&#xe1;</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Ju&#x159;&#xed;k</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Ferkov&#xe1;</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Jandov&#xe1;</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sochor</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Generating various composite human faces from real 3D facial images</article-title>. <source>Vis. Comput.</source> <volume>33</volume> (<issue>4</issue>), <fpage>443</fpage>&#x2013;<lpage>458</lpage>. <pub-id pub-id-type="doi">10.1007/s00371-016-1277-1</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Xi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Shen</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>6G-Enabled edge AI for metaverse: challenges, methods, and future research directions</article-title>. <source>J. Commun. Inf. Netw.</source> <volume>7</volume> (<issue>2</issue>), <fpage>107</fpage>&#x2013;<lpage>121</lpage>. <pub-id pub-id-type="doi">10.23919/jcin.2022.9815195</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>L. C.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Papandreou</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Schroff</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Adam</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Encoder-decoder with atrous separable convolution for semantic image segmentation</article-title>.</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Mu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zeng</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Rezaeibagha</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Hwang</surname>
<given-names>M. S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Blockchain-based random auditor committee for integrity verification</article-title>. <source>Future Gener. Comput. Syst.</source> <volume>131</volume>, <fpage>183</fpage>&#x2013;<lpage>193</lpage>. <pub-id pub-id-type="doi">10.1016/j.future.2022.01.019</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Davis</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Murphy</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Owens</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Khazanchi</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Zigurs</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Avatars, people, and virtual worlds: foundations for research in metaverses</article-title>. <source>J. Assoc. Inf. Syst.</source> <volume>10</volume> (<issue>2</issue>), <fpage>90</fpage>&#x2013;<lpage>117</lpage>. <pub-id pub-id-type="doi">10.17705/1jais.00183</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Deng</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Shen</surname>
<given-names>Z. J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A systematic review of a digital twin city: a new pattern of urban governance toward smart cities</article-title>. <source>J. Manag. Sci. Eng.</source> <volume>6</volume> (<issue>2</issue>), <fpage>125</fpage>&#x2013;<lpage>134</lpage>. <pub-id pub-id-type="doi">10.1016/j.jmse.2021.03.003</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Deveci</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pamucar</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Gokasar</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>K&#xf6;ppen</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Gupta</surname>
<given-names>B. B.</given-names>
</name>
<name>
<surname>Daim</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Evaluation of metaverse traffic safety implementations using fuzzy einstein based logarithmic methodology of additive weights and TOPSIS method</article-title>. <source>Technol. Forecast Soc. Change</source> <volume>194</volume>, <fpage>122681</fpage>. <pub-id pub-id-type="doi">10.1016/j.techfore.2023.122681</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Dissanayake</surname>
<given-names>V. D.</given-names>
</name>
</person-group> (<year>2019</year>). <source>A review of cyber security risks in an augmented reality world</source>. <publisher-loc>Malabe, Sri Lanka</publisher-loc>: <publisher-name>University of Sri Lanka, Institute of Information Technology</publisher-name>.</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Duan</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Cai</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Metaverse for social good</article-title>,&#x201d; in <source>Proceedings of the 29th ACM international conference on multimedia</source> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>153</fpage>&#x2013;<lpage>161</lpage>.</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Edo</surname>
<given-names>O. C.</given-names>
</name>
<name>
<surname>Tenebe</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Etu</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Ayuwu</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Emakhu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Adebiyi</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Zero trust architecture: trend and impact on information security</article-title>. <source>Int. J. Emerg. Technol. Adv. Eng.</source> <volume>12</volume> (<issue>7</issue>), <fpage>140</fpage>&#x2013;<lpage>147</lpage>. <pub-id pub-id-type="doi">10.46338/ijetae0722_15</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Genay</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Lecuyer</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hachet</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Being an avatar &#x201c;for real&#x201d;: a survey on virtual embodiment in augmented reality</article-title>. <source>IEEE Trans. Vis. Comput. Graph</source> <volume>28</volume> (<issue>12</issue>), <fpage>5071</fpage>&#x2013;<lpage>5090</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2021.3099290</pub-id>
<pub-id pub-id-type="pmid">34310309</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ghosh</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Chakraborty</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Law</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Artificial intelligence in internet of things</article-title>. <source>CAAI Trans. Intell. Technol.</source> <volume>3</volume> (<issue>4</issue>), <fpage>208</fpage>&#x2013;<lpage>218</lpage>. <pub-id pub-id-type="doi">10.1049/trit.2018.1008</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Goel</surname>
<given-names>A. K.</given-names>
</name>
<name>
<surname>Bakshi</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Agrawal</surname>
<given-names>K. K.</given-names>
</name>
</person-group> (<year>2022</year>). &#x201c;<article-title>Web 3.0 and decentralized applications</article-title>,&#x201d; in <source>The 2nd international conference on innovative research in renewable energy technologies (IRRET 2022)</source> (<publisher-loc>Basel, Switzerland</publisher-loc>: <publisher-name>MDPI</publisher-name>), <fpage>8</fpage>.</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gupta</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kumar</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Prabhat</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Gupta</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Tanwar</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sharma</surname>
<given-names>G.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Combating fake news: stakeholder interventions and potential solutions</article-title>. <source>IEEE Access</source> <volume>10</volume>, <fpage>78268</fpage>&#x2013;<lpage>78289</lpage>. <pub-id pub-id-type="doi">10.1109/access.2022.3193670</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Han</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>News big data analysis of &#x2018;metaverse&#x2019; using topic modeling analysis</article-title>. <source>J. Digital Contents Soc.</source> <volume>22</volume> (<issue>7</issue>), <fpage>1091</fpage>&#x2013;<lpage>1099</lpage>. <pub-id pub-id-type="doi">10.9728/dcs.2021.22.7.1091</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Qiao</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Su</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Dustdar</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Multi-player immersive communications and interactions in metaverse: challenges, architecture, and future directions</article-title>.</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hujon</surname>
<given-names>A. V.</given-names>
</name>
<name>
<surname>Singh</surname>
<given-names>T. D.</given-names>
</name>
<name>
<surname>Amitab</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Transfer learning based neural machine translation of English-Khasi on low-resource settings</article-title>. <source>Procedia Comput. Sci.</source> <volume>218</volume>, <fpage>1</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/j.procs.2022.12.396</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huynh-The</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Pham</surname>
<given-names>Q. V.</given-names>
</name>
<name>
<surname>Pham</surname>
<given-names>X. Q.</given-names>
</name>
<name>
<surname>Nguyen</surname>
<given-names>T. T.</given-names>
</name>
<name>
<surname>Han</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>D. S.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Artificial intelligence for the metaverse: a survey</article-title>. <source>Eng. Appl. Artif. Intell.</source> <volume>117</volume>, <fpage>105581</fpage>. <pub-id pub-id-type="doi">10.1016/j.engappai.2022.105581</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Kocur</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Graf</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Schwind</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>The impact of missing fingers in virtual reality</article-title>,&#x201d; in <source>26th ACM symposium on virtual reality software and technology</source> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>5</lpage>.</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Kojic</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ali</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Greinacher</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>M&#xf6;ller</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Voigt-Antons</surname>
<given-names>J. N.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>User experience of reading in virtual reality &#x2014; finding values for text distance, size and contrast</article-title>,&#x201d; in <conf-name>2020 Twelfth International Conference on Quality of Multimedia Experience (QoMEX)</conf-name>, <conf-loc>Athlone, Ireland</conf-loc>, <conf-date>26-28 May 2020</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Leippold</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Thus spoke GPT-3: interviewing a large-language model on climate finance</article-title>. <source>Financ. Res. Lett.</source> <volume>53</volume>, <fpage>103617</fpage>. <pub-id pub-id-type="doi">10.1016/j.frl.2022.103617</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Lu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>H. N.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). &#x201c;<article-title>Exploration of hands-free text entry techniques for virtual reality</article-title>,&#x201d; in <conf-name>2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)</conf-name>, <conf-loc>Porto de Galinhas, Brazil</conf-loc>, <conf-date>09-13 November 2020</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>344</fpage>&#x2013;<lpage>349</lpage>.</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Lu</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Miller</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Pereira</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Rowe</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>FLASH: video-embeddable AR anchors for live events</article-title>,&#x201d; in <conf-name>2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)</conf-name>, <conf-loc>Bari, Italy</conf-loc>, <conf-date>04-08 October 2021</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>489</fpage>&#x2013;<lpage>497</lpage>.</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ma</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Kang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Free-form scanning of non-planar appearance with neural trace photography</article-title>. <source>ACM Trans. Graph</source> <volume>40</volume> (<issue>4</issue>), <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1145/3450626.3459679</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Mangiarua</surname>
<given-names>N. A.</given-names>
</name>
<name>
<surname>Ierache</surname>
<given-names>J. S.</given-names>
</name>
<name>
<surname>Ab&#xe1;solo</surname>
<given-names>M. J.</given-names>
</name>
</person-group> (<year>2020</year>). <source>Scalable integration of image and face based augmented reality</source>, <fpage>232</fpage>&#x2013;<lpage>242</lpage>.</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Minerva</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>G. M.</given-names>
</name>
<name>
<surname>Crespi</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Digital twin in the IoT context: a survey on technical features, scenarios, and architectural models</article-title>. <source>Proc. IEEE</source> <volume>108</volume> (<issue>10</issue>), <fpage>1785</fpage>&#x2013;<lpage>1824</lpage>. <pub-id pub-id-type="doi">10.1109/jproc.2020.2998530</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moztarzadeh</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Jamshidi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sargolzaei</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Keikhaee</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Jamshidi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Shadroo</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Metaverse and medical diagnosis: a blockchain-based digital twinning approach based on MobileNetV2 algorithm for cervical vertebral maturation</article-title>. <source>Diagnostics</source> <volume>13</volume> (<issue>8</issue>), <fpage>1485</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics13081485</pub-id>
<pub-id pub-id-type="pmid">37189587</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Nguyen</surname>
<given-names>C. T.</given-names>
</name>
<name>
<surname>Hoang</surname>
<given-names>D. T.</given-names>
</name>
<name>
<surname>Nguyen</surname>
<given-names>D. N.</given-names>
</name>
<name>
<surname>Dutkiewicz</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2022</year>). &#x201c;<article-title>MetaChain: a novel blockchain-based framework for metaverse applications</article-title>,&#x201d; in <conf-name>2022 IEEE 95th Vehicular Technology Conference: (VTC2022-Spring)</conf-name>, <conf-loc>Helsinki, Finland</conf-loc>, <conf-date>19-22 June 2022</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>5</lpage>.</mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Park</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>Y. G.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A metaverse: taxonomy, components, applications, and open challenges</article-title>. <source>IEEE Access</source> <volume>10</volume>, <fpage>4209</fpage>&#x2013;<lpage>4251</lpage>. <pub-id pub-id-type="doi">10.1109/access.2021.3140175</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Pereira</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Rowe</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Farb</surname>
<given-names>M. W.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Arena</surname>
<given-names>R. E.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>The augmented reality edge networking architecture</article-title>,&#x201d; in <conf-name>2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)</conf-name>, <conf-loc>Bari, Italy</conf-loc>, <conf-date>04-08 October 2021</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>479</fpage>&#x2013;<lpage>488</lpage>.</mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ponchio</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Hormann</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Interactive rendering of dynamic geometry</article-title>. <source>IEEE Trans. Vis. Comput. Graph</source> <volume>14</volume> (<issue>4</issue>), <fpage>914</fpage>&#x2013;<lpage>925</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2008.35</pub-id>
<pub-id pub-id-type="pmid">18467764</pub-id>
</mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Promwongsa</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Ebrahimzadeh</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Naboulsi</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kianpisheh</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Belqasmi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Glitho</surname>
<given-names>R.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>A comprehensive survey of the tactile internet: state-of-the-art and research directions</article-title>. <source>IEEE Commun. Surv. &#x26; Tutorials</source> <volume>23</volume> (<issue>1</issue>), <fpage>472</fpage>&#x2013;<lpage>523</lpage>. <pub-id pub-id-type="doi">10.1109/comst.2020.3025995</pub-id>
</mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ramesh</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Dhariwal</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Nichol</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Chu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Hierarchical text-conditional image generation with CLIP latents</article-title>.</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rathore</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Shah</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Shukla</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bentafat</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Bakiras</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The role of AI, machine learning, and big data in digital twinning: a systematic literature review, challenges, and opportunities</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>32030</fpage>&#x2013;<lpage>32052</lpage>. <pub-id pub-id-type="doi">10.1109/access.2021.3060863</pub-id>
</mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Reiners</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Davahli</surname>
<given-names>M. R.</given-names>
</name>
<name>
<surname>Karwowski</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Cruz-Neira</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The combination of artificial intelligence and extended reality: a systematic review</article-title>. <source>Front. Virtual Real</source> <volume>2</volume>, <fpage>721933</fpage>. <pub-id pub-id-type="doi">10.3389/frvir.2021.721933</pub-id>
</mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ren</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Girshick</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Faster R-CNN: towards real-time object detection with region proposal networks</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell.</source> <volume>39</volume> (<issue>6</issue>), <fpage>1137</fpage>&#x2013;<lpage>1149</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2016.2577031</pub-id>
<pub-id pub-id-type="pmid">27295650</pub-id>
</mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Seong</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Hyun</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>FOSNet: an end-to-end trainable deep neural network for scene recognition</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>82066</fpage>&#x2013;<lpage>82077</lpage>. <pub-id pub-id-type="doi">10.1109/access.2020.2989863</pub-id>
</mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Shi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zou</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Face-to-Parameter translation for game character auto-creation</article-title>,&#x201d; in <source>Proceedings of the IEEE/CVF international conference on computer vision ICCV</source>.</mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Stotko</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Krumpen</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Hullin</surname>
<given-names>M. B.</given-names>
</name>
<name>
<surname>Weinmann</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Klein</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>SLAMCast: large-scale, real-time 3D reconstruction and streaming for immersive multi-client live telepresence</article-title>. <source>IEEE Trans. Vis. Comput. Graph</source> <volume>25</volume> (<issue>5</issue>), <fpage>2102</fpage>&#x2013;<lpage>2112</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2019.2899231</pub-id>
<pub-id pub-id-type="pmid">30794183</pub-id>
</mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tao</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Nee</surname>
<given-names>A. Y. C.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Digital twin in industry: state-of-the-art</article-title>. <source>IEEE Trans. Ind. Inf.</source> <volume>15</volume> (<issue>4</issue>), <fpage>2405</fpage>&#x2013;<lpage>2415</lpage>. <pub-id pub-id-type="doi">10.1109/tii.2018.2873186</pub-id>
</mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Venkatesan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Mohan</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ryan</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Sch&#xfc;rch</surname>
<given-names>C. M.</given-names>
</name>
<name>
<surname>Nolan</surname>
<given-names>G. P.</given-names>
</name>
<name>
<surname>Frakes</surname>
<given-names>D. H.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Virtual and augmented reality for biomedical applications</article-title>. <source>Cell Rep. Med.</source> <volume>2</volume> (<issue>7</issue>), <fpage>100348</fpage>. <pub-id pub-id-type="doi">10.1016/j.xcrm.2021.100348</pub-id>
<pub-id pub-id-type="pmid">34337564</pub-id>
</mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Vernaza</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Armuelles</surname>
<given-names>V. I.</given-names>
</name>
<name>
<surname>Ruiz</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>2012</year>). &#x201c;<article-title>Towards to an open and interoperable virtual learning enviroment using metaverse at university of Panama</article-title>,&#x201d; in <conf-name>2012 Technologies Applied to Electronics Teaching (TAEE)</conf-name>, <conf-loc>Vigo, Spain</conf-loc>, <conf-date>13-15 June 2012</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>320</fpage>&#x2013;<lpage>325</lpage>.</mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Gao</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Analysis of multi-attribute user authentication to against man-in-the-room attack in virtual reality</article-title>. <fpage>455</fpage>&#x2013;<lpage>461</lpage>.</mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Chae</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yoon</surname>
<given-names>S. H.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>T. K.</given-names>
</name>
<name>
<surname>Yoon</surname>
<given-names>K. J.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>EvDistill: asynchronous events to end-task learning via bidirectional reconstruction-guided cross-modal knowledge distillation</article-title>,&#x201d; in <source>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR)</source>, <fpage>608</fpage>&#x2013;<lpage>619</lpage>.</mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ning</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Dhelim</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Farha</surname>
<given-names>F.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>A survey on the metaverse: the state-of-the-art, technologies, applications, and challenges</article-title>. <source>IEEE Internet Things J.</source> <volume>10</volume> (<issue>16</issue>), <fpage>14671</fpage>&#x2013;<lpage>14688</lpage>. <pub-id pub-id-type="doi">10.1109/jiot.2023.3278329</pub-id>
</mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Gu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bao</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). &#x201c;<article-title>A generative model for sculpting 3D digital avatars using diffusion</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>, <conf-loc>Vancouver, BC, Canada</conf-loc>, <conf-date>17-24 June 2023</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>4563</fpage>&#x2013;<lpage>4573</lpage>.</mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Warpefelt</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>Towards an updated typology of non-player character roles</article-title>,&#x201d; in <source>Proceedings of the international conference on game and entertainment technologies</source>, <fpage>1</fpage>&#x2013;<lpage>9</lpage>.</mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Wei</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Yin</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Ji</surname>
<given-names>Q.</given-names>
</name>
</person-group> (<year>2004</year>). &#x201c;<article-title>Avatar-mediated face tracking and lip reading for human computer interaction</article-title>,&#x201d; in <source>Proceedings of the 12th annual ACM international conference on multimedia</source> (<publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>500</fpage>&#x2013;<lpage>503</lpage>.</mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wei</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Qin</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Tan</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>The design of a visual tool for the quick customization of virtual characters in OSSL</article-title>,&#x201d; in <conf-name>2015 International Conference on Cyberworlds (CW)</conf-name>, <conf-loc>Visby, Sweden</conf-loc>, <conf-date>07-09 October 2015</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>314</fpage>&#x2013;<lpage>320</lpage>.</mixed-citation>
</ref>
<ref id="B59">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yong</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Xiaoxia</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Nana</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Research on 3D object detection method based on convolutional attention mechanism</article-title>. <source>J. Phys. Conf. Ser.</source> <volume>1848</volume> (<issue>1</issue>), <fpage>012097</fpage>. <pub-id pub-id-type="doi">10.1088/1742-6596/1848/1/012097</pub-id>
</mixed-citation>
</ref>
<ref id="B60">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zawish</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Dharejo</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Khowaja</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Raza</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Davy</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Dev</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>AI and 6G into the metaverse: fundamentals, challenges and future research trends</article-title>. <source>IEEE Open J. Commun. Soc.</source> <volume>5</volume>, <fpage>730</fpage>&#x2013;<lpage>778</lpage>. <pub-id pub-id-type="doi">10.1109/ojcoms.2024.3349465</pub-id>
</mixed-citation>
</ref>
<ref id="B61">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Xue</surname>
<given-names>X.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Metaverse: perspectives from graphics, interactions and visualization</article-title>. <source>Vis. Inf.</source> <volume>6</volume> (<issue>1</issue>), <fpage>56</fpage>&#x2013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1016/j.visinf.2022.03.002</pub-id>
</mixed-citation>
</ref>
<ref id="B62">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zheng</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Chan</surname>
<given-names>K. W.</given-names>
</name>
<name>
<surname>Gibson</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Virtual reality</article-title>. <source>IEEE Potentials</source> <volume>17</volume> (<issue>2</issue>), <fpage>20</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1109/45.666641</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1008780/overview">Maria Limniou</ext-link>, University of Liverpool, United Kingdom</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3086185/overview">Sonali Patwe</ext-link>, COEP Technological University, India</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3219061/overview">Marija Markovi&#x107; Blagojevi&#x107;</ext-link>, Singidunum University, Serbia</p>
</fn>
</fn-group>
</back>
</article>