<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="systematic-review" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Commun.</journal-id>
<journal-title>Frontiers in Communication</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Commun.</abbrev-journal-title>
<issn pub-type="epub">2297-900X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fcomm.2025.1645168</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Communication</subject>
<subj-group>
<subject>Systematic Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Deep learning in cultural imagery dissemination: a systematic scoping review of AI-driven visual transmission mechanisms</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Yang</surname><given-names>Jinhua</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2610704/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname><given-names>Ting</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn0002"><sup>&#x2020;</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Luo</surname><given-names>Yiming Taclis</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn0003"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3132489/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Pang</surname><given-names>Patrick Cheong-Iao</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<xref ref-type="author-notes" rid="fn0004"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2266938/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>The School of Humanities, Tongji University</institution>, <addr-line>Shanghai</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Faculty of Applied Sciences, Macao Polytechnic University</institution>, <addr-line>Macao, Macao SAR</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0005">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2011191/overview">Ankan Bhattacharya</ext-link>, Hooghly Engineering and Technology College, India</p></fn>
<fn fn-type="edited-by" id="fn0006">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3191667/overview">Abhranil De</ext-link>, Hooghly Engineering and Technology College, India</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3195234/overview">Krishanu Kundu</ext-link>, GL Bajaj Group of Institutions, India</p></fn>
<corresp id="c001">&#x002A;Correspondence: Patrick Cheong-Iao Pang, <email>mail@patrickpang.net</email></corresp>
<fn fn-type="other" id="fn0001"><p><sup>&#x2020;</sup>ORCID: Jinhua Yang, <ext-link ext-link-type="uri" xlink:href="https://orcid.org/0009-0004-4633-1830">https://orcid.org/0009-0004-4633-1830</ext-link></p></fn>
<fn fn-type="other" id="fn0002"><p>Ting Liu, <ext-link ext-link-type="uri" xlink:href="https://orcid.org/0009-0001-0331-262X">https://orcid.org/0009-0001-0331-262X</ext-link></p></fn>
<fn fn-type="other" id="fn0003"><p>Yiming Taclis Luo, <ext-link ext-link-type="uri" xlink:href="https://orcid.org/0009-0002-6117-738X">https://orcid.org/0009-0002-6117-738X</ext-link></p></fn>
<fn fn-type="other" id="fn0004"><p>Patrick Cheong-Iao Pang, <ext-link ext-link-type="uri" xlink:href="https://orcid.org/0000-0002-8820-5443">https://orcid.org/0000-0002-8820-5443</ext-link></p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>19</day>
<month>09</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>10</volume>
<elocation-id>1645168</elocation-id>
<history>
<date date-type="received">
<day>11</day>
<month>06</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>09</day>
<month>09</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2025 Yang, Liu, Luo and Pang.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Yang, Liu, Luo and Pang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec id="sec1">
<title>Background</title>
<p>In an era of rapid media technology and AI advancement, deep learning (DL)-driven visual images (VI) are emerging as a critical mode of cultural transmission (CT). Despite the growing application of DL in the VI domain, there is a lack of a systematic review that comprehensively explores its transmission pathways, mechanisms of influence, and associated challenges. This study aims to systematically explore the pathways and impacts of DL-driven VI in CT and identify key trends and issues in the field through a systematic scoping review of existing literature.</p>
</sec>
<sec id="sec2">
<title>Methods</title>
<p>This review analyzes 18 studies published between 2015 and 2024. The literature search was conducted across five databases: WOS, ScienceDirect, Scopus, ACM, and A&#x0026;HCI. The research was undertaken rigorously following the Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews (PRISMA-ScR) guidelines, ensuring systematic selection, extraction, and analysis of the identified studies.</p>
</sec>
<sec id="sec3">
<title>Results</title>
<p>The study analyzed the literature from four aspects: transmission pathways, content, technology, and cultural context, identifying three main research areas: (1) the influence mechanisms of AI and social media on cultural transmission; (2) the role of VI in cross-cultural communication; and (3) the application of AI and digital technology in the conservation of Cultural Ecosystem Services (CES). The study finds that AI-driven visual technologies significantly enhance the breadth and impact of CT, particularly through DL algorithms. However, the field faces critical challenges such as algorithmic bias, cultural homogenization, and the reliability of user-generated content.</p>
</sec>
<sec id="sec4">
<title>Conclusion</title>
<p>By systematically synthesizing the existing literature, this study provides a theoretical foundation for future research and points to emerging research directions, such as how to use DL to address ethical challenges in cultural communication and explore the differences in the application of DL and VI in different cultural contexts.</p>
</sec>
</abstract>
<kwd-group>
<kwd>deep learning</kwd>
<kwd>cultural transmission</kwd>
<kwd>visual images</kwd>
<kwd>systematic scoping review</kwd>
<kwd>DL</kwd>
</kwd-group>
<counts>
<fig-count count="6"/>
<table-count count="5"/>
<equation-count count="0"/>
<ref-count count="65"/>
<page-count count="12"/>
<word-count count="7397"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Culture and Communication</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec5">
<label>1</label>
<title>Introduction</title>
<p>Human behavior, shaped by preferences, beliefs, and norms, is partly a result of genetic evolution and partly acquired through generations via learning and other forms of social interaction (<xref ref-type="bibr" rid="ref45">Panchanathan, 2024</xref>). This transmission occurs through both intra-generational and inter-generational social interactions and is referred to as CT (<xref ref-type="bibr" rid="ref5">Bisin and Verdier, 2025</xref>). CT is a unique human behavior, central to which is the high-fidelity replication of cultural traits (<xref ref-type="bibr" rid="ref8">Crema et al., 2024</xref>), accurately copying elements, values, and customs so that each generation can build upon the knowledge and practices of the previous one (<xref ref-type="bibr" rid="ref24">Hewlett et al., 2024</xref>). This process distinguishes human culture from that of non-human primates and is a key mechanism in fostering intercultural understanding, respect, and global diversity (<xref ref-type="bibr" rid="ref35">Legare, 2017</xref>). While traditional CT media such as language, writing, and rituals have deep historical roots, they are prone to information loss during cross-temporal and cross-spatial transmission, resulting in discontinuities in CT (<xref ref-type="bibr" rid="ref9">Della Lena and Panebianco, 2021</xref>; <xref ref-type="bibr" rid="ref52">Sch&#x00F6;npflug, 2008</xref>).</p>
<p>Against this backdrop, VI has increasingly become a vital mode of CT. As one of the primary channels for CT, imagery has a long history (<xref ref-type="bibr" rid="ref11">Eerkens and Lipo, 2007</xref>). From prehistoric cave paintings to medieval religious art, and from modern photography to digital media, the patterns and impacts of image dissemination have evolved alongside technological advancements (<xref ref-type="bibr" rid="ref26">Homer, 1998</xref>; <xref ref-type="bibr" rid="ref49">Robb, 2020</xref>). Early images were primarily used to record societal values and religious beliefs, while modern VI focuses on the immediacy and diversity of visuals, effectively conveying different cultural lifestyles, societal values, and aesthetic ideals (<xref ref-type="bibr" rid="ref23">Heise, 2004</xref>; <xref ref-type="bibr" rid="ref42">Mirzoeff, 1999</xref>). With the rapid development of globalization and digital technologies, the widespread use of visual media has greatly enhanced the efficiency of CT and facilitated emotional resonance between individuals, especially in cross-cultural communication, where the influence of VI now surpasses that of traditional textual communication (<xref ref-type="bibr" rid="ref14">Fahmy et al., 2014</xref>).</p>
<p>Compared to textual dissemination, VI&#x2019;s intuitive nature breaks down language barriers, extending the reach and scope of cultural transmission (<xref ref-type="bibr" rid="ref56">Soreanu and German, 2022</xref>). The dissemination of urban imagery and landscapes utilizes VI to vividly depict the historical and cultural evolution of cities, making it an effective CT tool for showcasing unique cultural characteristics and historical heritage (<xref ref-type="bibr" rid="ref1">Bai, 2023</xref>; <xref ref-type="bibr" rid="ref29">Huang and Yang, 2016</xref>). While VI demonstrates significant advantages across various CT domains, its limitations are equally evident. Due to its intuitive and emotionally resonant nature, VI is regarded by scholars as an effective tool for fostering intercultural understanding (<xref ref-type="bibr" rid="ref42">Mirzoeff, 1999</xref>). Despite these challenges, AI technology presents new opportunities to enhance the application of VI in CT. By combining AI, VI has significantly accelerated the global flow of CT (<xref ref-type="bibr" rid="ref55">Somaini, 2023</xref>). Moreover, integrating AI and digital technologies offers innovative solutions for preserving and reconstructing cultural heritage, such as image restoration driven by DL and the digital reconstruction of virtual cultural sites (<xref ref-type="bibr" rid="ref3">Basu et al., 2023</xref>). The advances in AI technology, especially DL, provide innovative solutions to these challenges with their powerful image processing and generation capabilities, playing a core role in the cultural transmission of visual imagery (<xref ref-type="bibr" rid="ref37">Li and Wang, 2022</xref>; <xref ref-type="bibr" rid="ref55">Somaini, 2023</xref>). DL plays a core role in the CT of VI through its powerful image processing and generation capabilities. 
This study primarily focuses on DL algorithms such as Convolutional Neural Networks (CNNs) (<xref ref-type="bibr" rid="ref63">Xia et al., 2025</xref>), Large Language Models (LLMs) (<xref ref-type="bibr" rid="ref41">Luo et al., 2025</xref>), and Natural Language Processing (NLP) (<xref ref-type="bibr" rid="ref31">Jiang et al., 2023</xref>). However, despite its breakthroughs, AI&#x2019;s limitations in generating and disseminating VI remain substantial, particularly regarding dataset representativeness and the simplicity of algorithmic designs, which could introduce biases in the CT process (<xref ref-type="bibr" rid="ref33">Laba, 2024</xref>; <xref ref-type="bibr" rid="ref53">Shahbazi et al., 2023</xref>).</p>
<p>Current reviews on CT, especially those focusing on VI, still exhibit certain limitations in scope and methodological frameworks (see Appendix A). First, these studies have not sufficiently addressed the complexity and diversity of CT mechanisms, often focusing on a single perspective or partial analysis. For example, the study by Wang et al. focuses solely on gamification in cultural heritage, without considering the broader applications of VI (<xref ref-type="bibr" rid="ref60">Wang et al., 2024</xref>). Similarly, Romanazzi et al. limit their analysis to economic evaluation methods based on a single database (<xref ref-type="bibr" rid="ref50">Romanazzi et al., 2023</xref>). Research by Plieninger et al. and Scholte et al. lacks a systematic review and focuses solely on ecological service systems (<xref ref-type="bibr" rid="ref46">Plieninger et al., 2015</xref>; <xref ref-type="bibr" rid="ref51">Scholte et al., 2015</xref>), while Hegetschweiler et al. concentrate only on the European context (<xref ref-type="bibr" rid="ref22">Hegetschweiler et al., 2017</xref>).</p>
<p>In the context of the AI era, exploring pathways and methods for VI in CT can promote the effective dissemination and preservation of cultural heritage across regions. The specific research questions are as follows:</p><list list-type="order">
<list-item>
<p>What are the scientific outcomes and geographical distributions of studies related to VI in CT (e.g., country distribution, publication years)?</p>
</list-item>
<list-item>
<p>Which DL techniques have been used?</p>
</list-item>
<list-item>
<p>What are the CT themes and VI processing methods?</p>
</list-item>
<list-item>
<p>What platforms and dissemination paths can be used for VI-based CT?</p>
</list-item>
<list-item>
<p>What are the impacts of different forms of VI processing on CT?</p>
</list-item>
</list>
<p>This study systematically explores the pathways and impacts of VI in CT, examining its potential for CT. By comprehensively addressing these topics, this research contributes to understanding how VI affects CT and identifying key trends and issues in the field of VI research in CT.</p>
</sec>
<sec sec-type="methods" id="sec6">
<label>2</label>
<title>Methods</title>
<sec id="sec7">
<label>2.1</label>
<title>Search strategy</title>
<p>This study employs the PRISMA method for conducting a systematic scoping review, aiming to enhance the transparency and reproducibility of the literature screening process. The literature search spans five databases: Web of Science, ScienceDirect, Scopus, ACM Digital Library, and A&#x0026;HCI. The PRISMA approach ensures the structural rigor of the research process while increasing the transparency and consistency of the review process. This systematic scoping review adheres to the guidelines outlined in the Preferred Reporting Items for Systematic Reviews and Meta-Analyses extension for Scoping Reviews (PRISMA-ScR).</p>
<p>The literature search was conducted on November 20, 2024, using search terms including &#x201C;image,&#x201D; &#x201C;image vision,&#x201D; &#x201C;computer vision,&#x201D; &#x201C;cultural,&#x201D; &#x201C;cultural transmission,&#x201D; &#x201C;cultural communication,&#x201D; &#x201C;public,&#x201D; and &#x201C;service.&#x201D; The search strategy included the following steps: (1) a preliminary screening in all databases to exclude irrelevant studies; (2) removal of duplicate references using EndNote 21.4 software; (3) detailed screening of titles and abstracts to identify studies relevant to the research topic; and (4) full-text review to exclude studies that were not directly related to the research topic. The specific search formula is shown in <xref ref-type="table" rid="tab1">Table 1</xref> (The PRISMA-ScR checklist is provided in Appendix B).</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Selected databases and search formats.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Database</th>
<th align="left" valign="top">Search formula</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">WOS</td>
<td align="left" valign="middle">ALL&#x202F;=&#x202F;((Image OR &#x201C;Image vision&#x201D; OR &#x201C;Computer vision&#x201D;) AND (Cultural OR &#x201C;Cultural transmission&#x201D; OR &#x201C;Cultural communication&#x201D;) AND (Public AND Service))</td>
</tr>
<tr>
<td align="left" valign="middle">ScienceDirect</td>
<td align="left" valign="middle">(Image OR &#x201C;Image vision&#x201D; OR &#x201C;Computer vision&#x201D;) AND (Cultural OR &#x201C;Cultural transmission&#x201D; OR &#x201C;Cultural communication&#x201D;) AND (Public AND Service)</td>
</tr>
<tr>
<td align="left" valign="middle">Scopus</td>
<td align="left" valign="middle">(&#x201C;Image&#x201D; OR &#x201C;Image vision&#x201D; OR &#x201C;Computer vision&#x201D;)<break/>AND (&#x201C;Cultural&#x201D; OR &#x201C;Cultural transmission&#x201D; OR &#x201C;Cultural communication&#x201D;)<break/>AND (&#x201C;Public&#x201D; AND &#x201C;Service&#x201D;)</td>
</tr>
<tr>
<td align="left" valign="middle">ACM</td>
<td align="left" valign="middle">(Image OR &#x201C;Image vision&#x201D; OR &#x201C;Computer vision&#x201D;) AND (Cultural OR &#x201C;Cultural transmission&#x201D; OR &#x201C;Cultural communication&#x201D;) AND (Public AND Service)</td>
</tr>
<tr>
<td align="left" valign="middle">A&#x0026;HCI</td>
<td align="left" valign="middle">ALL&#x202F;=&#x202F;((Image OR &#x201C;Image vision&#x201D; OR &#x201C;Computer vision&#x201D;) AND (Cultural OR &#x201C;Cultural transmission&#x201D; OR &#x201C;Cultural communication&#x201D;) AND (Public AND Service))</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec8">
<label>2.2</label>
<title>Data selection and extraction</title>
<p>All retrieved records were imported into the reference management software EndNote version 21.5, and duplicate entries were removed using the software. Two independent reviewers (JY and TL) screened the titles and abstracts of the articles based on predefined inclusion criteria. In cases of discrepancies between the reviewers, a third reviewer (PP) was consulted to reach a consensus. The inclusion criteria (see <xref ref-type="table" rid="tab2">Table 2</xref>) for selecting studies were as follows: (1) research involving the application of VI in CT; (2) involves the specific application of CT and inheritance; (3) VI studies based on AI technology; (4) studies presenting original research; (5) published between 2015 and 2024; (6) full text in English.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Inclusion and exclusion criteria.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Inclusion criteria</th>
<th align="left" valign="top">Exclusion criteria</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Research on the application of VI in CT</td>
<td align="left" valign="middle">Study on the application of non-VI in CT</td>
</tr>
<tr>
<td align="left" valign="middle">Involves the specific application of CT and inheritance</td>
<td align="left" valign="middle">Studies that focus only on culture, not on the process of transmission</td>
</tr>
<tr>
<td align="left" valign="middle">VI research based on AI technology</td>
<td align="left" valign="middle">VI research not based on AI technology</td>
</tr>
<tr>
<td align="left" valign="middle">Research type articles</td>
<td align="left" valign="middle">Review articles, theses, non-academic publications, book chapters, etc.</td>
</tr>
<tr>
<td align="left" valign="middle">Published between 2015 and 2024</td>
<td align="left" valign="middle">Published outside the 2015&#x2013;2024 range</td>
</tr>
<tr>
<td align="left" valign="middle">Full text in English</td>
<td align="left" valign="middle">Full text in other languages</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec9">
<label>2.3</label>
<title>Data charting</title>
<p>A data extraction form was developed based on the scoping review methodology guidelines provided by the Joanna Briggs Institute. After conducting a preliminary trial with five articles, the form was revised to enhance its effectiveness and accuracy. The form includes the following key data extraction items: author, publication year, country, AI technology, topic, VI type, target group, dissemination platform, dissemination path, and findings. Data extraction was performed by two independent reviewers, and any discrepancies were resolved through consultation with a senior reviewer to ensure the accuracy and consistency of the extracted data.</p>
</sec>
<sec id="sec10">
<label>2.4</label>
<title>Collating, summarizing, and reporting the results</title>
<p>During this process, all three authors concurred on narrowing the scope of categories to ensure that they specifically address the research questions of this study. Descriptive statistics were employed to systematically organize, summarize, and report the results. The findings were presented in a narrative format, complemented by figures and tables to enhance clarity.</p>
</sec>
</sec>
<sec sec-type="results" id="sec11">
<label>3</label>
<title>Results</title>
<p>As illustrated in <xref ref-type="fig" rid="fig1">Figure 1</xref>, a total of 15,743 articles were retrieved through the systematic search. After removing duplicate entries using EndNote software, 15,461 articles remained. Two reviewers independently screened the titles and abstracts, excluding 15,395 articles that were not directly related to the research topic and 17 non-English articles. Studies focusing on image usage in advertising and marketing, corporate image construction, and body image perception were excluded due to their lack of direct relevance to the CT theme. Additionally, excluded studies primarily emphasized the functionality of images in business, education, or health promotion rather than the specific analysis or application of CT. The remaining 49 articles underwent a thorough evaluation by both reviewers, resulting in the exclusion of 31 articles. Ultimately, 18 articles were included in this systematic scoping review (see <xref ref-type="table" rid="tab3">Table 3</xref>).</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>PRISMA flowchart.</p>
</caption>
<graphic xlink:href="fcomm-10-1645168-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart detailing the identification and screening process for studies. Initially, 15,743 records were identified from databases. After removing duplicates and non-English entries, 15,454 records were screened, with 15,395 excluded. Fifty-one reports were sought, two not retrieved. Forty-nine were assessed for eligibility, excluding thirty-one for reasons like mismatched topics and focus issues. Eighteen studies were included in the review.</alt-text>
</graphic>
</fig>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Overview of study characteristics.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">
<bold>Authors, year, country</bold>
</th>
<th align="left" valign="top">AI technology</th>
<th align="left" valign="top">Topics</th>
<th align="left" valign="top">Visual image type</th>
<th align="left" valign="top">Target group</th>
<th align="left" valign="top">Dissemination platform</th>
<th align="left" valign="top">CT path</th>
<th align="left" valign="top">Findings</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref57">Su et al. (2023)</xref><break/>China</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">Perception of Urban Image</td>
<td align="left" valign="middle">Landmark buildings, natural scenery</td>
<td align="left" valign="middle">Urban residents, tourists</td>
<td align="left" valign="middle">Weibo</td>
<td align="left" valign="middle">Digital dissemination</td>
<td align="left" valign="middle">Social media images influence urban image and cultural cognition</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref6">Cardoso et al. (2022)</xref><break/>Portugal</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">CES evaluation</td>
<td align="left" valign="middle">Natural landscapes, cultural heritage images</td>
<td align="left" valign="middle">Social media users</td>
<td align="left" valign="middle">Flickr, Wikiloc</td>
<td align="left" valign="middle">Digital dissemination</td>
<td align="left" valign="middle">Image classification supports CES evaluation and promotes cultural dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref65">Zhao et al. (2024)</xref><break/>China</td>
<td align="left" valign="middle">SHAP (XAI)</td>
<td align="left" valign="middle">Park accessibility and attractiveness</td>
<td align="left" valign="middle">User-generated images</td>
<td align="left" valign="middle">Urban residents, social media users</td>
<td align="left" valign="middle">Weibo, WeChat</td>
<td align="left" valign="middle">Digital dissemination, localization adaptation</td>
<td align="left" valign="middle">Social media images influence cultural cognition of urban parks</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref43">Motte and McInnes (2019)</xref><break/>France</td>
<td align="left" valign="middle">Human-computer interaction (HCI)</td>
<td align="left" valign="middle">Estuarine landscape changes</td>
<td align="left" valign="middle">Paintings, engravings, postcards</td>
<td align="left" valign="middle">Citizens, tourists</td>
<td align="left" valign="middle">Specialized websites</td>
<td align="left" valign="middle">Localization adaptation, digital dissemination</td>
<td align="left" valign="middle">Artistic images enhance cultural understanding of coastal landscape changes</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref28">Huai et al. (2022)</xref><break/>China</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">CES evaluation in urban parks</td>
<td align="left" valign="middle">Social media photos</td>
<td align="left" valign="middle">Urban residents, tourists, social media users</td>
<td align="left" valign="middle">Flickr</td>
<td align="left" valign="middle">Digital dissemination</td>
<td align="left" valign="middle">Social media photos and computer vision advance CES dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref25">Hoffmann et al. (2023)</xref><break/>Germany</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">Building function classification</td>
<td align="left" valign="middle">Street view images</td>
<td align="left" valign="middle">Urban residents, social media users</td>
<td align="left" valign="middle">Flickr</td>
<td align="left" valign="middle">Digital dissemination</td>
<td align="left" valign="middle">Social media images contribute to cultural cognition of urban building functions</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref48">Richards and Tun&#x00E7;er (2018)</xref><break/>Singapore</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">CES evaluation</td>
<td align="left" valign="middle">Geo-tagged photos</td>
<td align="left" valign="middle">Urban residents, tourists, social media users</td>
<td align="left" valign="middle">Flickr</td>
<td align="left" valign="middle">Digital dissemination</td>
<td align="left" valign="middle">Image recognition technology improves efficiency in CES dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref17">Ferracani et al. (2024)</xref><break/>Italy</td>
<td align="left" valign="middle">LLM</td>
<td align="left" valign="middle">Cultural heritage tourism</td>
<td align="left" valign="middle">Narrative tourism image</td>
<td align="left" valign="middle">Urban residents, tourists</td>
<td align="left" valign="middle">APP</td>
<td align="left" valign="middle">Cultural inheritance</td>
<td align="left" valign="middle">AI-generated narratives and images that visually represent the user in the context of the story achieve good immersion and engagement</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref12">Eizenberg and Cohen (2015)</xref><break/>Israel</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">Cultural events and urban image</td>
<td align="left" valign="middle">Urban landscapes, art exhibitions</td>
<td align="left" valign="middle">Residents, tourists, art enthusiasts</td>
<td align="left" valign="middle">Social media, exhibition spaces</td>
<td align="left" valign="middle">Cross-cultural dissemination, localization adaptation</td>
<td align="left" valign="middle">Cultural flagship events reconstruct urban image, enhance cultural identity</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref54">Shi et al. (2021)</xref><break/>Japan</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">River landscape management</td>
<td align="left" valign="middle">River photos</td>
<td align="left" valign="middle">Tourists, urban residents, social media users</td>
<td align="left" valign="middle">Social media, traditional media</td>
<td align="left" valign="middle">Digital dissemination, localization adaptation</td>
<td align="left" valign="middle">Image recognition classification enhances cultural landscape dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref58">Van Berkel et al. (2018)</xref><break/>USA</td>
<td align="left" valign="middle">Cloud Computing</td>
<td align="left" valign="middle">CES evaluation</td>
<td align="left" valign="middle">Natural landscapes, historical sites</td>
<td align="left" valign="middle">Environmental researchers, social media users, tourists</td>
<td align="left" valign="middle">Panoramio</td>
<td align="left" valign="middle">Digital dissemination, localization adaptation</td>
<td align="left" valign="middle">Social media and LiDAR enhance CES dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref62">Winder et al. (2022)</xref><break/>USA</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">Recreation activities and landscape evaluation</td>
<td align="left" valign="middle">Social media images</td>
<td align="left" valign="middle">Social media users, activity enthusiasts</td>
<td align="left" valign="middle">Flickr</td>
<td align="left" valign="middle">Digital dissemination</td>
<td align="left" valign="middle">Open-source classifiers promote cultural dissemination of recreational activities</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref34">Lee et al. (2019)</xref><break/>Germany</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">Spatial distribution of CES</td>
<td align="left" valign="middle">Social media images</td>
<td align="left" valign="middle">Social media users, tourists</td>
<td align="left" valign="middle">Flickr, Clarifai</td>
<td align="left" valign="middle">Digital dissemination, social media dissemination</td>
<td align="left" valign="middle">Unlabeled images exhibit advantages and limitations in cultural dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref20">Gosal and Ziv (2020)</xref><break/>UK</td>
<td align="left" valign="middle">NLP</td>
<td align="left" valign="middle">Scenic aesthetics evaluation</td>
<td align="left" valign="middle">Social media images</td>
<td align="left" valign="middle">Social media users, general public</td>
<td align="left" valign="middle">Social media</td>
<td align="left" valign="middle">Digital dissemination</td>
<td align="left" valign="middle">Integration of social media and machine learning enhances aesthetic cultural dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref15">Fan et al. (2023)</xref><break/>China</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">ICH image classification</td>
<td align="left" valign="middle">New Year paintings, clay sculptures</td>
<td align="left" valign="middle">Social media users, tourists</td>
<td align="left" valign="middle">Social media, exhibition spaces</td>
<td align="left" valign="middle">Digital dissemination, cross-cultural dissemination</td>
<td align="left" valign="middle">Multimodal image classification promotes ICH dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref64">You et al. (2022)</xref><break/>China</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">Forest eco-tourism</td>
<td align="left" valign="middle">Remote sensing imagery data</td>
<td align="left" valign="middle">Social media users, tourists</td>
<td align="left" valign="middle">Social media, remote sensing technologies</td>
<td align="left" valign="middle">Digital dissemination, social media dissemination</td>
<td align="left" valign="middle">Integration of remote sensing and social media enhances forest cultural value dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref27">Hu et al. (2017)</xref><break/>China</td>
<td align="left" valign="middle">Virtual reality (VR)</td>
<td align="left" valign="middle">Hybrid 3D virtual museums</td>
<td align="left" valign="middle">Panoramic images, 3D models</td>
<td align="left" valign="middle">Tourists, smartphone users</td>
<td align="left" valign="middle">Unity 3D, web, smartphones</td>
<td align="left" valign="middle">Digital dissemination</td>
<td align="left" valign="middle">Panoramic images and 3D models enhance immersive cultural dissemination</td>
</tr>
<tr>
<td align="left" valign="middle"><xref ref-type="bibr" rid="ref47">Richards and Friess (2015)</xref><break/>Singapore</td>
<td align="left" valign="middle">CNN</td>
<td align="left" valign="middle">CES utilization evaluation</td>
<td align="left" valign="middle">Social media images</td>
<td align="left" valign="middle">Social media users, urban residents, tourists</td>
<td align="left" valign="middle">Flickr</td>
<td align="left" valign="middle">Digital dissemination</td>
<td align="left" valign="middle">Social media data enables rapid response in CES dissemination</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="sec12">
<label>3.1</label>
<title>Publication characteristics of studies</title>
<p><xref ref-type="fig" rid="fig2">Figure 2</xref> illustrates the annual evolution trend of the included literature. The time series indicates that relevant research began in 2015, gradually developing thereafter, and reaching a peak in 2022 with a total of four studies published. During the COVID-19 pandemic, the closure of cultural institutions and the global shift to online spaces prompted a transition of cultural content toward virtual engagement. This increased the need to explore the role of digital and virtual media in CT, which may explain the rise in publications in 2022 and the following year (<xref ref-type="bibr" rid="ref36">Li et al., 2022</xref>).</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Annual number of publications.</p>
</caption>
<graphic xlink:href="fcomm-10-1645168-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph showing yearly numeric values from 2015 to 2024. Peaks at 2 in 2015, 2019, and 2024, with a significant spike to 4 in 2022. Values drop to 1 in 2016-2018, 2020-2021, and rise to 3 in 2023.</alt-text>
</graphic>
</fig>
<p>Regarding the geographical distribution of the research, <xref ref-type="fig" rid="fig3">Figure 3</xref> displays the number of contributions by country and region. China published six papers, accounting for 33.3% of the total, temporarily leading the field. China has 56 cultural and natural heritage sites listed on the UNESCO World Heritage List, ranking second globally. This leadership is likely closely related to the rapid development of China&#x2019;s digital cultural industry in recent years and government support for visual culture research. At the regional level, Asia dominates with ten articles, representing 55.6%, followed by Europe with six articles (33.3%), and North America with two articles (11.1%).</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Regional distribution map of publications.</p>
</caption>
<graphic xlink:href="fcomm-10-1645168-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Stacked bar chart showing counts for Asia, Europe, and North America by country. Asia has contributions from China, Singapore, Israel, and Japan. Europe includes Portugal, Italy, Germany, France, and the UK. North America solely shows the USA. A legend provides color codes for each country.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec13">
<label>3.2</label>
<title>AI technology</title>
<p>In the included studies, the majority employed DL algorithms for AI-based VI processing in CT applications, particularly CNN. Eleven studies utilized CNN to process and generate image data. CNN has significant advantages in handling image data, enabling tasks such as image classification and detection. Second, cloud computing was used for data processing in two studies. Image processing often involves textual data; for example, input text data can be transformed into VI through generative AI, thereby involving LLM and NLP. However, HCI and XAI were less frequently employed in the research (see <xref ref-type="fig" rid="fig4">Figure 4</xref>).</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>AI technology type.</p>
</caption>
<graphic xlink:href="fcomm-10-1645168-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Pie chart showing distribution percentages of various technologies. DL-CNN dominates with sixty-one percent, followed by Cloud Computing at eleven percent. VR and LLM each hold six percent. XAI is at six percent, while HCI and NLP each have five percent.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec14">
<label>3.3</label>
<title>Research topics</title>
<p>The thematic classification identified 18 topics across seven major categories. CES evaluation and utilization was the most prominent, encompassing five topics, highlighting researchers&#x2019; focus on assessing and applying CES in various contexts, essential for urban planning and environmental management. Landscape &#x0026; environmental management, as well as urban image &#x0026; perception, included four and two topics, respectively, emphasizing the role of landscape management and urban image in enhancing sustainability and residents&#x2019; quality of life. Classification systems and recreation &#x0026; aesthetic evaluation each comprised two topics, indicating the need for specific classification methodologies and aesthetic value assessments. Cultural heritage and technology in urban studies featured one and two topics, respectively. This reflects the increasing recognition of cultural heritage tourism and emerging technologies, such as hybrid 3D virtual museums and generative AI, in enriching cultural experiences and research. Overall, the distribution of topics underscores current academic interests in CES, landscape management, and technological applications (see <xref ref-type="table" rid="tab4">Table 4</xref>).</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Research topics.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Category</th>
<th align="left" valign="top">Topics</th>
<th align="center" valign="top"><italic>N</italic></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Evaluation and utilization of CES</td>
<td align="left" valign="middle">CES evaluation, evaluation in urban parks, spatial distribution of CES, CES utilization evaluation</td>
<td align="center" valign="middle">5</td>
</tr>
<tr>
<td align="left" valign="middle">Landscape and environmental management</td>
<td align="left" valign="middle">Park accessibility and attractiveness, estuarine landscape changes, river landscape management, forest eco-tourism</td>
<td align="center" valign="middle">4</td>
</tr>
<tr>
<td align="left" valign="middle">Urban image and perception</td>
<td align="left" valign="middle">Perception of urban image, cultural events and urban image</td>
<td align="center" valign="middle">2</td>
</tr>
<tr>
<td align="left" valign="middle">Classification systems</td>
<td align="left" valign="middle">Building function classification, image classification</td>
<td align="center" valign="middle">2</td>
</tr>
<tr>
<td align="left" valign="middle">Recreation and aesthetic evaluation</td>
<td align="left" valign="middle">Recreation activities and landscape evaluation, scenic aesthetics evaluation</td>
<td align="center" valign="middle">2</td>
</tr>
<tr>
<td align="left" valign="middle">Technology in cultural and urban studies</td>
<td align="left" valign="middle">Hybrid 3D virtual museums, generative AI in image generation</td>
<td align="center" valign="middle">2</td>
</tr>
<tr>
<td align="left" valign="middle">Tourism and cultural heritage</td>
<td align="left" valign="middle">Cultural heritage tourism</td>
<td align="center" valign="middle">1</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec15">
<label>3.4</label>
<title>Target group</title>
<p>The analysis reveals that social media users (36%), tourists (28%), and urban residents (22%) are the primary target groups across the studies. The frequent combination of these groups highlights the importance of social media in urban and tourism research, as well as the need to understand the dynamics between local residents and visitors. Additionally, the inclusion of niche groups points to diverse research interests that can be further explored to enrich the understanding of cultural and urban ecosystems (see <xref ref-type="fig" rid="fig5">Figure 5</xref>).</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Target group.</p>
</caption>
<graphic xlink:href="fcomm-10-1645168-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Pie chart showing four groups: Social Media Users 36% (blue), Tourists 28% (orange), Urban Residents 22% (yellow), and Other Specific Groups 14% (green).</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec16">
<label>3.5</label>
<title>Dissemination platforms</title>
<p>The analysis reveals that Flickr and social media are the predominant dissemination platforms, with usage frequencies of 41 and 35%, respectively. This emphasizes the importance of visual content and social interactions in information dissemination. The strategic combination of multiple platforms underscores the need to reach a broader and more varied audience. While emerging technologies are currently underutilized, their potential for enhancing dissemination strategies is evident, presenting opportunities for future research and application. The integration of physical and digital dissemination methods also highlights the evolving landscape of information sharing, aiming for more comprehensive and effective communication (see <xref ref-type="table" rid="tab5">Table 5</xref>).</p>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>Dissemination platform.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Category</th>
<th align="left" valign="top">Topics</th>
<th align="center" valign="top"><italic>N</italic></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle" rowspan="4">Social media</td>
<td align="left" valign="middle">Flickr</td>
<td align="center" valign="middle" rowspan="4">18</td>
</tr>
<tr>
<td align="left" valign="middle">General social media</td>
</tr>
<tr>
<td align="left" valign="middle">Weibo</td>
</tr>
<tr>
<td align="left" valign="middle">Others (WeChat, Wikiloc, Panoramio)</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="2">Other Digital</td>
<td align="left" valign="middle">Smartphones</td>
<td align="center" valign="middle" rowspan="2">6</td>
</tr>
<tr>
<td align="left" valign="middle">Specialized Websites, APP, Clarifai, Remote sensing Technologies, Unity 3D, Web</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="2">Physical Platforms</td>
<td align="left" valign="middle">Exhibition spaces</td>
<td align="center" valign="middle" rowspan="2">3</td>
</tr>
<tr>
<td align="left" valign="middle">Traditional media</td>
</tr>
<tr>
<td align="left" valign="middle">Combined Usage</td>
<td align="left" valign="middle">Various Combinations</td>
<td align="center" valign="middle">7</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec17">
<label>3.6</label>
<title>The path of CT</title>
<p>The analysis reveals that digital dissemination (50%) is the predominant pathway for CT, highlighting its central role in contemporary cultural exchange. The strategic combination of digital dissemination with localization adaptation (22%) and social media dissemination (11%) underscores the importance of contextual and interactive approaches in enhancing the effectiveness of CT. Cross-cultural dissemination (11%) also plays a notable role, reflecting the interconnectedness of global cultures. Cultural inheritance (6%) maintains a unique position, emphasizing the preservation of traditional cultural elements (see <xref ref-type="fig" rid="fig6">Figure 6</xref>).</p>
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption>
<p>The path of CT.</p>
</caption>
<graphic xlink:href="fcomm-10-1645168-g006.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart showing five categories for dissemination. Cultural Inheritance scores 1, Social Media Dissemination scores 2, Cross-cultural Dissemination scores 2, Localization Adaptation scores 4, and Digital Dissemination scores 9.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec18">
<label>3.7</label>
<title>Findings</title>
<p>The analysis of the findings reveals several key themes underpinning the mechanisms of CT within urban contexts. Social media images emerge as a predominant factor, influencing both urban image and cultural cognition across various domains such as urban parks, building functions, and coastal landscapes. Specifically, SMP facilitate the dissemination and enhancement of cultural understanding through visual content, as evidenced by multiple findings.</p>
<p>Image classification and recognition technologies play a crucial role in supporting CES evaluation and promoting broader cultural dissemination. These technologies enhance the efficiency and accuracy of disseminating cultural landscapes and intangible cultural heritage, leveraging tools such as computer vision, open-source classifiers, and multi-modal image classification. DL further augments CT by generating immersive narratives and facilitating aesthetic dissemination. The integration of advanced technologies like LiDAR, remote sensing, and 3D models enhances the depth and reach of cultural dissemination efforts, enabling more comprehensive and interactive cultural experiences. Cultural flagship events significantly contribute to the reconstruction of urban images and the enhancement of cultural identity. The use of unlabeled images presents both advantages and limitations, indicating a need for balanced approaches in CT strategies. Overall, the findings underscore the synergistic interplay between social media, advanced image technologies, and DL in fostering effective CT.</p>
</sec>
</sec>
<sec sec-type="discussion" id="sec19">
<label>4</label>
<title>Discussion</title>
<sec id="sec20">
<label>4.1</label>
<title>The role of AI technologies in visual imagery cultural transmission</title>
<p>DL enhances CT by generating immersive narratives and promoting aesthetic dissemination. Studies show that AI-generated narratives and images that visually present users in a story context can achieve good immersion and engagement (<xref ref-type="bibr" rid="ref10">Dong, 2025</xref>). Image classification and recognition technologies play a crucial role in supporting CES evaluations and promoting broader cultural dissemination (<xref ref-type="bibr" rid="ref32">Ju, 2024</xref>). DL improves the efficiency and accuracy of disseminating cultural landscapes and intangible cultural heritage by leveraging tools such as computer vision and multimodal image classification (<xref ref-type="bibr" rid="ref18">G&#x00EE;rbacia, 2024</xref>). Pre-trained CNNs have shown high effectiveness in accurately identifying cultural heritage elements from social media images (<xref ref-type="bibr" rid="ref4">Belhi et al., 2021</xref>). For instance, some studies have used pre-trained VGG19 and Xception models to significantly improve the accuracy and efficiency of traditional cultural heritage image classification through transfer learning (<xref ref-type="bibr" rid="ref30">Jankovi&#x0107; Babi&#x0107;, 2024</xref>). The integration of advanced technologies such as LiDAR, remote sensing, and 3D modeling enhances the depth and breadth of CT by creating interactive cultural experiences for urban populations (Y. <xref ref-type="bibr" rid="ref38">Li et al., 2023</xref>), as such technologies enable computers to capture in-depth environmental data and generate artistic output based on sensing. A hybrid 3D virtual museum combines panoramic images and models to offer a more realistic and interactive cultural experience (<xref ref-type="bibr" rid="ref2">Barrile et al., 2022</xref>).</p>
</sec>
<sec id="sec21">
<label>4.2</label>
<title>The impact of social media platforms on visual imagery cultural transmission</title>
<p>Social media images, through a collective construction process, significantly influence cultural cognition and the formation of urban identity across different urban contexts (<xref ref-type="bibr" rid="ref40">Loughran et al., 2015</xref>). Unlike the unidirectional dissemination of traditional media, SMP facilitate two-way cultural exchange through user-generated content and interactive functions (such as comments, reposts, and likes), encouraging users to actively participate in content creation and sharing (<xref ref-type="bibr" rid="ref13">Eroglu, 2023</xref>). This collective construction process significantly influences cultural cognition and enhances the role of visual content in promoting cultural understanding (<xref ref-type="bibr" rid="ref19">Gooding, 2004</xref>). Moreover, real-time content dissemination on SMP significantly accelerates CT relative to the slower, fixed schedules of traditional broadcast media (<xref ref-type="bibr" rid="ref7">Chukwu, 2023</xref>). On a technical level, image classification and recognition technologies play a crucial role in supporting CES evaluations and promoting broader cultural dissemination. These technologies, utilizing tools like computer vision and multimodal image classification, improve the efficiency and accuracy of transmitting cultural landscapes and intangible cultural heritage. Research indicates that platforms like Flickr and Weibo are core channels for disseminating visual content and facilitating social interaction (<xref ref-type="bibr" rid="ref39">Liang et al., 2022</xref>).</p>
</sec>
<sec id="sec22">
<label>4.3</label>
<title>Visual imagery and cross-cultural transmission</title>
<p>VI, due to its intuitive and universal nature, can transcend language barriers, simplifying complex cultural concepts and promoting understanding without the need for translation (<xref ref-type="bibr" rid="ref59">Vishwakarma, 2023</xref>). For example, traditional Chinese New Year pictures express themes of happiness, good fortune, and prosperity through symbols like figures, animals, and plants, which helps audiences from different cultural backgrounds understand core Chinese cultural values (<xref ref-type="bibr" rid="ref61">Welch, 2013</xref>). Algorithmic diversification strategies, such as &#x201C;explore mode&#x201D; and randomized recommendations (<xref ref-type="bibr" rid="ref16">Fang et al., 2020</xref>), are effective in broadening users&#x2019; exposure to cross-cultural content by correcting the &#x201C;filter bubble&#x201D; effect often found in recommendation systems (<xref ref-type="bibr" rid="ref21">Grossetti et al., 2021</xref>).</p>
<p>Furthermore, AI-driven visual technologies are pivotal in creating immersive and interactive cultural experiences that drive cross-cultural communication. In the context of digital cultural tourism, technologies like virtual tours and 3D modeling allow global audiences to explore cultural heritage sites and traditions regardless of physical distance (<xref ref-type="bibr" rid="ref44">Napolitano et al., 2018</xref>). These visual mediums not only attract tourists but also serve as educational tools, enabling a deeper appreciation of diverse cultures. The application of AI in this domain transforms passive viewing into an active, engaging experience, fostering a more direct and personal connection between individuals and foreign cultures.</p>
</sec>
<sec id="sec23">
<label>4.4</label>
<title>Challenges and risks posed by AI</title>
<p>Despite advancements, AI technology introduces several challenges in CT. A primary concern is algorithmic bias, as many AI models are trained on datasets dominated by Western aesthetic standards, leading to biases in recognizing non-Western cultural content. This can distort cultural heritage representations and exacerbate cultural inequalities (<xref ref-type="bibr" rid="ref20">Gosal and Ziv, 2020</xref>). AI models often lack a deep understanding of cultural symbols and contexts, resulting in misinterpretations, especially with religious or historical images (<xref ref-type="bibr" rid="ref15">Fan et al., 2023</xref>). Another significant risk is cultural homogenization and the loss of creativity. In creative fields, reliance on repetitive data can stifle innovation, leading to homogenized cultural products (<xref ref-type="bibr" rid="ref15">Fan et al., 2023</xref>).</p>
<p>Privacy and data security also present pressing concerns. Training facial recognition models requires vast amounts of personal images, raising issues of privacy infringement (<xref ref-type="bibr" rid="ref57">Su et al., 2023</xref>). AI can be misused to create fake images or videos, which can mislead the public and undermine social trust, such as fabricated political videos used as propaganda (<xref ref-type="bibr" rid="ref54">Shi et al., 2021</xref>). To mitigate these issues, both technological and methodological improvements are essential. Expanding AI training datasets to include diverse cultural elements such as language, music, and text can help reduce bias (<xref ref-type="bibr" rid="ref15">Fan et al., 2023</xref>). Incorporating cultural context into algorithm design through cultural tagging or building knowledge bases can enhance the understanding of cultural nuances and reduce misinterpretations (<xref ref-type="bibr" rid="ref28">Huai et al., 2022</xref>). Developing explainable AI algorithms is crucial for improving transparency in decision-making and identifying potential biases (<xref ref-type="bibr" rid="ref28">Huai et al., 2022</xref>).</p>
</sec>
<sec id="sec24">
<label>4.5</label>
<title>Future directions</title>
<p>Future research should expand its methodological scope to provide more balanced insights into the application of AI and SMP in diverse cultural contexts. This can be achieved by incorporating a wider range of sources from various cultural backgrounds and conducting more nuanced analyses of the practical challenges and ethical implications. Additionally, the potential of digital tools such as VR, 3D modeling, and panoramic imaging to simultaneously strengthen cultural engagement and ecological conservation should be explored. This could be achieved by creating virtual tours of heritage sites that highlight both their cultural significance and the need for ecological protection. Ultimately, future studies should investigate the potential of user-generated content in supporting CES by developing collaborative cultural-ecological conservation models that foster public participation and promote conservation efforts.</p>
</sec>
<sec id="sec25">
<label>4.6</label>
<title>Limitations</title>
<p>This scoping review has several limitations and strengths that warrant discussion. Many studies relied heavily on user-generated content from SMP, which inherently introduces biases. Factors such as sample selection, tagging practices, and subjective interpretations may distort the representativeness of cultural narratives. These limitations are compounded by the under-representation of diverse cultural contexts, particularly from regions with limited access to advanced technological tools. Addressing this imbalance would require broader geographic and demographic inclusion in future research. In conclusion, while this systematic scoping review provides valuable insights into the role of VI in CT, particularly in the era of DL, it is crucial for future research to address these limitations by expanding the scope of the literature review, incorporating a broader range of sources, and providing a more nuanced analysis of the challenges and practicalities of implementing AI and SMP strategies in diverse cultural contexts.</p>
</sec>
</sec>
<sec sec-type="conclusions" id="sec26">
<label>5</label>
<title>Conclusion</title>
<p>In this study, we conducted a systematic scoping review to analyze the role of DL-driven VI in CT. We have identified that DL-driven visual technologies, especially DL algorithms, significantly enhance the breadth and impact of CT. One of our contributions also highlighted key challenges, including algorithmic bias, cultural homogenization, and the reliability of user-generated content. Future research should focus on improving the inclusivity of DL algorithms, addressing biases in cultural representation, and enhancing the accuracy and authenticity of content through advanced image recognition technologies. This research provides a foundational framework for understanding the complex interplay between AI, VI, and CT, paving the way for more nuanced and effective applications in the future.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec27">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="sec28">
<title>Author contributions</title>
<p>JY: Conceptualization, Writing &#x2013; review &#x0026; editing, Investigation, Formal analysis, Data curation, Writing &#x2013; original draft. TL: Investigation, Writing &#x2013; review &#x0026; editing, Methodology. YL: Writing &#x2013; review &#x0026; editing, Visualization, Methodology, Investigation. PP: Writing &#x2013; review &#x0026; editing, Supervision.</p>
</sec>
<sec sec-type="funding-information" id="sec29">
<title>Funding</title>
<p>The author(s) declare that no financial support was received for the research and/or publication of this article.</p>
</sec>
<sec sec-type="COI-statement" id="sec30">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec31">
<title>Generative AI statement</title>
<p>The authors declare that no Gen AI was used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec32">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bai</surname><given-names>Y.</given-names></name></person-group> (<year>2023</year>). <article-title>Historical changes of urban landscape in the field of visual culture: take the bund as an example</article-title>. <source>Lecture Notes Educ. Psychol. Pub. Media</source> <volume>22</volume>, <fpage>23</fpage>&#x2013;<lpage>34</lpage>. doi: <pub-id pub-id-type="doi">10.54254/2753-7048/22/20230209</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barrile</surname><given-names>V.</given-names></name> <name><surname>Bernardo</surname><given-names>E.</given-names></name> <name><surname>Fotia</surname><given-names>A.</given-names></name> <name><surname>Bilotta</surname><given-names>G.</given-names></name></person-group> (<year>2022</year>). <article-title>A combined study of cultural heritage in archaeological museums: 3D survey and mixed reality</article-title>. <source>Heritage</source> <volume>5</volume>, <fpage>1330</fpage>&#x2013;<lpage>1349</lpage>. doi: <pub-id pub-id-type="doi">10.3390/heritage5030069</pub-id></citation></ref>
<ref id="ref3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Basu</surname><given-names>A.</given-names></name> <name><surname>Paul</surname><given-names>S.</given-names></name> <name><surname>Ghosh</surname><given-names>S.</given-names></name> <name><surname>Das</surname><given-names>S.</given-names></name> <name><surname>Chanda</surname><given-names>B.</given-names></name> <name><surname>Bhagvati</surname><given-names>C.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Digital restoration of cultural heritage with data-driven computing: a survey</article-title>. <source>IEEE Access</source> <volume>11</volume>, <fpage>53939</fpage>&#x2013;<lpage>53977</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2023.3280639</pub-id></citation></ref>
<ref id="ref4"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Belhi</surname><given-names>A.</given-names></name> <name><surname>Ahmed</surname><given-names>H. O.</given-names></name> <name><surname>Alfaqheri</surname><given-names>T.</given-names></name> <name><surname>Bouras</surname><given-names>A.</given-names></name> <name><surname>Sadka</surname><given-names>A. H.</given-names></name> <name><surname>Foufou</surname><given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Study and evaluation of pre-trained CNN networks for cultural heritage image classification</article-title>. In <person-group person-group-type="editor"><name><surname>Belhi</surname><given-names>A.</given-names></name> <name><surname>Bouras</surname><given-names>A.</given-names></name> <name><surname>Al-Ali</surname><given-names>A. K.</given-names></name> <name><surname>Sadka</surname><given-names>A. H.</given-names></name></person-group> <source>Data analytics for cultural heritage: Current trends and concepts</source> (pp. <fpage>47</fpage>&#x2013;<lpage>69</lpage>): <publisher-loc>Cham</publisher-loc> <publisher-name>Springer</publisher-name>.</citation></ref>
<ref id="ref5"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Bisin</surname><given-names>A.</given-names></name> <name><surname>Verdier</surname><given-names>T.</given-names></name></person-group> (<year>2025</year>). <source>Economic models of cultural transmission</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>.</citation></ref>
<ref id="ref6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cardoso</surname><given-names>A. S.</given-names></name> <name><surname>Renna</surname><given-names>F.</given-names></name> <name><surname>Moreno-Llorca</surname><given-names>R.</given-names></name> <name><surname>Alcaraz-Segura</surname><given-names>D.</given-names></name> <name><surname>Tabik</surname><given-names>S.</given-names></name> <name><surname>Ladle</surname><given-names>R. J.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Classifying the content of social media images to support cultural ecosystem service assessments using deep learning models</article-title>. <source>Ecosystem Serv.</source> <volume>54</volume>:<fpage>101410</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecoser.2022.101410</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chukwu</surname><given-names>O. J.</given-names></name></person-group> (<year>2023</year>). <article-title>Interrogating the online internet-based broadcast media stations: platforms, implications and emerged paradigms</article-title>. <source>J. Manage. Sci.</source> <volume>13</volume>, <fpage>74</fpage>&#x2013;<lpage>81</lpage>. doi: <pub-id pub-id-type="doi">10.26524/jms.13.36</pub-id></citation></ref>
<ref id="ref8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Crema</surname><given-names>E. R.</given-names></name> <name><surname>Bortolini</surname><given-names>E.</given-names></name> <name><surname>Lake</surname><given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>How cultural transmission through objects impacts inferences about cultural evolution</article-title>. <source>J. Archaeol. Method Theory</source> <volume>31</volume>, <fpage>202</fpage>&#x2013;<lpage>226</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10816-022-09599-x</pub-id></citation></ref>
<ref id="ref9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Della Lena</surname><given-names>S.</given-names></name> <name><surname>Panebianco</surname><given-names>F.</given-names></name></person-group> (<year>2021</year>). <article-title>Cultural transmission with incomplete information</article-title>. <source>J. Econ. Theory</source> <volume>198</volume>:<fpage>105373</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jet.2021.105373</pub-id></citation></ref>
<ref id="ref10"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Dong</surname><given-names>A.</given-names></name></person-group> (<year>2025</year>). <source>LUMIEA: Enhancing user engagement in storytelling: Empowering personal narratives through AI-generated environments and tactile interaction in mixed reality</source>. <publisher-loc>Toronto, ON</publisher-loc>: <publisher-name>OCAD University</publisher-name>.</citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Eerkens</surname><given-names>J. W.</given-names></name> <name><surname>Lipo</surname><given-names>C. P.</given-names></name></person-group> (<year>2007</year>). <article-title>Cultural transmission theory and the archaeological record: providing context to understanding variation and temporal changes in material culture</article-title>. <source>J. Archaeol. Res.</source> <volume>15</volume>, <fpage>239</fpage>&#x2013;<lpage>274</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10814-007-9013-z</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Eizenberg</surname><given-names>E.</given-names></name> <name><surname>Cohen</surname><given-names>N.</given-names></name></person-group> (<year>2015</year>). <article-title>Reconstructing urban image through cultural flagship events: the case of bat-yam</article-title>. <source>Cities</source> <volume>42</volume>, <fpage>54</fpage>&#x2013;<lpage>62</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cities.2014.09.003</pub-id></citation></ref>
<ref id="ref13"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Eroglu</surname><given-names>D. I.</given-names></name></person-group> (<year>2023</year>). <source>Medium is the message: Unraveling the social media platforms' effects on communication and opinions</source>. <publisher-loc>Blacksburg VI</publisher-loc>: <publisher-name>Virginia Polytechnic Institute and State University</publisher-name>.</citation></ref>
<ref id="ref14"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Fahmy</surname><given-names>S.</given-names></name> <name><surname>Bock</surname><given-names>M.</given-names></name> <name><surname>Wanta</surname><given-names>W.</given-names></name></person-group> (<year>2014</year>). <source>Visual communication theory and research: A mass communication perspective</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>.</citation></ref>
<ref id="ref15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fan</surname><given-names>T.</given-names></name> <name><surname>Wang</surname><given-names>H.</given-names></name> <name><surname>Deng</surname><given-names>S.</given-names></name></person-group> (<year>2023</year>). <article-title>Intangible cultural heritage image classification with multimodal attention and hierarchical fusion</article-title>. <source>Expert Syst. Appl.</source> <volume>231</volume>:<fpage>120555</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2023.120555</pub-id></citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fang</surname><given-names>H.</given-names></name> <name><surname>Zhang</surname><given-names>D.</given-names></name> <name><surname>Shu</surname><given-names>Y.</given-names></name> <name><surname>Guo</surname><given-names>G.</given-names></name></person-group> (<year>2020</year>). <article-title>Deep learning for sequential recommendation: algorithms, influential factors, and evaluations</article-title>. <source>ACM Trans. Inf. Syst.</source> <volume>39</volume>, <fpage>1</fpage>&#x2013;<lpage>42</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3426723</pub-id></citation></ref>
<ref id="ref17"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Ferracani</surname><given-names>A.</given-names></name> <name><surname>Bertini</surname><given-names>M.</given-names></name> <name><surname>Pala</surname><given-names>P.</given-names></name> <name><surname>Nannotti</surname><given-names>G.</given-names></name> <name><surname>Principi</surname><given-names>F.</given-names></name> <name><surname>Becchi</surname><given-names>G.</given-names></name></person-group> (<year>2024</year>). &#x201C;Personalized generative storytelling with AI-visual illustrations for the promotion of knowledge in cultural heritage tourism,&#x201D; in <italic>Paper Presented at the Proceedings of the 6th Workshop on the Analysis, Understanding and Promotion of Heritage Contents.</italic></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>G&#x00EE;rbacia</surname><given-names>F.</given-names></name></person-group> (<year>2024</year>). <article-title>An analysis of research trends for using artificial intelligence in cultural heritage</article-title>. <source>Electronics</source> <volume>13</volume>:<fpage>3738</fpage>. doi: <pub-id pub-id-type="doi">10.3390/electronics13183738</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gooding</surname><given-names>D.</given-names></name></person-group> (<year>2004</year>). <article-title>Cognition, construction and culture: visual theories in the sciences</article-title>. <source>J. Cogn. Cult.</source> <volume>4</volume>, <fpage>551</fpage>&#x2013;<lpage>593</lpage>. doi: <pub-id pub-id-type="doi">10.1163/1568537042484896</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gosal</surname><given-names>A.</given-names></name> <name><surname>Ziv</surname><given-names>G.</given-names></name></person-group> (<year>2020</year>). <article-title>Landscape aesthetics: spatial modelling and mapping using social media images and machine learning</article-title>. <source>Ecol. Indic.</source> <volume>117</volume>:<fpage>106638</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecolind.2020.106638</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Grossetti</surname><given-names>Q.</given-names></name> <name><surname>Du Mouza</surname><given-names>C.</given-names></name> <name><surname>Travers</surname><given-names>N.</given-names></name> <name><surname>Constantin</surname><given-names>C.</given-names></name></person-group> (<year>2021</year>). <article-title>Reducing the filter bubble effect on twitter by considering communities for recommendations</article-title>. <source>Int. J. Web Inf. Syst.</source> <volume>17</volume>, <fpage>728</fpage>&#x2013;<lpage>752</lpage>. doi: <pub-id pub-id-type="doi">10.1108/IJWIS-06-2021-0065</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hegetschweiler</surname><given-names>K. T.</given-names></name> <name><surname>de Vries</surname><given-names>S.</given-names></name> <name><surname>Arnberger</surname><given-names>A.</given-names></name> <name><surname>Bell</surname><given-names>S.</given-names></name> <name><surname>Brennan</surname><given-names>M.</given-names></name> <name><surname>Siter</surname><given-names>N.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Linking demand and supply factors in identifying cultural ecosystem services of urban green infrastructures: a review of European studies</article-title>. <source>Urban For. Urban Green.</source> <volume>21</volume>, <fpage>48</fpage>&#x2013;<lpage>59</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ufug.2016.11.002</pub-id></citation></ref>
<ref id="ref23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Heise</surname><given-names>D.</given-names></name></person-group> (<year>2004</year>). <article-title>Is visual culture becoming our canon of art?</article-title> <source>Art Educ.</source> <volume>57</volume>, <fpage>41</fpage>&#x2013;<lpage>46</lpage>. doi: <pub-id pub-id-type="doi">10.1080/00043125.2004.11653567</pub-id></citation></ref>
<ref id="ref24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hewlett</surname><given-names>B. S.</given-names></name> <name><surname>Boyette</surname><given-names>A. H.</given-names></name> <name><surname>Lew-Levy</surname><given-names>S.</given-names></name> <name><surname>Gallois</surname><given-names>S.</given-names></name> <name><surname>Dira</surname><given-names>S. J.</given-names></name></person-group> (<year>2024</year>). <article-title>Cultural transmission among hunter-gatherers</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume><italic>121</italic></volume>:<fpage>e2322883121</fpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.2322883121</pub-id>, PMID: <pub-id pub-id-type="pmid">39556738</pub-id></citation></ref>
<ref id="ref25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hoffmann</surname><given-names>E. J.</given-names></name> <name><surname>Abdulahhad</surname><given-names>K.</given-names></name> <name><surname>Zhu</surname><given-names>X. X.</given-names></name></person-group> (<year>2023</year>). <article-title>Using social media images for building function classification</article-title>. <source>Cities</source> <volume>133</volume>:<fpage>104107</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cities.2022.104107</pub-id></citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Homer</surname><given-names>W. I.</given-names></name></person-group> (<year>1998</year>). <article-title>Visual culture: a new paradigm</article-title>. <source>Am. Art.</source> <volume>12</volume>, <fpage>6</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1086/424309</pub-id></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname><given-names>Q.</given-names></name> <name><surname>Yu</surname><given-names>D.</given-names></name> <name><surname>Wang</surname><given-names>S.</given-names></name> <name><surname>Fu</surname><given-names>C.</given-names></name> <name><surname>Ai</surname><given-names>M.</given-names></name> <name><surname>Wang</surname><given-names>W.</given-names></name></person-group> (<year>2017</year>). <article-title>Hybrid three-dimensional representation based on panoramic images and three-dimensional models for a virtual museum: data collection, model, and visualization</article-title>. <source>Inf. Vis.</source> <volume>16</volume>, <fpage>126</fpage>&#x2013;<lpage>138</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1473871616655467</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huai</surname><given-names>S.</given-names></name> <name><surname>Chen</surname><given-names>F.</given-names></name> <name><surname>Liu</surname><given-names>S.</given-names></name> <name><surname>Canters</surname><given-names>F.</given-names></name> <name><surname>Van de Voorde</surname><given-names>T.</given-names></name></person-group> (<year>2022</year>). <article-title>Using social media photos and computer vision to assess cultural ecosystem services and landscape features in urban parks</article-title>. <source>Ecosystem Serv.</source> <volume>57</volume>:<fpage>101475</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecoser.2022.101475</pub-id></citation></ref>
<ref id="ref29"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Huang</surname><given-names>Y.</given-names></name> <name><surname>Yang</surname><given-names>S.</given-names></name></person-group> (<year>2016</year>). &#x201C;The orientation of urban image and the strategy of cultural communication,&#x201D; in <italic>Paper Presented at the 2nd International Conference on Computer Engineering, Information Science &#x0026; Application Technology (ICCIA 2017)</italic></citation></ref>
<ref id="ref30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jankovi&#x0107; Babi&#x0107;</surname><given-names>R.</given-names></name></person-group> (<year>2024</year>). <article-title>A comparison of methods for image classification of cultural heritage using transfer learning for feature extraction</article-title>. <source>Neural Comput. &#x0026; Applic.</source> <volume>36</volume>, <fpage>11699</fpage>&#x2013;<lpage>11709</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00521-023-08764-x</pub-id></citation></ref>
<ref id="ref31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname><given-names>Y.</given-names></name> <name><surname>Pang</surname><given-names>P. C.-I.</given-names></name> <name><surname>Wong</surname><given-names>D.</given-names></name> <name><surname>Kan</surname><given-names>H. Y.</given-names></name></person-group> (<year>2023</year>). <article-title>Natural language processing adoption in governments and future research directions: a systematic review</article-title>. <source>Appl. Sci.</source> <volume>13</volume>:<fpage>12346</fpage>. doi: <pub-id pub-id-type="doi">10.3390/app132212346</pub-id></citation></ref>
<ref id="ref32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ju</surname><given-names>F.</given-names></name></person-group> (<year>2024</year>). <article-title>Mapping the knowledge structure of image recognition in cultural heritage: a scientometric analysis using CiteSpace, VOSviewer, and bibliometrix</article-title>. <source>J. Imaging</source> <volume><italic>10</italic></volume>:<fpage>272</fpage>. doi: <pub-id pub-id-type="doi">10.3390/jimaging10110272</pub-id>, PMID: <pub-id pub-id-type="pmid">39590736</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Laba</surname><given-names>N.</given-names></name></person-group> (<year>2024</year>). <article-title>Engine for the imagination? Visual generative media and the issue of representation</article-title>. <source>Media Cult. Soc.</source> <volume>46</volume>, <fpage>1599</fpage>&#x2013;<lpage>1620</lpage>. doi: <pub-id pub-id-type="doi">10.1177/01634437241259950</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>H.</given-names></name> <name><surname>Seo</surname><given-names>B.</given-names></name> <name><surname>Koellner</surname><given-names>T.</given-names></name> <name><surname>Lautenbach</surname><given-names>S.</given-names></name></person-group> (<year>2019</year>). <article-title>Mapping cultural ecosystem services 2.0&#x2013;potential and shortcomings from unlabeled crowd sourced images</article-title>. <source>Ecol. Indic.</source> <volume>96</volume>, <fpage>505</fpage>&#x2013;<lpage>515</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecolind.2018.08.035</pub-id></citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Legare</surname><given-names>C. H.</given-names></name></person-group> (<year>2017</year>). <article-title>Cumulative cultural learning: development and diversity</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume><italic>114</italic></volume>, <fpage>7877</fpage>&#x2013;<lpage>7883</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.1620743114</pub-id>, PMID: <pub-id pub-id-type="pmid">28739945</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>X.</given-names></name> <name><surname>Liang</surname><given-names>X.</given-names></name> <name><surname>Yu</surname><given-names>T.</given-names></name> <name><surname>Ruan</surname><given-names>S.</given-names></name> <name><surname>Fan</surname><given-names>R.</given-names></name></person-group> (<year>2022</year>). <article-title>Research on the integration of cultural tourism industry driven by digital economy in the context of COVID-19&#x2014;based on the data of 31 Chinese provinces</article-title>. <source>Front. Public Health</source> <volume><italic>10</italic></volume>:<fpage>780476</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpubh.2022.780476</pub-id>, PMID: <pub-id pub-id-type="pmid">35356017</pub-id></citation></ref>
<ref id="ref37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>R.</given-names></name> <name><surname>Wang</surname><given-names>C.</given-names></name></person-group> (<year>2022</year>). <article-title>Cultural and creative product design and image recognition based on deep learning</article-title>. <source>Comput. Intell. Neurosci.</source> <volume><italic>2022</italic></volume>, <fpage>1</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1155/2022/7256584</pub-id>, PMID: <pub-id pub-id-type="pmid">35865496</pub-id></citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>Y.</given-names></name> <name><surname>Zhao</surname><given-names>L.</given-names></name> <name><surname>Chen</surname><given-names>Y.</given-names></name> <name><surname>Zhang</surname><given-names>N.</given-names></name> <name><surname>Fan</surname><given-names>H.</given-names></name> <name><surname>Zhang</surname><given-names>Z.</given-names></name></person-group> (<year>2023</year>). <article-title>3D LiDAR and multi-technology collaboration for preservation of built heritage in China: a review</article-title>. <source>Int. J. Appl. Earth Obs. Geoinf.</source> <volume>116</volume>:<fpage>103156</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jag.2022.103156</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liang</surname><given-names>X.</given-names></name> <name><surname>Hua</surname><given-names>N.</given-names></name> <name><surname>Martin</surname><given-names>J.</given-names></name> <name><surname>Dellapiana</surname><given-names>E.</given-names></name> <name><surname>Coscia</surname><given-names>C.</given-names></name> <name><surname>Zhang</surname><given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Social media as a medium to promote local perception expression in China&#x2019;s world heritage sites</article-title>. <source>Land</source> <volume>11</volume>:<fpage>841</fpage>. doi: <pub-id pub-id-type="doi">10.3390/land11060841</pub-id></citation></ref>
<ref id="ref40"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Loughran</surname><given-names>K.</given-names></name> <name><surname>Fine</surname><given-names>G. A.</given-names></name> <name><surname>Hunter</surname><given-names>M. A.</given-names></name></person-group> (<year>2015</year>). &#x201C;<article-title>Urban spaces, city cultures, and collective memories</article-title>&#x201D; in <source>Routledge international handbook of memory studies</source>. eds. <person-group person-group-type="editor"><name><surname>Tota</surname><given-names>A. L.</given-names></name> <name><surname>Hagen</surname><given-names>T.</given-names></name></person-group> (<publisher-loc>London</publisher-loc>: <publisher-name>Routledge</publisher-name>), <fpage>193</fpage>&#x2013;<lpage>204</lpage>.</citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Luo</surname><given-names>Y. T.</given-names></name> <name><surname>Liu</surname><given-names>T.</given-names></name> <name><surname>Pang</surname><given-names>P. C.-I.</given-names></name> <name><surname>Wang</surname><given-names>Z.</given-names></name> <name><surname>Chan</surname><given-names>K. I.</given-names></name></person-group> (<year>2025</year>). <article-title>Exploring information interaction preferences in an LLM-assisted learning environment with a topic modeling framework</article-title>. <source>Appl. Sci.</source> <volume>15</volume>:<fpage>7515</fpage>. doi: <pub-id pub-id-type="doi">10.3390/app15137515</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Mirzoeff</surname><given-names>N.</given-names></name></person-group> (<year>1999</year>). <source>An introduction to visual culture</source>, <italic>vol</italic>. <volume>274</volume>. <publisher-loc>London</publisher-loc>: <publisher-name>Routledge</publisher-name>.</citation></ref>
<ref id="ref43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Motte</surname><given-names>E.</given-names></name> <name><surname>McInnes</surname><given-names>R.</given-names></name></person-group> (<year>2019</year>). <article-title>Using artistic imagery to improve understanding of coastal landscape changes on the Rance estuary (French Channel coast)</article-title>. <source>Geoheritage</source> <volume>11</volume>, <fpage>961</fpage>&#x2013;<lpage>972</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12371-018-00341-2</pub-id></citation></ref>
<ref id="ref44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Napolitano</surname><given-names>R. K.</given-names></name> <name><surname>Scherer</surname><given-names>G.</given-names></name> <name><surname>Glisic</surname><given-names>B.</given-names></name></person-group> (<year>2018</year>). <article-title>Virtual tours and informational modeling for conservation of cultural heritage sites</article-title>. <source>J. Cult. Herit.</source> <volume>29</volume>, <fpage>123</fpage>&#x2013;<lpage>129</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.culher.2017.08.007</pub-id></citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Panchanathan</surname><given-names>K.</given-names></name></person-group> (<year>2024</year>). <article-title>Cultural evolution</article-title>. <source>Hum. Behav. Ecol.</source> <volume>92</volume>:<fpage>356</fpage>.</citation></ref>
<ref id="ref46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Plieninger</surname><given-names>T.</given-names></name> <name><surname>Bieling</surname><given-names>C.</given-names></name> <name><surname>Fagerholm</surname><given-names>N.</given-names></name> <name><surname>Byg</surname><given-names>A.</given-names></name> <name><surname>Hartel</surname><given-names>T.</given-names></name> <name><surname>Hurley</surname><given-names>P.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>The role of cultural ecosystem services in landscape management and planning</article-title>. <source>Curr. Opin. Environ. Sustain.</source> <volume>14</volume>, <fpage>28</fpage>&#x2013;<lpage>33</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cosust.2015.02.006</pub-id></citation></ref>
<ref id="ref47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Richards</surname><given-names>D. R.</given-names></name> <name><surname>Friess</surname><given-names>D. A.</given-names></name></person-group> (<year>2015</year>). <article-title>A rapid indicator of cultural ecosystem service usage at a fine spatial scale: content analysis of social media photographs</article-title>. <source>Ecol. Indic.</source> <volume>53</volume>, <fpage>187</fpage>&#x2013;<lpage>195</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecolind.2015.01.034</pub-id></citation></ref>
<ref id="ref48"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Richards</surname><given-names>D. R.</given-names></name> <name><surname>Tun&#x00E7;er</surname><given-names>B.</given-names></name></person-group> (<year>2018</year>). <article-title>Using image recognition to automate assessment of cultural ecosystem services from social media photographs</article-title>. <source>Ecosystem Serv.</source> <volume>31</volume>, <fpage>318</fpage>&#x2013;<lpage>325</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecoser.2017.09.004</pub-id></citation></ref>
<ref id="ref49"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Robb</surname><given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Art (pre) history: ritual, narrative and visual culture in Neolithic and bronze age Europe</article-title>. <source>J. Archaeol. Method Theory</source> <volume><italic>27</italic></volume>, <fpage>454</fpage>&#x2013;<lpage>480</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10816-020-09471-w</pub-id>, PMID: <pub-id pub-id-type="pmid">32879588</pub-id></citation></ref>
<ref id="ref50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Romanazzi</surname><given-names>G. R.</given-names></name> <name><surname>Koto</surname><given-names>R.</given-names></name> <name><surname>De Boni</surname><given-names>A.</given-names></name> <name><surname>Palmisano</surname><given-names>G. O.</given-names></name> <name><surname>Cioffi</surname><given-names>M.</given-names></name> <name><surname>Roma</surname><given-names>R.</given-names></name></person-group> (<year>2023</year>). <article-title>Cultural ecosystem services: a review of methods and tools for economic evaluation</article-title>. <source>Environ. Sustain. Indic.</source> <volume>20</volume>:<fpage>100304</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.indic.2023.100304</pub-id></citation></ref>
<ref id="ref51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scholte</surname><given-names>S. S.</given-names></name> <name><surname>Van Teeffelen</surname><given-names>A. J.</given-names></name> <name><surname>Verburg</surname><given-names>P. H.</given-names></name></person-group> (<year>2015</year>). <article-title>Integrating socio-cultural perspectives into ecosystem service valuation: a review of concepts and methods</article-title>. <source>Ecol. Econ.</source> <volume>114</volume>, <fpage>67</fpage>&#x2013;<lpage>78</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecolecon.2015</pub-id></citation></ref>
<ref id="ref52"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Sch&#x00F6;npflug</surname><given-names>U.</given-names></name></person-group> (<year>2008</year>). <source>Cultural transmission: Psychological, developmental, social, and methodological aspects</source>. <publisher-loc>Cambridge</publisher-loc>: <publisher-name>Cambridge University Press</publisher-name>.</citation></ref>
<ref id="ref53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shahbazi</surname><given-names>N.</given-names></name> <name><surname>Lin</surname><given-names>Y.</given-names></name> <name><surname>Asudeh</surname><given-names>A.</given-names></name> <name><surname>Jagadish</surname><given-names>H.</given-names></name></person-group> (<year>2023</year>). <article-title>Representation bias in data: a survey on identification and resolution techniques</article-title>. <source>ACM Comput. Surv.</source> <volume>55</volume>, <fpage>1</fpage>&#x2013;<lpage>39</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3588433</pub-id></citation></ref>
<ref id="ref54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shi</surname><given-names>J.</given-names></name> <name><surname>Honjo</surname><given-names>T.</given-names></name> <name><surname>Yazawa</surname><given-names>Y.</given-names></name> <name><surname>Furuya</surname><given-names>K.</given-names></name></person-group> (<year>2021</year>). <article-title>Recognition and classification of homogeneous landscape with visitor&#x2013;employed photography and cloud image annotation API&#x2014;an example of the Riverscape in Nihonbashi, Tokyo, Japan</article-title>. <source>Landscape Architecture Frontiers</source> <volume><italic>9</italic></volume>, <fpage>12</fpage>&#x2013;<lpage>31</lpage>. doi: <pub-id pub-id-type="doi">10.15302/J-LAF-1-020054</pub-id></citation></ref>
<ref id="ref55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Somaini</surname><given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Algorithmic images: artificial intelligence and visual culture</article-title>. <source>Grey Room</source>:<fpage>93</fpage>, <fpage>74</fpage>&#x2013;<lpage>115</lpage>. doi: <pub-id pub-id-type="doi">10.1162/grey_a_00383</pub-id></citation></ref>
<ref id="ref56"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Soreanu</surname><given-names>C.</given-names></name> <name><surname>German</surname><given-names>L.</given-names></name></person-group> (<year>2022</year>). <article-title>Visual communication in cultural media. The Rashomon effect in the image globalization paradigm</article-title>. <source>Rev. Art Educ.</source> <volume>26</volume>, <fpage>177</fpage>&#x2013;<lpage>185</lpage>. doi: <pub-id pub-id-type="doi">10.2478/rae-2023-0025</pub-id></citation></ref>
<ref id="ref57"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Su</surname><given-names>L.</given-names></name> <name><surname>Chen</surname><given-names>W.</given-names></name> <name><surname>Zhou</surname><given-names>Y.</given-names></name> <name><surname>Fan</surname><given-names>L.</given-names></name></person-group> (<year>2023</year>). <article-title>Exploring city image perception in social media big data through deep learning: a case study of Zhongshan City</article-title>. <source>Sustainability</source> <volume>15</volume>:<fpage>3311</fpage>. doi: <pub-id pub-id-type="doi">10.3390/su15043311</pub-id></citation></ref>
<ref id="ref58"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van Berkel</surname><given-names>D. B.</given-names></name> <name><surname>Tabrizian</surname><given-names>P.</given-names></name> <name><surname>Dorning</surname><given-names>M. A.</given-names></name> <name><surname>Smart</surname><given-names>L.</given-names></name> <name><surname>Newcomb</surname><given-names>D.</given-names></name> <name><surname>Mehaffey</surname><given-names>M.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Quantifying the visual-sensory landscape qualities that contribute to cultural ecosystem services using social media and LiDAR</article-title>. <source>Ecosystem Serv.</source> <volume>31</volume>, <fpage>326</fpage>&#x2013;<lpage>335</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecoser.2018.03.022</pub-id>, PMID: <pub-id pub-id-type="pmid">30148061</pub-id></citation></ref>
<ref id="ref59"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vishwakarma</surname><given-names>V. K.</given-names></name></person-group> (<year>2023</year>). <article-title>Translating cultural nuances: challenges and strategies</article-title>. <source>ELT Voices</source> <volume>13</volume>:<fpage>8268531</fpage>. doi: <pub-id pub-id-type="doi">10.5281/ZENODO.8268531</pub-id></citation></ref>
<ref id="ref60"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>H.</given-names></name> <name><surname>Gao</surname><given-names>Z.</given-names></name> <name><surname>Zhang</surname><given-names>X.</given-names></name> <name><surname>Du</surname><given-names>J.</given-names></name> <name><surname>Xu</surname><given-names>Y.</given-names></name> <name><surname>Wang</surname><given-names>Z.</given-names></name></person-group> (<year>2024</year>). <article-title>Gamifying cultural heritage: exploring the potential of immersive virtual exhibitions</article-title>. <source>Telemat. Inform. Rep.</source> <volume>15</volume>:<fpage>100150</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.teler.2024.100150</pub-id></citation></ref>
<ref id="ref61"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Welch</surname><given-names>P. B.</given-names></name></person-group> (<year>2013</year>). <source>Chinese art: A guide to motifs and visual imagery</source>. <publisher-loc>Vermont</publisher-loc>: <publisher-name>Tuttle Publishing</publisher-name>.</citation></ref>
<ref id="ref62"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Winder</surname><given-names>S. G.</given-names></name> <name><surname>Lee</surname><given-names>H.</given-names></name> <name><surname>Seo</surname><given-names>B.</given-names></name> <name><surname>Lia</surname><given-names>E. H.</given-names></name> <name><surname>Wood</surname><given-names>S. A.</given-names></name></person-group> (<year>2022</year>). <article-title>An open-source image classifier for characterizing recreational activities across landscapes</article-title>. <source>People Nat.</source> <volume>4</volume>, <fpage>1249</fpage>&#x2013;<lpage>1262</lpage>. doi: <pub-id pub-id-type="doi">10.1002/pan3.10382</pub-id></citation></ref>
<ref id="ref63"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xia</surname><given-names>S.</given-names></name> <name><surname>Xia</surname><given-names>Y.</given-names></name> <name><surname>Liu</surname><given-names>T.</given-names></name> <name><surname>Luo</surname><given-names>Y.</given-names></name> <name><surname>Pang</surname><given-names>P. C.-I.</given-names></name></person-group> (<year>2025</year>). <article-title>Application of deep learning models in gastric cancer pathology image analysis: a systematic scoping review</article-title>. <source>BMC Cancer</source> <volume><italic>25</italic></volume>:<fpage>1257</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12885-025-14662-3</pub-id>, PMID: <pub-id pub-id-type="pmid">40750872</pub-id></citation></ref>
<ref id="ref64"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>You</surname><given-names>S.</given-names></name> <name><surname>Zheng</surname><given-names>Q.</given-names></name> <name><surname>Chen</surname><given-names>B.</given-names></name> <name><surname>Xu</surname><given-names>Z.</given-names></name> <name><surname>Lin</surname><given-names>Y.</given-names></name> <name><surname>Gan</surname><given-names>M.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Identifying the spatiotemporal dynamics of forest ecotourism values with remotely sensed images and social media data: a perspective of public preferences</article-title>. <source>J. Clean. Prod.</source> <volume>341</volume>:<fpage>130715</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jclepro.2022.130715</pub-id></citation></ref>
<ref id="ref65"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname><given-names>X.</given-names></name> <name><surname>Lu</surname><given-names>Y.</given-names></name> <name><surname>Huang</surname><given-names>W.</given-names></name> <name><surname>Lin</surname><given-names>G.</given-names></name></person-group> (<year>2024</year>). <article-title>Assessing and interpreting perceived park accessibility, usability and attractiveness through texts and images from social media</article-title>. <source>Sustain. Cities Soc.</source> <volume>112</volume>:<fpage>105619</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.scs.2024.105619</pub-id></citation></ref>
</ref-list>
</back>
</article>