<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="methods-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Genet.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Genetics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Genet.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-8021</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1779455</article-id>
<article-id pub-id-type="doi">10.3389/fgene.2026.1779455</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Methods</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>DWGCN: distance-weighted graph convolutional network for robust spatial domain identification in spatial transcriptomics</article-title>
<alt-title alt-title-type="left-running-head">Peng et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fgene.2026.1779455">10.3389/fgene.2026.1779455</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Peng</surname>
<given-names>Chunfang</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3325645"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Li</surname>
<given-names>Guobin</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3334664"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wu</surname>
<given-names>Jiamiao</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3335855"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Fan</surname>
<given-names>Qiao</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1226424"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Guo</surname>
<given-names>Xiaobo</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1176212"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>Department of Statistical Science, School of Mathematics, Sun Yat-sen University</institution>, <city>Guangzhou</city>, <country country="CN">China</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>Southern China Center for Statistical Science, School of Mathematics, Sun Yat-sen University</institution>, <city>Guangzhou</city>, <country country="CN">China</country>
</aff>
<aff id="aff3">
<label>3</label>
<institution>Center for Biomedical Data Science, Duke-NUS</institution>, <city>Singapore</city>, <country country="SG">Singapore</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Xiaobo Guo, <email xlink:href="mailto:guoxb3@mail.sysu.edu.cn">guoxb3@mail.sysu.edu.cn</email>
</corresp>
<fn fn-type="equal" id="fn001">
<label>&#x2020;</label>
<p>These authors have contributed equally to this work</p>
</fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-10">
<day>10</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1779455</elocation-id>
<history>
<date date-type="received">
<day>02</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>23</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>27</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Peng, Li, Wu, Fan and Guo.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Peng, Li, Wu, Fan and Guo</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-10">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Background</title>
<p>Graph Convolutional Networks (GCNs) are widely applied for spatial domain identification in spatial transcriptomics (ST), where node representations are learned by aggregating information from neighboring spots. However, most ST workflows construct spatial graphs by assigning equal weights to neighbors and self-loops, and then applying degree-based normalization. This procedure often yields near-uniform adjacency matrices, suppressing natural distance heterogeneity, diminishing spatial resolution, aggravating GCN over-smoothing, and obscuring fine-grained tissue boundaries.</p>
</sec>
<sec>
<title>Methods</title>
<p>We introduce DWGCN, a Distance-Weighted Graph Convolutional Network that replaces uniform neighbor assignment with inverse-distance weighting (IDW) and spot-wise normalization. DWGCN enhances locality-sensitive aggregation by assigning larger weights to proximal neighbors, while preserving self-loop dominance to maintain intrinsic spot information and reduce hub-driven dilution.</p>
</sec>
<sec>
<title>Results</title>
<p>Across four real and four simulated ST datasets, integrating DWGCN with representative GCN-based frameworks (SEDR, GraphST, SpaNCMG, SpaGIC) generally improved clustering accuracy, particularly in tissues with complex spatial architectures.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>These results demonstrate that DWGCN offers a broadly applicable approach for restoring distance-aware structure in spatial graphs, thereby improving the accuracy of spatial domain identification.</p>
</sec>
</abstract>
<kwd-group>
<kwd>clustering</kwd>
<kwd>graph convolutional networks</kwd>
<kwd>representation learning</kwd>
<kwd>spatial domain identification</kwd>
<kwd>spatial transcriptomics</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the National Natural Science Foundation of China (Grant No. 12371280).</funding-statement>
</funding-group>
<counts>
<fig-count count="3"/>
<table-count count="0"/>
<equation-count count="9"/>
<ref-count count="31"/>
<page-count count="9"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Computational Genomics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Spatial transcriptomics (ST) has revolutionized tissue biology research by enabling transcriptome-wide profiling while preserving the spatial context of gene expression (<xref ref-type="bibr" rid="B31">Zhuang, 2021</xref>; <xref ref-type="bibr" rid="B11">Larsson et al., 2021</xref>). Using diverse imaging- and sequencing-based platforms, ST provides valuable insights into cellular heterogeneity, local microenvironments, and the spatial organization of biological processes within tissues (<xref ref-type="bibr" rid="B3">Chen et al., 2015</xref>; <xref ref-type="bibr" rid="B7">Eng et al., 2019</xref>; <xref ref-type="bibr" rid="B23">St&#xe5;hl et al., 2016</xref>; <xref ref-type="bibr" rid="B17">Rodriques et al., 2019</xref>). Accurate spatial domain delineation is foundational for reconstructing tissue organization, inferring developmental processes, and resolving disease-associated microenvironments (<xref ref-type="bibr" rid="B26">Xu et al., 2022</xref>).</p>
<p>Early computational approaches, such as Seurat (<xref ref-type="bibr" rid="B21">Satija et al., 2015</xref>) and spatialLIBD (<xref ref-type="bibr" rid="B16">Pardo et al., 2022</xref>), primarily relied on transcriptomic similarity and often overlooked spatial continuity, leading to fragmented or biologically implausible clusters. Subsequent frameworks, including BayesSpace (<xref ref-type="bibr" rid="B29">Zhao et al., 2021</xref>) and Giotto (<xref ref-type="bibr" rid="B6">Dries et al., 2021</xref>), incorporated spatial priors to improve cluster coherence. However, these methods rely on predefined spatial priors and therefore lack the capacity to learn complex, nonlinear gene&#x2013;space relationships. In recent years, graph-based deep learning, particularly graph convolutional networks (GCNs), has emerged as a powerful paradigm that simultaneously integrates transcriptomic similarity and spatial proximity. GCN-based spatial identification methods such as SEDR (<xref ref-type="bibr" rid="B27">Xu et al., 2024</xref>) and GraphST (<xref ref-type="bibr" rid="B14">Long et al., 2023</xref>) have demonstrated superior accuracy and robustness across diverse ST platforms, highlighting the potential of GCN-based approaches for spatial domain identification.</p>
<p>GCNs build representations by iteratively aggregating information from neighboring spots; consequently, their effectiveness strongly depends on the quality of the underlying adjacency graph (<xref ref-type="bibr" rid="B10">Kipf, 2016</xref>). In most GCN-based spatial transcriptomics workflows, the spatial graph is constructed using a <inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:mi>K</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>-nearest neighbor (KNN) strategy with equal edge weights, ignoring distance differences and treating all neighbors as equally influential. Edge weights are further rescaled using degree normalization to construct a normalized adjacency matrix (<xref ref-type="bibr" rid="B22">Si et al., 2024</xref>; <xref ref-type="bibr" rid="B13">Liu et al., 2024</xref>). As a result, the propagation weights become nearly uniform, oversimplifying the underlying spatial structure, where local spatial heterogeneity is diminished, and the contributions of spatially proximal neighbors may be disproportionately diluted. These effects exacerbate the inherent problem of over-smoothing in GCNs, during which repeated propagation under symmetric normalization gradually homogenizes node features and obscures meaningful spatial boundaries (<xref ref-type="bibr" rid="B28">Yang et al., 2020</xref>; <xref ref-type="bibr" rid="B18">Rong et al., 2019</xref>; <xref ref-type="bibr" rid="B20">Rusch et al., 2023</xref>). To partially mitigate over-smoothing, many spatial GCN pipelines restrict the neighborhood size and commonly adopt shallow GCN architectures (<xref ref-type="bibr" rid="B12">Li et al., 2018</xref>). Therefore, preserving natural distance heterogeneity in the adjacency matrix is crucial for enhancing the performance of spatial GCN-based models.</p>
<p>To address these limitations, we developed DWGCN (a Distance-Weighted Graph Convolutional Network), a framework designed to refine spatial graph construction and improve spatial domain identification (<xref ref-type="fig" rid="F1">Figure 1</xref>). Unlike traditional adjacency matrices with uniform edge weights, DWGCN uses an inverse distance weighting (IDW) scheme to construct adjacency graphs. IDW assigns higher weights to nearby neighbors while keeping each node&#x2019;s self-loop as the largest, preserving its own information. A tunable exponent parameter <inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> controls the rate of distance decay, enabling adaptation to tissues with diverse local structural patterns. By applying relative rather than absolute distance scaling, DWGCN robustly quantifies spatial similarity across datasets with varying resolutions. Furthermore, instead of degree-based normalization, DWGCN employs spot-wise normalization to preserve the relative ordering of distance-derived weights and prevent hub dominance during message passing. Together, these innovations mitigate over-smoothing, enhance local structural resolution, and improve the biological fidelity of spatial domain delineation.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Overview of DWGCN framework. <bold>(A)</bold> Distance-weighted adjacency construction: distance-weighted adjacency matrix <inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> is constructed using inverse-distance weighting (IDW) over each spot&#x2019;s top <inline-formula id="inf4">
<mml:math id="m4">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> nearest neighbors, whereas the conventional adjacency matrix <inline-formula id="inf5">
<mml:math id="m5">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ori</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> is usually constructed with equal-weight edges between nodes. <bold>(B)</bold> GCN-based spatial identification: <inline-formula id="inf6">
<mml:math id="m6">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> substitutes <inline-formula id="inf7">
<mml:math id="m7">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ori</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> during GCN-based representation learning in GCN-based spatial clustering frameworks (including SEDR, GraphST, SpaGIC, and SpaNCMG), without altering their original network architectures, yielding DWGCN-enhanced models (including DW_SEDR, DW_GraphST, DW_SpaGIC, and DW_SpaNCMG). <bold>(C)</bold> Benchmarking framework: the performance gains of DWGCN-enhanced models are evaluated on real and simulated datasets, with spatial domain accuracy quantified using ARI, NMI, and Homogeneity.</p>
</caption>
<graphic xlink:href="fgene-17-1779455-g001.tif">
<alt-text content-type="machine-generated">Three-part schematic summarizes a workflow for spatial domain identification using graph convolutional networks (GCN). Panel A shows distance-weighted adjacency construction from spot coordinates and gene features. Panel B compares representation learning with original versus enhanced models using different adjacency matrices, illustrated by network diagrams and clustering output. Panel C details benchmarking using real and simulated datasets, with evaluation metrics such as ARI, NMI, homogeneity, and significance testing.</alt-text>
</graphic>
</fig>
</sec>
<sec sec-type="materials|methods" id="s2">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2-1">
<label>2.1</label>
<title>Framework of DWGCN</title>
<p>DWGCN is developed to refine spatial graph construction for spatial transcriptomics (ST) analysis. The workflow of DWGCN consists of two main stages (<xref ref-type="fig" rid="F1">Figure 1</xref>). In the first stage, a sparse, weighted adjacency matrix is constructed using inverse distance weighting (IDW) (<xref ref-type="bibr" rid="B15">Lu and Wong, 2008</xref>) over each spot&#x2019;s neighbors. In the second stage, this distance-weighted adjacency matrix replaces the conventional degree-normalized adjacency matrix in GCN-based spatial clustering frameworks. The performance of DWGCN was systematically evaluated by comparing baseline models with their DWGCN-enhanced versions across both real and simulated spatial transcriptomic datasets.</p>
</sec>
<sec id="s2-2">
<label>2.2</label>
<title>Distance-weighted graph construction</title>
<p>Let <inline-formula id="inf8">
<mml:math id="m8">
<mml:mrow>
<mml:mi>X</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi mathvariant="double-struck">R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>M</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> denote the gene expression matrix, where <inline-formula id="inf9">
<mml:math id="m9">
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> denotes the number of spatial spots and <inline-formula id="inf10">
<mml:math id="m10">
<mml:mrow>
<mml:mi>M</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> the number of genes.</p>
<sec id="s2-2-1">
<label>2.2.1</label>
<title>Relative distance computation</title>
<p>For any two spots <inline-formula id="inf11">
<mml:math id="m11">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf12">
<mml:math id="m12">
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> with 2D coordinates <inline-formula id="inf13">
<mml:math id="m13">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold">p</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf14">
<mml:math id="m14">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold">p</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, the Euclidean distance is computed as shown in <xref ref-type="disp-formula" rid="e1">Equation 1</xref>:<disp-formula id="e1">
<mml:math id="m15">
<mml:mrow>
<mml:msubsup>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:mo stretchy="false">&#x2016;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold">p</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold">p</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mo stretchy="false">&#x2016;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>
</p>
<p>To ensure cross-platform consistency across ST technologies with varying resolutions, distances are normalized using the mean nearest-neighbor distance, as defined in <xref ref-type="disp-formula" rid="e2">Equation 2</xref>:<disp-formula id="e2">
<mml:math id="m16">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mo>&#x304;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mtext>min</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:munder>
<mml:mrow>
<mml:mi>min</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2260;</mml:mo>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:munder>
<mml:msubsup>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msubsup>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>the relative distance is defined in <xref ref-type="disp-formula" rid="e3">Equation 3</xref> as:<disp-formula id="e3">
<mml:math id="m17">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msubsup>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mo>&#x304;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mtext>min</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>
</p>
<p>The additive constant of 1 ensures <inline-formula id="inf15">
<mml:math id="m18">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf16">
<mml:math id="m19">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3e;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> for <inline-formula id="inf17">
<mml:math id="m20">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2260;</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, stabilizing inverse-distance computation and preserving the dominance of self-loops during propagation.</p>
</sec>
<sec id="s2-2-2">
<label>2.2.2</label>
<title>Neighborhood graph construction</title>
<p>The relative distances between spots are used to create a neighborhood graph <inline-formula id="inf18">
<mml:math id="m21">
<mml:mrow>
<mml:mi mathvariant="script">G</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="script">E</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>, where each node <inline-formula id="inf19">
<mml:math id="m22">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> corresponds to a spatial spot, and edges represent spatial proximity. Specifically, top <inline-formula id="inf20">
<mml:math id="m23">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> nearest neighbors for each spot are selected to construct an adjacency matrix, denoted as <inline-formula id="inf21">
<mml:math id="m24">
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi mathvariant="double-struck">R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>, defined in <xref ref-type="disp-formula" rid="e4">Equation 4</xref>:<disp-formula id="e4">
<mml:math id="m25">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="{" close="">
<mml:mrow>
<mml:mtable class="cases">
<mml:mtr>
<mml:mtd columnalign="left">
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mspace width="1em"/>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mtext>if&#x2009;</mml:mtext>
<mml:mi>j</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="left">
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mspace width="1em"/>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mtext>otherwise.</mml:mtext>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>where <inline-formula id="inf22">
<mml:math id="m26">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> denotes the set of top <inline-formula id="inf23">
<mml:math id="m27">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> nearest neighbors of node <inline-formula id="inf24">
<mml:math id="m28">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>. Self-loops are explicitly added to <inline-formula id="inf25">
<mml:math id="m29">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> via <inline-formula id="inf26">
<mml:math id="m30">
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mo>&#x2190;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>I</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>. Note that this adjacency matrix is generally asymmetric, since <inline-formula id="inf27">
<mml:math id="m31">
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> does not imply <inline-formula id="inf28">
<mml:math id="m32">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>. This construction yields an asymmetric KNN graph, which retains directional neighbor relationships rather than enforcing symmetry.</p>
<p>By contrast, many traditional ST workflows symmetrize the graph by setting <inline-formula id="inf29">
<mml:math id="m33">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> whenever either node is in the other&#x2019;s KNN set. Although this simplifies the topology, it may eliminate informative directional structure.</p>
</sec>
<sec id="s2-2-3">
<label>2.2.3</label>
<title>Inverse distance weighting</title>
<p>Edge weights are computed via inverse distance weighting as defined in <xref ref-type="disp-formula" rid="e5">Equation 5</xref>:<disp-formula id="e5">
<mml:math id="m34">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="{" close="">
<mml:mrow>
<mml:mtable class="cases">
<mml:mtr>
<mml:mtd columnalign="left">
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msubsup>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:mfrac>
<mml:mo>,</mml:mo>
<mml:mspace width="1em"/>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mtext>if&#x2009;</mml:mtext>
<mml:mi>j</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x222a;</mml:mo>
<mml:mfenced open="{" close="}">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="left">
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mspace width="1em"/>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mtext>otherwise</mml:mtext>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>here, <inline-formula id="inf30">
<mml:math id="m35">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x2265;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> is a tunable exponent controlling the decay rate: larger values of <inline-formula id="inf31">
<mml:math id="m36">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> emphasize spatially close neighbors more strongly. Including the self-node <inline-formula id="inf32">
<mml:math id="m37">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> ensures that intrinsic spot-level information is preserved and generally dominates the message aggregation.</p>
</sec>
<sec id="s2-2-4">
<label>2.2.4</label>
<title>Normalization strategy</title>
<p>To preserve spatial interpretability and avoid undue uniformization, we apply spot-wise row normalization to the distance-weighted adjacency matrix. Given the unnormalized inverse distance weighted adjacency matrix <inline-formula id="inf33">
<mml:math id="m38">
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, we compute the row-normalized distance-weighted adjacency matrix as shown in <xref ref-type="disp-formula" rid="e6">Equation 6</xref>:<disp-formula id="e6">
<mml:math id="m39">
<mml:mrow>
<mml:msubsup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x222a;</mml:mo>
<mml:mfenced open="{" close="}">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(6)</label>
</disp-formula>
</p>
<p>This operation normalizes each node&#x2019;s outgoing weights to sum to 1, ensuring that the relative ordering of distance-derived weights is preserved. Row-wise normalization is consistent with widely adopted practices in message-passing Graph Neural Networks (GNNs). In classical spectral graph theory, the random-walk Laplacian <inline-formula id="inf34">
<mml:math id="m40">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>L</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>I</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (<xref ref-type="bibr" rid="B25">Von Luxburg, 2007</xref>) naturally corresponds to row-normalized propagation, providing a well-established theoretical basis for this normalization strategy.</p>
<p>Thus, our normalization strategy remains fully compatible with standard GCN layers and does not require modifications to training dynamics.</p>
</sec>
</sec>
<sec id="s2-3">
<label>2.3</label>
<title>GCN propagation</title>
<p>In a graph convolutional network (GCN), the core operation involves aggregating information from a node&#x2019;s local neighborhood based on a normalized adjacency matrix. Let <inline-formula id="inf35">
<mml:math id="m41">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>Z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msup>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi mathvariant="double-struck">R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> denote the node representations at layer <inline-formula id="inf36">
<mml:math id="m42">
<mml:mrow>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, the layer-wise propagation rule is formally defined in <xref ref-type="disp-formula" rid="e7">Equation 7</xref> as:<disp-formula id="e7">
<mml:math id="m43">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>Z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>Z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:msup>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(7)</label>
</disp-formula>where <inline-formula id="inf37">
<mml:math id="m44">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> is a learnable weight matrix, <inline-formula id="inf38">
<mml:math id="m45">
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x22c5;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is a nonlinear activation, and <inline-formula id="inf39">
<mml:math id="m46">
<mml:mrow>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi mathvariant="double-struck">R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> is the normalized adjacency matrix.</p>
<p>Spatial GCN frameworks typically construct a KNN graph with a binary adjacency matrix <inline-formula id="inf40">
<mml:math id="m47">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>. The degree matrix <inline-formula id="inf41">
<mml:math id="m48">
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is defined as <inline-formula id="inf42">
<mml:math id="m49">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>. Applying symmetric degree normalization yields the original normalized adjacency matrix as shown in <xref ref-type="disp-formula" rid="e8">Equation 8</xref>:<disp-formula id="e8">
<mml:math id="m50">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ori</mml:mi>
</mml:mrow>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:msup>
<mml:mi>A</mml:mi>
<mml:msup>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
<label>(8)</label>
</disp-formula>
</p>
<p>In DWGCN, the original degree-normalized adjacency matrix <inline-formula id="inf43">
<mml:math id="m51">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ori</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> is replaced with the proposed distance-weighted adjacency matrix <inline-formula id="inf44">
<mml:math id="m52">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> (as described in <xref ref-type="sec" rid="s2-2">Section 2.2</xref>), which assigns larger weights to spatially closer spots. The propagation rule is defined in <xref ref-type="disp-formula" rid="e9">Equation 9</xref> as:<disp-formula id="e9">
<mml:math id="m53">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>Z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msup>
<mml:msup>
<mml:mrow>
<mml:mi>Z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
<mml:msup>
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>l</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(9)</label>
</disp-formula>
</p>
</sec>
<sec id="s2-4">
<label>2.4</label>
<title>Integration into existing spatial GCN workflows</title>
<p>DWGCN replaces the original spatial adjacency in GCN-based frameworks with the distance-weighted adjacency matrix <inline-formula id="inf45">
<mml:math id="m54">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> while leaving all model architectures, losses, and hyperparameters unchanged. This design isolates the effect of graph construction, enabling fair and architecture-independent benchmarking of distance-aware connectivity.</p>
<p>Beyond providing a controlled comparison setting, the resulting <inline-formula id="inf46">
<mml:math id="m55">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> functions as a generalizable graph foundation that can be readily incorporated into diverse spatial transcriptomic workflows. Although broadly applicable to tasks such as spatial domain identification, trajectory inference, visualization, and denoising, here we focus specifically on evaluating its impact on spatial domain identification across representative GCN-based models.</p>
</sec>
<sec id="s2-5">
<label>2.5</label>
<title>Benchmarking and evaluation</title>
<p>We benchmarked DWGCN against four graph-based deep learning frameworks for spatial domain identification (<xref ref-type="sec" rid="s11">Supplementary Table S1</xref>): SEDR (<xref ref-type="bibr" rid="B27">Xu et al., 2024</xref>), GraphST (<xref ref-type="bibr" rid="B14">Long et al., 2023</xref>), SpaNCMG (<xref ref-type="bibr" rid="B22">Si et al., 2024</xref>), and SpaGIC (<xref ref-type="bibr" rid="B13">Liu et al., 2024</xref>). For each baseline method, we followed the official implementations and default parameter settings unless otherwise specified. In the DWGCN-enhanced models, the original degree-normalized adjacency matrix <inline-formula id="inf47">
<mml:math id="m56">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ori</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> was replaced with the distance-weighted adjacency matrix <inline-formula id="inf48">
<mml:math id="m57">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> (with <inline-formula id="inf49">
<mml:math id="m58">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 12 and <inline-formula id="inf50">
<mml:math id="m59">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 2), and all other hyperparameters were kept unchanged to ensure a fair comparison.</p>
<p>Benchmarking was conducted on eight datasets, including four real and four simulated collections. The real-world collection comprised four publicly available spatial transcriptomics datasets with curated domain annotations (<xref ref-type="sec" rid="s11">Supplementary Table S2</xref>): 12 slices of human dorsolateral prefrontal cortex (DLPFC) (<xref ref-type="bibr" rid="B16">Pardo et al., 2022</xref>), one&#xa0;mouse brain sagittal anterior section (Mouse_Brain) (<xref ref-type="bibr" rid="B9">Ji et al., 2020</xref>), one human breast cancer Block A Section 1 (Human_Breast) (<xref ref-type="bibr" rid="B9">Ji et al., 2020</xref>), and three mouse embryos (Mouse_Embryos) of E9.5-stage (<xref ref-type="bibr" rid="B4">Chen et al., 2022</xref>). In addition, four <italic>in silico</italic> datasets were generated using simSRT (<xref ref-type="bibr" rid="B30">Zhu et al., 2023</xref>) to model tissues containing 3, 5, 8, and 10 spatial domains (referred to as the cluster_3 to cluster_10 datasets; <xref ref-type="sec" rid="s11">Supplementary Table S3</xref>). Each simulated dataset contained eight independently generated samples to account for stochastic variability and ensure statistical robustness. Details of data preprocessing and normalization are provided in the <xref ref-type="sec" rid="s11">Supplementary Material</xref>.</p>
<p>Clustering accuracy was evaluated using three complementary metrics: Adjusted Rand Index (ARI) (<xref ref-type="bibr" rid="B8">Hubert and Arabie, 1985</xref>), Normalized Mutual Information (NMI) (<xref ref-type="bibr" rid="B24">Strehl and Ghosh, 2002</xref>), and Homogeneity (<xref ref-type="bibr" rid="B19">Rosenberg and Hirschberg, 2007</xref>). For each sample, both the baseline and DWGCN-enhanced models were run 20 times to account for randomness in model training. Performance improvement was evaluated using paired run-wise differences between the two models. For each sample <inline-formula id="inf51">
<mml:math id="m60">
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and run <inline-formula id="inf52">
<mml:math id="m61">
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, the improvement was computed as <inline-formula id="inf53">
<mml:math id="m62">
<mml:mrow>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>u</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>v</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>u</mml:mi>
<mml:msubsup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>v</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>u</mml:mi>
<mml:msubsup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ori</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>, where <inline-formula id="inf54">
<mml:math id="m63">
<mml:mrow>
<mml:mi>v</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>u</mml:mi>
<mml:msubsup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf55">
<mml:math id="m64">
<mml:mrow>
<mml:mi>v</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>u</mml:mi>
<mml:msubsup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">ori</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> denote the metric values of the DWGCN-enhanced and baseline models, respectively. For summary reporting, run-wise differences were averaged within each sample.</p>
<p>The statistical significance of the paired differences was assessed using the paired Wilcoxon signed-rank test (<xref ref-type="bibr" rid="B1">Bauer, 1972</xref>). Multiple-testing correction was performed using the Benjamini&#x2013;Hochberg procedure (<xref ref-type="bibr" rid="B2">Benjamini and Hochberg, 1995</xref>), and the resulting adjusted p-values are reported as FDR values. Unless otherwise stated, differences were considered statistically significant at FDR <inline-formula id="inf56">
<mml:math id="m65">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.05</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>. In addition, effect sizes for the paired comparison were quantified using Cliff&#x2019;s Delta <inline-formula id="inf57">
<mml:math id="m66">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>&#x3b4;</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, which measures the probability that a randomly selected value from the DWGCN-enhanced method exceeds that from the baseline (<xref ref-type="bibr" rid="B5">Cliff, 2014</xref>).</p>
<p>Analyses were conducted at both the sample and dataset levels. Sample-level analyses enabled direct paired comparison within each biological replicate, controlling for intra-dataset variability. Dataset-level analyses aggregated sample-level statistics to evaluate whether DWGCN yielded consistent, statistically significant improvements across heterogeneous biological and technical contexts. Together, this multi-level paired statistical design provided a robust and interpretable assessment of the performance enhancement introduced by DWGCN.</p>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Performance of DWGCN</title>
<sec id="s3-1">
<label>3.1</label>
<title>DWGCN generates distance-aware neighborhood weights</title>
<p>To examine how DWGCN reshapes the local aggregation structure, we compared the normalized edge-weight profiles generated by DWGCN (<inline-formula id="inf58">
<mml:math id="m67">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 12) with those produced by a degree-normalized KNN adjacency constructed using symmetric normalization. For reference, degree normalization is representative of the normalization schemes widely adopted in standard GCNs, and is therefore used here as a conventional baseline. As summarized in <xref ref-type="sec" rid="s11">Supplementary Table S4</xref>, the conventional adjacency construction produces nearly uniform weights (self-loop <inline-formula id="inf59">
<mml:math id="m68">
<mml:mrow>
<mml:mo>&#x2248;</mml:mo>
<mml:mn>0.077</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>; neighbor weights <inline-formula id="inf60">
<mml:math id="m69">
<mml:mrow>
<mml:mo>&#x2248;</mml:mo>
<mml:mn>0.064</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>&#x2013;0.077), indicating that spatial distance exerts minimal influence on the aggregation process. Because conventional KNN adjacency assigns all connected neighbors an identical weight before normalization, degree normalization cannot recover the lost geometric information. This near-uniform weighting reflects the smoothing-dominant behavior of conventional GCN propagation, where symmetric normalization reduces geometric distinctions among neighbors.</p>
<p>DWGCN, in contrast, introduces distance-aware weighting that becomes progressively more localized as the distance exponent <inline-formula id="inf61">
<mml:math id="m70">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> increases. When <inline-formula id="inf62">
<mml:math id="m71">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, DWGCN produces uniform weights within each node&#x2019;s KNN neighborhood, although the resulting graph is naturally asymmetric due to directional relative distances. For moderate exponents (<inline-formula id="inf63">
<mml:math id="m72">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x2248;</mml:mo>
<mml:mn>0.5</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>&#x2013;1), the self-loop weight increases, and closer neighbors consistently receive higher weights than farther ones, establishing meaningful spatial discrimination while still retaining contributions from all neighbors. For large exponents <inline-formula id="inf64">
<mml:math id="m73">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x2265;</mml:mo>
<mml:mn>4</mml:mn>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, the distribution becomes highly concentrated: the self-loop weight dominates, and distant neighbors contribute only marginally (on the order of <inline-formula id="inf65">
<mml:math id="m74">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mn>10</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> or lower). The parameter <inline-formula id="inf66">
<mml:math id="m75">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> continuously controls the strength of spatial smoothing, ranging from uniform averaging <inline-formula id="inf67">
<mml:math id="m76">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> to distance-sensitive, structure-preserving aggregation.</p>
<p>Rather than seeking the optimal hyperparameters, which can vary across datasets and downstream tasks, we adopt a representative and stable configuration (<inline-formula id="inf68">
<mml:math id="m77">
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>12</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf69">
<mml:math id="m78">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) to illustrate DWGCN&#x2019;s intended behavior. At <inline-formula id="inf70">
<mml:math id="m79">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, the resulting weight profile (self-loop <inline-formula id="inf71">
<mml:math id="m80">
<mml:mrow>
<mml:mo>&#x2248;</mml:mo>
<mml:mn>0.36</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>; nearest neighbors <inline-formula id="inf72">
<mml:math id="m81">
<mml:mrow>
<mml:mo>&#x2248;</mml:mo>
<mml:mn>0.09</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>; distant neighbors decreasing smoothly to <inline-formula id="inf73">
<mml:math id="m82">
<mml:mrow>
<mml:mo>&#x2248;</mml:mo>
<mml:mn>0.03</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) reflects a balanced regime in which self-information is strengthened and spatially proximate neighbors are emphasized, while farther neighbors still retain non-negligible influence.</p>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>Benchmarking results of real datasets</title>
<p>Across four real spatial transcriptomics datasets, incorporating DWGCN consistently improved clustering accuracy under diverse model architectures and biological contexts. As summarized in <xref ref-type="fig" rid="F2">Figure 2A</xref>, the DWGCN-enhanced versions outperformed their baseline counterparts on all three evaluation metrics (Adjusted Rand Index (ARI), Normalized Mutual Information (NMI), and Homogeneity), yielding average performance gains of 0.038 (9.09%), 0.032 (5.44%), and 0.038 (6.30%), respectively. Visual inspection of spatial clustering patterns further corroborated these trends: DWGCN-enhanced models produced clearer laminar structures on representative real datasets (<xref ref-type="sec" rid="s11">Supplementary Figure S1</xref>).</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Performance of DWGCN-enhanced methods on real datasets. <bold>(A)</bold> Boxplots summarizing clustering performance across real datasets using three evaluation metrics: Adjusted Rand Index (ARI), Normalized Mutual Information (NMI), and Homogeneity. <bold>(B)</bold> Line plots showing the average improvement in clustering performance induced by DWGCN across datasets. Each subplot corresponds to an evaluation metric, and each line represents a clustering framework. <bold>(C)</bold> Heatmap illustrating sample-level performance gains, where color intensity denotes the magnitude of improvement (difference between DWGCN-enhanced and baseline model scores). Significance levels are denoted as: FDR <inline-formula id="inf74">
<mml:math id="m83">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.0001</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.001</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.01</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.05</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2a;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mtext>&#x2009;and&#x2009;</mml:mtext>
<mml:mo>&#x2265;</mml:mo>
<mml:mn>0.05</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mtext>ns</mml:mtext>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>. <bold>(D)</bold> Volcano plots depicting the relationship between the magnitude of improvement and its statistical significance. The x-axis indicates the mean improvement, and the y-axis shows the log-transformed <inline-formula id="inf75">
<mml:math id="m84">
<mml:mrow>
<mml:mtext>FDR</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> from the paired Wilcoxon signed-rank test.</p>
</caption>
<graphic xlink:href="fgene-17-1779455-g002.tif">
<alt-text content-type="machine-generated">Figure containing four panels comparing spatial transcriptomics analysis methods. Panel A shows boxplots of ARI, NMI, and Homogeneity scores for DLPFC, mouse brain, human breast, and mouse embryos, with statistical significance indicated. Panel B is a line graph comparing delta values of the same metrics across methods and datasets. Panel C presents a heatmap of delta values for each metric, dataset, and method, color-coded by magnitude and direction. Panel D features three volcano plots showing delta values against negative log FDR for ARI, NMI, and Homogeneity across methods, with color-coded points per method.</alt-text>
</graphic>
</fig>
<p>At the dataset level, the accuracy improvement provided by DWGCN was consistent across all four datasets, demonstrating stable performance gains regardless of tissue type or experimental condition (<xref ref-type="fig" rid="F2">Figure 2B</xref>). Across four datasets and three clustering metrics, a total of 48 paired comparisons were performed between each baseline model and its DWGCN-enhanced version. Among them, 27 cases (56.3%) exhibited significant enhancement, whereas only three (6.3%) showed significant decreases (<xref ref-type="sec" rid="s11">Supplementary Table S5</xref>). Effect-size analysis showed consistent trends, with positive large effect sizes in 23 (47.9%) cases and negative large effect sizes in four (8.3%) cases (<xref ref-type="sec" rid="s11">Supplementary Table S6</xref>).</p>
<p>On a per-sample basis, 75%, 67.65%, and 69.12% of samples exhibited improved performance, of which approximately 50% reached statistical significance (<xref ref-type="sec" rid="s11">Supplementary Table S7</xref>). In comparison, only 4%&#x2013;10% of samples exhibited significant declines. The heatmap in <xref ref-type="fig" rid="F2">Figure 2C</xref> further visualizes sample-level performance gains, where color intensity reflects the magnitude of improvement. Sample-wise distributions of ARI, NMI, and Homogeneity also consistently favored DWGCN across methods (<xref ref-type="sec" rid="s11">Supplementary Figure S2</xref>), with performance gains consistently observed across individual samples (<xref ref-type="sec" rid="s11">Supplementary Figure S3</xref>).</p>
<p>To further characterize model-specific behavior, volcano plots highlighted distinct sensitivity patterns among the four representative models (<xref ref-type="fig" rid="F2">Figure 2D</xref>). Overall, DWGCN yielded the greatest enhancement when integrated with SpaNCMG, followed by GraphST, SpaGIC, and SEDR. DWGCN achieved 12 and 11 significant improvements for SpaNCMG and GraphST, respectively, without any observed degradation. SpaNCMG displayed the most stable behavior, showing 100% significant gains based on 12 dataset&#x2013;metric combinations. GraphST exhibited slightly lower but still consistent improvement with 80%&#x2013;85% significant improvements and no negative cases. In contrast, SpaGIC exhibited both positive and negative effects, with roughly half of its comparisons showing significant improvements and the remainder showing significant decreases. SEDR showed minimal response to DWGCN, with only marginal decreases in the Human_Breast dataset for NMI and Homogeneity, suggesting that its embedding structure is less influenced by distance-weighted connectivity.</p>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Benchmarking based on simulated datasets</title>
<p>Performance evaluation on simulated datasets revealed an even more pronounced improvement achieved by DWGCN across all tested frameworks (<xref ref-type="fig" rid="F3">Figure 3</xref>). As shown in <xref ref-type="fig" rid="F3">Figure 3A</xref>, the DWGCN-enhanced versions consistently outperformed their baseline counterparts across three metrics. The average gains over the baseline methods were 0.047 (an 11.46% improvement relative to the baseline mean ARI) for ARI, 0.033 (5.66%) for NMI, and 0.036 (5.67%) for Homogeneity. A large majority of samples (84.38%, 89.06%, and 88.28% for ARI, NMI, and Homogeneity) showed improved performance, of which approximately 57.81%&#x2013;72.66% reached statistical significance (<xref ref-type="sec" rid="s11">Supplementary Table S10</xref>, <xref ref-type="sec" rid="s11">Supplementary Figure S5</xref>). The proportion of samples with decreased performance remained low (10%&#x2013;15%), and significant decreases were minimal (3%&#x2013;6%). Representative examples from simulated datasets also illustrated the improved recovery of ground-truth spatial structure under DWGCN (<xref ref-type="sec" rid="s11">Supplementary Figure S4</xref>).</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Evaluation of DWGCN-enhanced methods on simulated datasets. <bold>(A)</bold> Boxplots summarizing clustering accuracy across simulated datasets, evaluated using ARI, NMI, and Homogeneity. Comparison of clustering performance for representative simulated samples across original and DWGCN-enhanced methods. <bold>(B)</bold> Line plots displaying the mean improvement trends of DWGCN across all simulated scenarios. <bold>(C)</bold> Heatmap illustrating dataset-level performance improvements, where the color gradient reflects the relative effect size of DWGCN over baseline methods. Significance levels are denoted as: FDR <inline-formula id="inf76">
<mml:math id="m85">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.0001</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.001</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.01</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2a;</mml:mo>
<mml:mo>&#x2a;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.05</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2a;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mtext>&#x00A0;and&#x00A0;</mml:mtext>
<mml:mo>&#x2265;</mml:mo>
<mml:mn>0.05</mml:mn>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mtext>ns</mml:mtext>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>. <bold>(D)</bold> Volcano plots showing the relationship between the effect magnitude and statistical significance of improvements in simulated settings. Each point represents one simulated dataset, with the x-axis indicating the average DWGCN-induced gain and the y-axis representing the log-transformed FDR.</p>
</caption>
<graphic xlink:href="fgene-17-1779455-g003.tif">
<alt-text content-type="machine-generated">Panel A shows boxplots comparing ARI, NMI, and Homogeneity for four clustering methods across clusters three, five, eight, and ten with statistical significance indicated. Panel B presents line plots of &#x394; values for each method and cluster for ARI, NMI, and Homogeneity. Panel C displays a heatmap of &#x394; values across clusters and methods, with color coding indicating the magnitude and direction of change. Panel D contains scatter plots for ARI, NMI, and Homogeneity, displaying &#x394; values versus negative log FDR for each method, with colors corresponding to methods.</alt-text>
</graphic>
</fig>
<p>At the dataset level, DWGCN outperformed baseline models in 89.6% (43/48) of pairwise comparisons (<xref ref-type="fig" rid="F3">Figure 3B</xref>). Statistical significance was achieved in 81.3% (39/48) of cases, and 50.0% (24/48) showed large effect sizes (<xref ref-type="sec" rid="s11">Supplementary Tables S8, S9</xref>). In contrast, only 10.4% (5/48) exhibited significant decreases, and 8.3% (4/48) showed no difference. Consistently, the sample-level heatmap in <xref ref-type="fig" rid="F3">Figure 3C</xref> illustrated that 87.2% (335/384) of individual samples showed higher accuracy under DWGCN, with 66.1% achieving significant improvements. The volcano plot in <xref ref-type="fig" rid="F3">Figure 3D</xref> further confirmed this pattern, with most points shifted toward the positive-effect side, indicating predominantly beneficial and frequently significant improvements. Together, these results demonstrate that the observed enhancement is both statistically significant and a large-effect improvement across dataset- and sample-level analyses.</p>
<p>Notably, the performance benefit of DWGCN became more pronounced on datasets with higher clustering complexity. As shown in <xref ref-type="fig" rid="F3">Figure 3A</xref>, clustering performance declined as the number of clusters increased, reflecting the increasing difficulty of resolving finer-grained spatial boundaries as cluster numbers grow. However, DWGCN-enhanced models maintained higher clustering accuracy and demonstrated progressively stronger benefits as the number of spatial domains increased from 3 to 10 (<xref ref-type="sec" rid="s11">Supplementary Figure S6</xref>). At low complexity (3 clusters), improvements were modest and occasionally unstable, with slight declines observed for GraphST and SpaNCMG. In contrast, at higher complexity (5&#x2013;10 clusters), all frameworks achieved consistent and substantial performance gains. Effect size analysis confirmed this trend: the average Cliff&#x2019;s Delta increased steadily from 0.272 (small effect) at cluster_3 setting, to 0.575 (large effect) at cluster_5, 0.760 (large effect) at cluster_8, and 0.816 (large effect) at cluster_10 (<xref ref-type="sec" rid="s11">Supplementary Table S10</xref>).</p>
<p>Collectively, these findings indicate that DWGCN not only improves clustering accuracy but also enhances model robustness and scalability under increasingly complex spatial structures, underscoring its strong potential to generalize across diverse spatial transcriptomics settings.</p>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>In this study, we present DWGCN, a distance-weighted graph convolutional framework that enhances spatial domain identification by refining the construction of spatial adjacency. GCN-based spatial models aggregate information from graph neighbors, making their performance tightly dependent on the fidelity of the adjacency matrix. However, conventional binary KNN adjacency followed by degree normalization in many ST pipelines assigns almost uniform neighbor weights, suppressing natural distance heterogeneity and diminishing distance-dependent variation during message passing. This tendency further amplifies the intrinsic over-smoothing behavior of GCNs, potentially obscuring fine or irregular domain boundaries.</p>
<p>DWGCN reintroduces distance heterogeneity into graph construction, thereby breaking the uniform propagation pattern and mitigating excessive feature averaging. The dominance of the self-loop ensures that spot-specific information remains the primary signal throughout propagation. DWGCN achieves distance-aware adjacency refinement through three coordinated mechanisms. First, inverse-distance weighting (IDW) with a tunable decay exponent <inline-formula id="inf77">
<mml:math id="m86">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> emphasizes proximal neighbors and restores distance decay patterns commonly observed in spatial transcriptomics. Second, relative distance scaling within each tissue section ensures comparability of distance-derived weights across platforms with distinct spatial resolutions. Third, replacing global degree normalization with local row-wise normalization preserves within-spot weight ratios and prevents hub dominance. Together, these mechanisms produce adjacency matrices that retain biologically plausible spatial heterogeneity, reflecting the expectation that nearby spots tend to share transcriptomic similarity and belong to the same cell type.</p>
<p>Our benchmarking results across four real and four simulated datasets demonstrate that DWGCN yields performance gains in the majority of sample-level comparisons. Improvements were most pronounced in simulated datasets with complex spatial architectures or fine-grained transitions, where distance-aware weighting more effectively captures local structural continuity. These results suggest that DWGCN is particularly beneficial in scenarios where domain boundaries are subtle. In contrast, datasets with coarse and well-separated domains showed smaller improvements, likely because their broad homogeneous regions already limit the adverse effects of uniform neighbor weighting. These observations highlight the importance of selecting an appropriate decay exponent <inline-formula id="inf78">
<mml:math id="m87">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> in accordance with the spatial granularity of the tissue.</p>
<p>Performance differences across frameworks further reveal that adjacency refinement interacts with model architecture. SpaNCMG exhibited the largest and most consistent improvements after integration with DWGCN, likely because its multi-view graph reconstruction and attention-based fusion make it more sensitive to the quality of spatial graphs. In comparison, SEDR, GraphST, and SpaGIC showed more dataset-dependent gains, reflecting the fact that adjacency refinement constitutes only one component within their broader architectures. We also observed larger improvements in simulated datasets, which typically have uniform spot densities and lower noise, conditions under which distance heterogeneity is more clearly reflected in the weighted adjacency. Real tissues inherently contain morphological irregularities and measurement noise, leading to more moderate but still predominantly positive gains. Taken together, these results indicate that DWGCN offers broadly beneficial enhancements, with the magnitude of improvement shaped by both model architecture and dataset characteristics.</p>
<p>DWGCN has several limitations and practical considerations. The choice of <inline-formula id="inf79">
<mml:math id="m88">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf80">
<mml:math id="m89">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> controls the trade-off between locality and connectivity. While default settings (<inline-formula id="inf81">
<mml:math id="m90">
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>12</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf82">
<mml:math id="m91">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) provided stable performance in our benchmarks, optimal configurations depend on spot density, tissue scale, and the downstream model. Additional validation across more platforms and tissue types will be needed to establish robust parameter recommendations. Future work may focus on adaptive strategies for selecting <inline-formula id="inf83">
<mml:math id="m92">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf84">
<mml:math id="m93">
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, incorporating learned distance functions, and extending distance-aware graph construction to multimodal or time-resolved spatial omics.</p>
<p>In summary, DWGCN provides a biologically motivated and implementation-friendly strategy for restoring distance sensitivity in spatial GCN pipelines. By embedding distance awareness directly into graph construction, it mitigates excessive homogenization and improves domain delineation, particularly in tissues requiring high spatial resolution.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="sec" rid="s11">Supplementary Material</xref>, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="s6">
<title>Author contributions</title>
<p>CP: Data curation, Investigation, Methodology, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing. GL: Data curation, Methodology, Validation, Visualization, Writing &#x2013; original draft. JW: Data curation, Visualization, Writing &#x2013; review and editing, Formal Analysis, Investigation. QF: Formal Analysis, Funding acquisition, Validation, Writing &#x2013; review and editing. XG: Formal Analysis, Funding acquisition, Resources, Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="COI-statement" id="s8">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s9">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fgene.2026.1779455/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fgene.2026.1779455/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet1.docx" id="SM1" mimetype="application/docx" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/258592/overview">Himel Mallick</ext-link>, Cornell University, United States</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1123110/overview">Xiangyu Luo</ext-link>, Renmin University of China, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2760507/overview">Satwik Acharyya</ext-link>, University of Alabama at Birmingham, United States</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bauer</surname>
<given-names>D. F.</given-names>
</name>
</person-group> (<year>1972</year>). <article-title>Constructing confidence sets using rank statistics</article-title>. <source>J. Am. Stat. Assoc.</source> <volume>67</volume>, <fpage>687</fpage>&#x2013;<lpage>690</lpage>. <pub-id pub-id-type="doi">10.2307/2284469</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Benjamini</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Hochberg</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>Controlling the false discovery rate: a practical and powerful approach to multiple testing</article-title>. <source>J. R. Statistical Society Series B Methodol.</source> <volume>57</volume>, <fpage>289</fpage>&#x2013;<lpage>300</lpage>. <pub-id pub-id-type="doi">10.1111/j.2517-6161.1995.tb02031.x</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Boettiger</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Moffitt</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Zhuang</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Rna imaging spatially resolved, highly multiplexed rna profiling in single cells</article-title>. <source>Science</source> <volume>348</volume>, <fpage>aaa6090</fpage>. <pub-id pub-id-type="doi">10.1126/science.aaa6090</pub-id>
<pub-id pub-id-type="pmid">25858977</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Liao</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Lai</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Spatiotemporal transcriptomic atlas of mouse organogenesis using dna nanoball-patterned arrays</article-title>. <source>Cell</source> <volume>185</volume>, <fpage>1777</fpage>&#x2013;<lpage>1792</lpage>. <pub-id pub-id-type="doi">10.1016/j.cell.2022.04.003</pub-id>
<pub-id pub-id-type="pmid">35512705</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Cliff</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2014</year>). <source>Ordinal methods for behavioral data analysis</source>. <publisher-name>New York: Psychology Press</publisher-name>.</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dries</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Dong</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Eng</surname>
<given-names>C.-H. L.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Giotto: a toolbox for integrative analysis and visualization of spatial expression data</article-title>. <source>Genome Biology</source> <volume>22</volume>, <fpage>78</fpage>. <pub-id pub-id-type="doi">10.1186/s13059-021-02286-2</pub-id>
<pub-id pub-id-type="pmid">33685491</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Eng</surname>
<given-names>C.-H. L.</given-names>
</name>
<name>
<surname>Lawson</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Dries</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Koulena</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Takei</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Transcriptome-scale super-resolved imaging in tissues by rna seqfish&#x2b;</article-title>. <source>Nature</source> <volume>568</volume>, <fpage>235</fpage>&#x2013;<lpage>239</lpage>. <pub-id pub-id-type="doi">10.1038/s41586-019-1049-y</pub-id>
<pub-id pub-id-type="pmid">30911168</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hubert</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Arabie</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>1985</year>). <article-title>Comparing partitions</article-title>. <source>J. Classification</source> <volume>2</volume>, <fpage>193</fpage>&#x2013;<lpage>218</lpage>. <pub-id pub-id-type="doi">10.1007/bf01908075</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ji</surname>
<given-names>A. L.</given-names>
</name>
<name>
<surname>Rubin</surname>
<given-names>A. J.</given-names>
</name>
<name>
<surname>Thrane</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Reynolds</surname>
<given-names>D. L.</given-names>
</name>
<name>
<surname>Meyers</surname>
<given-names>R. M.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Multimodal analysis of composition and spatial architecture in human squamous cell carcinoma</article-title>. <source>Cell</source> <volume>182</volume>, <fpage>497</fpage>&#x2013;<lpage>514</lpage>. <pub-id pub-id-type="doi">10.1016/j.cell.2020.08.043</pub-id>
<pub-id pub-id-type="pmid">32946785</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kipf</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Semi-supervised classification with graph convolutional networks</article-title>. <source>arXiv Preprint arXiv:1609.02907</source>. <pub-id pub-id-type="doi">10.48550/arXiv.1609.02907</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Larsson</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Fris&#xe9;n</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Lundeberg</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Spatially resolved transcriptomics adds a new dimension to genomics</article-title>. <source>Nat. Methods</source> <volume>18</volume>, <fpage>15</fpage>&#x2013;<lpage>18</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-020-01038-7</pub-id>
<pub-id pub-id-type="pmid">33408402</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Han</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>X.-M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Deeper insights into graph convolutional networks for semi-supervised learning</article-title>. <source>Proc. AAAI Conference Artificial Intelligence</source> <volume>32</volume>. <pub-id pub-id-type="doi">10.1609/aaai.v32i1.11604</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Bai</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Xue</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Spagic: graph-informed clustering in spatial transcriptomics <italic>via</italic> self-supervised contrastive learning</article-title>. <source>Briefings Bioinforma.</source> <volume>25</volume>, <fpage>bbae578</fpage>. <pub-id pub-id-type="doi">10.1093/bib/bbae578</pub-id>
<pub-id pub-id-type="pmid">39541189</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Long</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Ang</surname>
<given-names>K. S.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Chong</surname>
<given-names>K. L. K.</given-names>
</name>
<name>
<surname>Sethi</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Zhong</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Spatially informed clustering, integration, and deconvolution of spatial transcriptomics with graphst</article-title>. <source>Nat. Commun.</source> <volume>14</volume>, <fpage>1155</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-023-36796-3</pub-id>
<pub-id pub-id-type="pmid">36859400</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lu</surname>
<given-names>G. Y.</given-names>
</name>
<name>
<surname>Wong</surname>
<given-names>D. W.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>An adaptive inverse-distance weighting spatial interpolation technique</article-title>. <source>Comput. and Geosciences</source> <volume>34</volume>, <fpage>1044</fpage>&#x2013;<lpage>1055</lpage>. <pub-id pub-id-type="doi">10.1016/j.cageo.2007.07.010</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pardo</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Spangler</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Weber</surname>
<given-names>L. M.</given-names>
</name>
<name>
<surname>Page</surname>
<given-names>S. C.</given-names>
</name>
<name>
<surname>Hicks</surname>
<given-names>S. C.</given-names>
</name>
<name>
<surname>Jaffe</surname>
<given-names>A. E.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Spatiallibd: an r/bioconductor package to visualize spatially-resolved transcriptomics data</article-title>. <source>BMC Genomics</source> <volume>23</volume>, <fpage>434</fpage>. <pub-id pub-id-type="doi">10.1186/s12864-022-08601-w</pub-id>
<pub-id pub-id-type="pmid">35689177</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rodriques</surname>
<given-names>S. G.</given-names>
</name>
<name>
<surname>Stickels</surname>
<given-names>R. R.</given-names>
</name>
<name>
<surname>Goeva</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Martin</surname>
<given-names>C. A.</given-names>
</name>
<name>
<surname>Murray</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Vanderburg</surname>
<given-names>C. R.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Slide-seq: a scalable technology for measuring genome-wide expression at high spatial resolution</article-title>. <source>Science</source> <volume>363</volume>, <fpage>1463</fpage>&#x2013;<lpage>1467</lpage>. <pub-id pub-id-type="doi">10.1126/science.aaw1219</pub-id>
<pub-id pub-id-type="pmid">30923225</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rong</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Dropedge: towards deep graph convolutional networks on node classification</article-title>. <source>arXiv Preprint arXiv:1907.10903</source>. <pub-id pub-id-type="doi">10.48550/arXiv.1907.10903</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Rosenberg</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hirschberg</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2007</year>). &#x201c;<article-title>V-measure: a conditional entropy-based external cluster evaluation measure</article-title>,&#x201d; in <source>Proceedings of the 2007 joint conference on empirical methods in natural language processing and computational natural language learning (EMNLP-CoNLL)</source>, <fpage>410</fpage>&#x2013;<lpage>420</lpage>.</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rusch</surname>
<given-names>T. K.</given-names>
</name>
<name>
<surname>Bronstein</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Mishra</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A survey on oversmoothing in graph neural networks</article-title>. <source>arXiv Preprint arXiv:2303.10993</source>. <pub-id pub-id-type="doi">10.48550/arXiv.2303.10993</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Satija</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Farrell</surname>
<given-names>J. A.</given-names>
</name>
<name>
<surname>Gennert</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Schier</surname>
<given-names>A. F.</given-names>
</name>
<name>
<surname>Regev</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Spatial reconstruction of single-cell gene expression data</article-title>. <source>Nat. Biotechnol.</source> <volume>33</volume>, <fpage>495</fpage>&#x2013;<lpage>502</lpage>. <pub-id pub-id-type="doi">10.1038/nbt.3192</pub-id>
<pub-id pub-id-type="pmid">25867923</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Si</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Shang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Kong</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Long</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Spancmg: improving spatial domains identification of spatial transcriptomics using neighborhood-complementary mixed-view graph convolutional network</article-title>. <source>Briefings Bioinforma.</source> <volume>25</volume>, <fpage>bbae259</fpage>. <pub-id pub-id-type="doi">10.1093/bib/bbae259</pub-id>
<pub-id pub-id-type="pmid">38811360</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>St&#xe5;hl</surname>
<given-names>P. L.</given-names>
</name>
<name>
<surname>Salm&#xe9;n</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Vickovic</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lundmark</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Navarro</surname>
<given-names>J. F.</given-names>
</name>
<name>
<surname>Magnusson</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>Visualization and analysis of gene expression in tissue sections by spatial transcriptomics</article-title>. <source>Science</source> <volume>353</volume>, <fpage>78</fpage>&#x2013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1126/science.aaf2403</pub-id>
<pub-id pub-id-type="pmid">27365449</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Strehl</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ghosh</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Cluster ensembles&#x2014;a knowledge reuse framework for combining multiple partitions</article-title>. <source>J. Mach. Learn. Res.</source> <volume>3</volume>, <fpage>583</fpage>&#x2013;<lpage>617</lpage>. <pub-id pub-id-type="doi">10.1162/153244303321897735</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Von Luxburg</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>A tutorial on spectral clustering</article-title>. <source>Stat. Comput.</source> <volume>17</volume>, <fpage>395</fpage>&#x2013;<lpage>416</lpage>. <pub-id pub-id-type="doi">10.1007/s11222-007-9033-z</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Jin</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Z.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Deepst: identifying spatial domains in spatial transcriptomics by deep learning</article-title>. <source>Nucleic Acids Res.</source> <volume>50</volume>, <fpage>e131</fpage>. <pub-id pub-id-type="doi">10.1093/nar/gkac901</pub-id>
<pub-id pub-id-type="pmid">36250636</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Long</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Ang</surname>
<given-names>K. S.</given-names>
</name>
<name>
<surname>Sethi</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Chong</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Unsupervised spatially embedded deep representation of spatial transcriptomics</article-title>. <source>Genome Med.</source> <volume>16</volume>, <fpage>12</fpage>. <pub-id pub-id-type="doi">10.1186/s13073-024-01283-x</pub-id>
<pub-id pub-id-type="pmid">38217035</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Yao</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Abdelzaher</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Revisiting over-smoothing in deep gcns</article-title>. <source>arXiv Preprint arXiv:2003.13663</source>. <pub-id pub-id-type="doi">10.48550/arXiv.2003.13663</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Stone</surname>
<given-names>M. R.</given-names>
</name>
<name>
<surname>Ren</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Guenthoer</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Smythe</surname>
<given-names>K. S.</given-names>
</name>
<name>
<surname>Pulliam</surname>
<given-names>T.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Spatial transcriptomics at subspot resolution with bayesspace</article-title>. <source>Nat. Biotechnol.</source> <volume>39</volume>, <fpage>1375</fpage>&#x2013;<lpage>1384</lpage>. <pub-id pub-id-type="doi">10.1038/s41587-021-00935-2</pub-id>
<pub-id pub-id-type="pmid">34083791</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Shang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Srtsim: spatial pattern preserving simulations for spatially resolved transcriptomics</article-title>. <source>Genome Biol.</source> <volume>24</volume>, <fpage>39</fpage>. <pub-id pub-id-type="doi">10.1186/s13059-023-02879-z</pub-id>
<pub-id pub-id-type="pmid">36869394</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhuang</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Spatially resolved single-cell genomics and transcriptomics by imaging</article-title>. <source>Nat. Methods</source> <volume>18</volume>, <fpage>18</fpage>&#x2013;<lpage>22</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-020-01037-8</pub-id>
<pub-id pub-id-type="pmid">33408406</pub-id>
</mixed-citation>
</ref>
</ref-list>
</back>
</article>