<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Immunol.</journal-id>
<journal-title>Frontiers in Immunology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Immunol.</abbrev-journal-title>
<issn pub-type="epub">1664-3224</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fimmu.2025.1614099</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Immunology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Automated interpretation of PD-L1 CPS based on multi-AI models integration strategy in gastric cancer</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Han</surname>
<given-names>Ting</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn003">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2392815/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Zhuo</surname>
<given-names>Meng</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn003">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2946084/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Song</surname>
<given-names>Ziyu</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chen</surname>
<given-names>Peilin</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chen</surname>
<given-names>Shiting</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Wei</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhou</surname>
<given-names>Yuanyuan</given-names>
</name>
<xref ref-type="aff" rid="aff5">
<sup>5</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Li</surname>
<given-names>Hong</given-names>
</name>
<xref ref-type="aff" rid="aff6">
<sup>6</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1125968/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Dadong</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1459084/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Lin</surname>
<given-names>Xiaolin</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1410903/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Liu</surname>
<given-names>Zebing</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Xiao</surname>
<given-names>Xiuying</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/933632/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Department of Oncology, Renji Hospital, School of Medicine, Shanghai Jiao Tong University</institution>, <addr-line>Shanghai</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Pathology, Renji Hospital, School of Medicine, Shanghai Jiao Tong University</institution>, <addr-line>Shanghai</addr-line>, <country>China</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Product Development Department, SODA Data Technology Inc.</institution>, <addr-line>Shanghai</addr-line>, <country>China</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Department of Clinical and Translational Research, 3D Medicines Inc.</institution>, <addr-line>Shanghai</addr-line>, <country>China</country>
</aff>
<aff id="aff5">
<sup>5</sup>
<institution>School of Pharmacy, East China University of Science and Technology</institution>, <addr-line>Shanghai</addr-line>, <country>China</country>
</aff>
<aff id="aff6">
<sup>6</sup>
<institution>State Key Laboratory of Systems Medicine for Cancer, Shanghai Cancer Institute, Renji Hospital, Shanghai Jiaotong University School of Medicine</institution>, <addr-line>Shanghai</addr-line>, <country>China</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Arka Bhowmik, Memorial Sloan Kettering Cancer Center, United States</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Jie Wu, First Affiliated Hospital of Soochow University, China</p>
<p>Fuchuang Zhang, Fudan University, China</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Xiuying Xiao, <email xlink:href="mailto:xiaoxiuying2002@163.com">xiaoxiuying2002@163.com</email>; Zebing Liu, <email xlink:href="mailto:liuzebing@renji.com">liuzebing@renji.com</email>; Xiaolin Lin, <email xlink:href="mailto:renjilxl@163.com">renjilxl@163.com</email>
</p>
</fn>
<fn fn-type="equal" id="fn003">
<p>&#x2020;These authors have contributed equally to this work and share first authorship</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>06</day>
<month>08</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>16</volume>
<elocation-id>1614099</elocation-id>
<history>
<date date-type="received">
<day>18</day>
<month>04</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>11</day>
<month>07</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2025 Han, Zhuo, Song, Chen, Chen, Zhang, Zhou, Li, Zhang, Lin, Liu and Xiao.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Han, Zhuo, Song, Chen, Chen, Zhang, Zhou, Li, Zhang, Lin, Liu and Xiao</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Programmed cell death ligand-1 (PD-L1) combined positive score (CPS) evaluation plays a pivotal role in predicting immunotherapy efficacy for gastric cancer. However, manual CPS assessment suffers from significant inter-observer variability among pathologists, leading to clinical inconsistencies. To address this limitation, we developed a deep learning-based artificial intelligence (AI) system that automates PD-L1 CPS quantification for patients with gastric cancer (GC) using whole slide images (WSIs).</p>
</sec>
<sec>
<title>Methods</title>
<p>We developed a deep learning-based artificial intelligence (AI) system that automates PD-L1 CPS quantification for patients with gastric cancer (GC) using whole slide images (WSIs). Our pipeline first employs a dual-network architecture for tumor region detection: MobileNet for patch-level classification and U-Net for pixel-level segmentation. This is followed by a YOLO-based cell detection model to compute PD-L1 expression on different cells for CPS calculation. A total of 308 GC WSIs were included, comprising 210 in the internal cohort and 98 in the external cohort. Within the internal cohort, 100 WSIs were utilized for the model development, while the remaining 110 WSIs served as an internal testing set for comparative analysis between AI-derived CPS values and pathologist-derived reference standards.</p>
</sec>
<sec>
<title>Results</title>
<p>The AI-derived CPS demonstrated strong concordance with expert pathologists&#x2019; consensus in internal cohort (Cohen&#x2019;s kappa = 0.782). Furthermore, the AI-based CPS prediction pipeline was evaluated for its performance in the external cohort, and showed robust performance (Cohen&#x2019;s kappa = 0.737).</p>
</sec>
<sec>
<title>Discussion</title>
<p>Our system provides a standardized decision-support tool for immunotherapy stratification in GC management, demonstrating potential to improve CPS assessment reproducibility.</p>
</sec>
</abstract>
<kwd-group>
<kwd>PD-L1</kwd>
<kwd>CPS</kwd>
<kwd>gastric cancer</kwd>
<kwd>automated scoring</kwd>
<kwd>artificial intelligence</kwd>
</kwd-group>
<counts>
<fig-count count="5"/>
<table-count count="4"/>
<equation-count count="2"/>
<ref-count count="30"/>
<page-count count="11"/>
<word-count count="5150"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Cancer Immunity and Immunotherapy</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>According to 2022 Global Cancer Statistics (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>), gastric cancer is the world&#x2019;s fifth-most prevalent cancer and a major cause of cancer-related death. It is characterized by high heterogeneity and poor prognosis. In recent research, immune checkpoint inhibitors (ICIs), particularly programmed death-ligand 1 (PD-L1) inhibitors, have garnered substantial attention. Immunotherapy, with ICIs at its core, has emerged as a widely adopted approach in treating a spectrum of cancers, including lung cancer, colorectal cancer, liver cancer, and gastric cancer. Multiple prospective, multicenter clinical randomized controlled trials have confirmed that the combination of chemotherapy and ICIs can significantly improve the prognosis of advanced gastric cancer compared with chemotherapy alone. Studies such as Checkmate-649 (<xref ref-type="bibr" rid="B3">3</xref>), ORIENT-16 (<xref ref-type="bibr" rid="B4">4</xref>), and RATIONALE 305 (<xref ref-type="bibr" rid="B5">5</xref>) have established the important role of combined ICI therapy in advanced gastric cancer, which can bring significant survival benefits to some patients.</p>
<p>Despite significant advancements in immunotherapy, its efficacy remains limited to a subset of patients (<xref ref-type="bibr" rid="B6">6</xref>). Identifying potential responders who could benefit from this treatment and achieve prolonged survival is therefore of paramount importance in clinical practice. Currently, the CPS system serves as a primary evaluation metric, quantifying the expression level of PD-L1 protein on tumor and immune cell surfaces as a percentage value. Clinically, a higher CPS score likely correlates with increased tumor sensitivity to immunotherapy and predicts better therapeutic outcomes, making it a crucial indicator for clinical decision-making in immunotherapy administration (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B8">8</xref>). The 2024 CSCO guidelines have made significant updates to the immunotherapy section for gastric cancer, introducing refined stratification based on PD-L1 CPS for the first-line immunotherapy of HER2-positive gastric cancer. However, the current practice of manual CPS calculation for PD-L1 expression assessment presents substantial challenges, primarily due to issues with reproducibility and consistency among pathologists&#x2019; evaluations (<xref ref-type="bibr" rid="B9">9</xref>).</p>
<p>The integration of artificial intelligence (AI) with digital pathology has catalyzed transformative innovations in diagnostic medicine. Advanced deep learning architectures, including convolutional neural networks (CNNs) and vision transformers, have been engineered to revolutionize pathological workflows by enabling precise tissue segmentation (<xref ref-type="bibr" rid="B10">10</xref>), automated metastasis detection (<xref ref-type="bibr" rid="B11">11</xref>), and AI-driven prognostic prediction (<xref ref-type="bibr" rid="B12">12</xref>). These innovations demonstrate remarkable diagnostic concordance with human pathologists across multiple clinical scenarios (<xref ref-type="bibr" rid="B13">13</xref>). Several AI solutions have been developed to assist pathologists in accurately scoring the PD-L1 (Dako 22C3) TPS in non-small cell lung cancer (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B15">15</xref>), and these solutions demonstrate clinical-grade diagnostic reliability in supporting pathological evaluations. Even when it comes to the more complex interpretation of the (Dako 22C3) CPS, studies suggest that AI models can help reduce discrepancies among pathologists in the context of breast cancer and urothelial carcinoma (<xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B17">17</xref>). These AI systems have been shown to enhance both consistency and reproducibility in clinical practice, thereby improving the overall reliability of pathological assessments.</p>
<p>However, the first step in most of these AI-based methods for quantifying tumor markers typically requires segmenting the tumor region using a semantic segmentation model, which is relatively time-consuming. Additionally, for some tumor samples, distinguishing between tumor regions and non-tumor areas (such as normal epithelial tissue, glands, etc.) based solely on immunohistochemistry (IHC) images presents certain challenges. Achieving higher accuracy often requires the integration of multi-dimensional AI algorithms (<xref ref-type="bibr" rid="B18">18</xref>).</p>
<p>In this study, to automatically calculate the CPS in gastric cancer, and to improve the efficiency of analysis, we propose an AI-based whole-slide analysis pipeline. The proposed pipeline integrates a pixel-level segmentation model for tumor region delineation with a patch-level classification model for enhanced tumor recognition. Subsequently, a YOLO algorithm was employed to identify target cells for PD-L1 quantification. The primary objective of this study was to develop and evaluate an integrated pipeline to support standardized CPS assessment in gastric cancer diagnostics, with the goal of establishing a framework for automated, AI-assisted clinical CPS evaluation. This framework aims to assist pathologists in CPS calculation and provide a foundation for screening patients who may be suitable for immunotherapy.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Materials</title>
<p>A total of 210 formalin-fixed, paraffin-embedded, anonymized samples from patients diagnosed with gastric cancer were collected from 3DMed Clinical Laboratory (accredited by CAP and CLIA) as model development and internal test cohort in this study. Among these, 100 samples were used to develop the deep learning (DL) models, while the remaining 110 samples constituted a held-out internal test set to evaluate the AI-based CPS prediction pipeline performance. In addition, 98 external samples were obtained from Shanghai Renji Hospital and used as external cohort to test the generalization ability of the AI-based pipeline. All the samples were prepared and stained using the PD-L1 IHC 22C3 pharmDx assay (Dako, Carpinteria, CA, USA) on the Dako Autostainer Link 48 platform, according to the manufacturer&#x2019;s protocol. After the completion of section staining, all the tissues on the stained sections were scanned and digitized at 20&#xd7; magnification (0.475 &#x3bc;m/pixel) as WSIs using a KFBIO FK-Pro-120 slide scanner. The exclusion criteria for samples include severe tissue folds/tears, strong nonspecific staining, the presence of large bubble issues, among others. The interpretations of CPS values were performed by two trained pathologists (PD-L1 22C3 assay certified) under double-blinded conditions. To ensure the precision and reliability of the DL model, only WSI samples with concordant diagnoses from two pathologists were retained as ground truth for subsequent comparison with DL model outputs.</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Overall workflow of CPS prediction</title>
<p>To achieve automated prediction of PD-L1 expression in gastric cancer, we constructed an AI algorithm-based prediction pipeline. This pipeline integrates sequential deep learning models operating without manual intervention during testing. Each model was individually trained and validated with corresponding annotated data. All data annotations were performed by pathologists using an in-house developed software (APTime, developed by 3D Medicines Inc.). The fully automated pipeline for CPS prediction, as illustrated in <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1C</bold>
</xref>, initiates with tissue localization. It was performed on WSIs using Otsu thresholding on grayscale-converted slides at 0.625 &#xd7; magnification (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1A</bold>
</xref>). Otsu preprocessing significantly enhanced computational efficiency by eliminating redundant patch classification across non-informative background regions. During the subsequent model prediction phase, all processing was conducted at a magnification of 20 &#xd7; (0.475 &#x3bc;m/pixel). Identified tissue regions were then partitioned into non-overlapping patches with 256 &#xd7; 256 pixel size. A trained MobileNet-v2 patch classifier then categorized these patches as either tumor-containing or non-tumor-containing. To refine tumor region identification, patches classified as tumor by the MobileNet-v2 were then processed by a trained U-Net model for pixel-level segmentation of tumor versus non-tumor regions. Only patches exhibiting consensus tumor regions (those classified as tumor by MobileNet-v2 and simultaneously segmented as tumor by U-Net) were retained for subsequent tumor cell analysis. Finally, the trained YOLO-based detector performed triple-task recognition: detection of (1) PD-L1<sup>+</sup> tumor cells, (2) PD-L1<sup>&#x2212;</sup> tumor cells in the tumor regions, and detection of (3) PD-L1<sup>+</sup> immune cells in the non-tumor regions associated with tumor-containing patches. The final CPS was calculated based on the cellular counts derived from YOLO detection outputs.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>AI pipeline for CPS evaluation. Overview of the proposed AI pipeline in this study. <bold>(A)</bold> Preprocessing: prior to DL models prediction, tissue regions within WSIs were automatically localized through Otsu&#x2019;s thresholding, followed by dividing into non-overlapping 256&#xd7;256 pixel patches at 20&#xd7; magnification. <bold>(B)</bold> Model training. The patch classification model (MobileNet v2), tumor segmentation model (U-Net) and cell detection model (YOLOX) were trained on the corresponding annotation datasets. <bold>(C)</bold> The fully automated pipeline for CPS prediction. After preprocessing, the patches were input into the patch classification model to identify tumor-containing patches. These tumor-containing patches were also fed into the tumor segmentation model to obtain the segmented tumor regions. By combining the results from the above two models, the output patches were input into the cell detection model. The resulting cells were used for CPS calculation.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimmu-16-1614099-g001.tif">
<alt-text content-type="machine-generated">Diagram illustrating a workflow for processing and analyzing PD-L1 stained whole slide images (WSI) for tumor detection.   A. The Processing section shows a sequence from identifying tissue regions to obtaining 256 by 256 pixel image patches.   B. The Training phase consists of three steps: Patch classification using MobileNet-v2 to classify tumor and stroma patches, tumor segmentation using U-Net for tumor and stroma regions, and cell detection using YOLOX to identify PD-L1 positive and negative tumor and immune cells.   C. The Inference workflow depicts preprocessing, patch classification, tumor segmentation, and cell detection leading to the final detection, with labeled outcomes.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>The development of patch classification model</title>
<p>To prepare the training dataset of the patch classification model, the 100 WSIs in the model development cohort were divided into 256 &#xd7; 256 pixel patches. For samples with extensive tumor regions, pathologists selected representative tumor patches. For samples containing limited tumor regions, we retained as many tumor-containing patches as possible to incorporate into the training data set. These patches were labeled as either tumor-containing patches or non-tumor patches based on the presence or absence of tumor cells. To maintain class balance and enhance model performance during training, we selected a comparable number of non-tumor patches to tumor-containing patches from each sample, resulting in a final dataset comprising 118,715 tumor-containing patches and 119,476 non-tumor patches (including necrotic areas, normal epithelial regions, stromal regions, etc.).</p>
<p>The dataset was partitioned into training, validation, and test subsets at a 6:2:2 ratio, and to enhance model robustness, we used random flipping, rotation, and blurring to augment the data during training. We employed the MobileNet-v2 architecture, a lightweight convolutional neural network (CNN) optimized for computational efficiency, as the backbone for patch classification (<xref ref-type="bibr" rid="B19">19</xref>). The model leveraged transfer learning through ImageNet pre-trained weights, with strategic fine-tuning: only the final seven layers were unfrozen to adapt domain-specific histopathological features while preserving generic pattern recognition capabilities from pre-training. Following the convolutional layers, the architecture includes a flatten layer and a dense layer (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1B</bold>
</xref>).</p>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>The development of tumor segmentation model</title>
<p>To prepare the training dataset of the tumor segmentation model, representative patches with 1024 &#xd7; 1024 pixels size from the 100 WSIs in the model development dataset were selected for pixel-level annotation. We constructed a dataset comprising 3,923 image patches. Among these, 1,929 tumor-containing patches were annotated with pixel-level tumor region labels by pathologists, while 1,994 additional patches containing normal tissues or adjacent non-tumorous tissues were incorporated into the training set, serving as background to enhance the model&#x2019;s ability to distinguish tumor boundaries.</p>
<p>The datasets were randomly split into training, validation and test sets in a ratio of 7:2:1. Data augmentation techniques, including random flipping and rotation, as well as hue, saturation and value changes, were used during training to avoid overfitting and improve accuracy and generalization ability of the model. Tumor segmentation model was built based on a U-Net structure, with Xception-style blocks, which consist of separable convolution layers to reduce the number of parameters and accelerate inference (<xref ref-type="bibr" rid="B20">20</xref>). The model is a symmetric encoder-decoder architecture with skip connection. In the training procedure, labeled patches were further cropped to 512 &#xd7; 512 size. The DL model was trained to simultaneously segment the tumor area and classify the input region as auxiliary loss. Only the output of the tumor area segmentation task was used to predict the tumor region (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1B</bold>
</xref>).</p>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>The development of cell detection model</title>
<p>To prepare the training dataset of the cell detection model, representative patches (256 &#xd7; 256 pixel size) containing PD-L1<sup>+</sup> and PD-L1<sup>&#x2212;</sup> tumor cells, or PD-L1<sup>+</sup> immune cells, were selected from the 100 cropped WSIs used for model development. We finally constructed a dataset comprising 4604 image patches. On these patches, cells were annotated by experienced pathologists using spots with cell tags and were grouped into PD-L1<sup>+</sup> tumor cells (85,659), PD-L1<sup>+</sup> immune cells (19,434), and PD-L1<sup>&#x2212;</sup> tumor cells (130,512). When annotating PD-L1-positive cells, we labeled cells exhibiting diverse expression intensity levels (including strong, moderate, and weak). For immune cells, we also labeled various morphological forms of both lymphocytes and macrophages.</p>
<p>We built the cell detection model based on the YOLOX (<xref ref-type="bibr" rid="B21">21</xref>), which can directly classify, locate, and count the objects on the input patches (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1B</bold>
</xref>). In the data augmentation step, the same strategies as for the U-Net were applied, along with mosaic and mixup.</p>
</sec>
<sec id="s2_6">
<label>2.6</label>
<title>CPS algorithm</title>
<p>CPS is generally calculated by dividing the number of PD-L1 stained cells (including tumor cells, lymphocytes and macrophages) by the total number of viable tumor cells, multiplied by 100. The total formula is shown below:</p>
<disp-formula>
<mml:math display="block" id="M1">
<mml:mrow>
<mml:mtext>CPS</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mtable equalrows="true" equalcolumns="true">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>r</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>o</mml:mi>
<mml:mi>f</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>L</mml:mi>
<mml:mn>1</mml:mn>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>d</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mfenced>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>,</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>l</mml:mi>
<mml:mi>y</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>h</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>,</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>d</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>h</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>g</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>n</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>r</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>o</mml:mi>
<mml:mi>f</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>100</mml:mn>
</mml:mrow>
</mml:math>
</disp-formula>
<p>To ensure precise calculation of the CPS, the following criteria must be rigorously applied: 1) PD-L1-positive tumor cells are defined as tumor cells exhibiting partial or complete linear membrane staining within tumor nests, excluding cells in necrotic areas. 2) PD-L1-positive immune cells should be quantified only if they are located within tumor nests or adjacent supporting stroma and maintain direct spatial proximity to tumor cells (within a 0.5 mm radius).</p>
<p>As cells were grouped into PD-L1<sup>+</sup> tumor cells, PD-L1<sup>+</sup> immune cells, and PD-L1<sup>&#x2212;</sup> tumor cells by the cell detection model, the final formula is shown below:</p>
<disp-formula>
<mml:math display="block" id="M2">
<mml:mrow>
<mml:mtext>CPS</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>L</mml:mi>
<mml:msup>
<mml:mn>1</mml:mn>
<mml:mo>+</mml:mo>
</mml:msup>
<mml:mtext>&#xa0;tumor&#xa0;cells</mml:mtext>
<mml:mo>+</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>L</mml:mi>
<mml:msup>
<mml:mn>1</mml:mn>
<mml:mo>+</mml:mo>
</mml:msup>
<mml:mtext>&#xa0;immune&#xa0;cells</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>L</mml:mi>
<mml:msup>
<mml:mn>1</mml:mn>
<mml:mo>+</mml:mo>
</mml:msup>
<mml:mtext>&#xa0;tumor&#xa0;cells</mml:mtext>
<mml:mo>+</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>L</mml:mi>
<mml:msup>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
</mml:msup>
<mml:mtext>&#xa0;tumor&#xa0;cells</mml:mtext>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>100</mml:mn>
</mml:mrow>
</mml:math>
</disp-formula>
<p>In clinical diagnosis, pathologists first roughly distinguish the tumor cell region from other regions at a lower magnification, then zoom in to a higher magnification for accurate cell counting, and ultimately render a definitive CPS positive/negative assessment. In this study, the final CPS was calculated using counts of PD-L1<sup>+</sup> tumor cells, PD-L1<sup>+</sup> immune cells, and PD-L1<sup>&#x2212;</sup> tumor cells detected by YOLOX.</p>
<p>The CPS threshold for PD-L1 positivity is defined as &#x2265; 1. Based on this cutoff, samples were stratified into two distinct subgroups: PD-L1<sup>&#x2212;</sup> samples: CPS &lt; 1; PD-L1<sup>+</sup> samples: CPS &#x2265; 1.</p>
</sec>
<sec id="s2_7">
<label>2.7</label>
<title>Evaluation metrics and statistical analyses</title>
<p>This study employed a comprehensive set of evaluation metrics to assess AI model performance, including: Classification metrics (accuracy, precision, recall, specificity, and F1 score); Segmentation metrics (dice coefficient and pixel accuracy); Target detection metrics (Intersection over Union (IoU), Average Precision (AP)). Consistency between AI-calculated CPS values and pathologist assessments was analyzed using confusion matrices and Cohen&#x2019;s kappa coefficient. The kappa statistic (range: 0 - 1) was interpreted using established clinical benchmarks: slight agreement (0 - 0.2); fair agreement (0.2 - 0.4); moderate agreement (0.4 - 0.6); substantial agreement (0.6 - 0.8); near-perfect agreement (0.8 - 1.0).</p>
<p>All statistical analyses and graphical visualizations were conducted using Microsoft Excel and Python (version 3.9.12), implemented through the PyCharm 2021.3.3 integrated development environment.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="s3_1">
<label>3.1</label>
<title>Clinicopathological characteristics of patients</title>
<p>A total of 210 specimens were utilized in model development and internal validation. As detailed in <xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>, both cohorts demonstrated comparable clinicopathological characteristics. The majority of the samples were obtained by surgical resection (79%, 166/210), and a minority by needle biopsy and other methods (21%, 44/210). All tumor samples were collected exclusively from the stomach. No statistically significant differences were observed between the two groups (all p-values &gt; 0.05).</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Clinicopathological characteristics of gastric cancer samples.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" rowspan="2" align="center">Characteristics</th>
<th valign="middle" colspan="2" align="center">Internal data</th>
<th valign="middle" rowspan="2" align="center">
<italic>X</italic>
<sup>2</sup>
</th>
<th valign="middle" rowspan="2" align="center">
<italic>P</italic>-value</th>
</tr>
<tr>
<th valign="middle" align="center">Training set (N = 100)</th>
<th valign="middle" align="center">Test set <break/>(N = 110)</th>
</tr>
</thead>
<tbody>
<tr>
<th valign="middle" align="left" colspan="5">Gender</th>
</tr>
<tr>
<td valign="middle" align="left">Male</td>
<td valign="middle" align="center">57</td>
<td valign="middle" align="center">76</td>
<td valign="middle" align="center">3.298</td>
<td valign="middle" align="center">0.069</td>
</tr>
<tr>
<td valign="middle" align="left">Female</td>
<td valign="middle" align="center">43</td>
<td valign="middle" align="center">34</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
</tr>
<tr>
<th valign="middle" align="left" colspan="5">Age (years)</th>
</tr>
<tr>
<td valign="middle" align="left">&#x2264; 65</td>
<td valign="middle" align="center">64</td>
<td valign="middle" align="center">61</td>
<td valign="middle" align="center">1.588</td>
<td valign="middle" align="center">0.208</td>
</tr>
<tr>
<td valign="middle" align="left">&gt; 65</td>
<td valign="middle" align="center">36</td>
<td valign="middle" align="center">49</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
</tr>
<tr>
<th valign="middle" align="left" colspan="5">Sampling methods</th>
</tr>
<tr>
<td valign="middle" align="left">Surgical Operation</td>
<td valign="middle" align="center">79</td>
<td valign="middle" align="center">87</td>
<td valign="middle" align="center">1.411</td>
<td valign="middle" align="center">0.494</td>
</tr>
<tr>
<td valign="middle" align="left">Needle Biopsy</td>
<td valign="middle" align="center">8</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="left">Others</td>
<td valign="middle" align="center">13</td>
<td valign="middle" align="center">18</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Performance of patch classification model</title>
<p>To evaluate the PD-L1 expression in the tumor region of a sample, it is essential to accurately localize the tumor area. Considering that patch-level classification models are more efficient in analysis compared to pixel-level segmentation models, and the annotations required for training patch classification models are relatively easier to obtain, we first trained a patch classification model to localize the tumor region. We divided the annotated patches into a training set, a validation set, and an independent test set. The trained model demonstrated high performance on both the validation (<xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2A</bold>
</xref>) and test sets (<xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2B</bold>
</xref>), with accuracy, specificity, and sensitivity all exceeding 97% (<xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>). The trained classification model also effectively distinguished stained necrotic regions from tumor regions (<xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2C</bold>
</xref>), thereby eliminating the impact of necrotic regions on PD-L1 evaluation.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Performance evaluation of classification model. Patch classification results of the MobileNet v2 based on confusion matrix in <bold>(A)</bold> validation set and <bold>(B)</bold> test set. <bold>(C)</bold> Example diagram of the patch classification model results, with two zoomed-in sections demonstrating the model&#x2019;s ability to exclude necrotic regions and recognize Tumor regions. The blue-tinted regions denote algorithm-identified tumor areas.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimmu-16-1614099-g002.tif">
<alt-text content-type="machine-generated">Diagram with three sections: A and B show confusion matrices for internal validation and test, depicting true vs. predicted labels of &#x201c;others&#x201d; and &#x201c;tumor.&#x201d; Panel A shows high accuracy with most values on the diagonal (23224, 23228). Panel B similarly shows high accuracy (23195, 23155). Section C is a histological image with identified tumor regions in blue, highlighting necrotic and tumor regions. Scale bar indicates 2 millimeters.</alt-text>
</graphic>
</fig>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Performance evaluation of classification model.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Statistics</th>
<th valign="middle" align="center">Validation set</th>
<th valign="middle" align="center">Test set</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Accuracy</td>
<td valign="middle" align="center">0.975 [0.974, 0.977]</td>
<td valign="middle" align="center">0.973 [0.972, 0.974]</td>
</tr>
<tr>
<td valign="middle" align="center">Precision</td>
<td valign="middle" align="center">0.978 [0.977, 0.980]</td>
<td valign="middle" align="center">0.977 [0.976, 0.978]</td>
</tr>
<tr>
<td valign="middle" align="center">Recall</td>
<td valign="middle" align="center">0.972 [0.971, 0.974]</td>
<td valign="middle" align="center">0.970 [0.967, 0.971]</td>
</tr>
<tr>
<td valign="middle" align="center">Specificity</td>
<td valign="middle" align="center">0.978 [0.977, 0.980]</td>
<td valign="middle" align="center">0.977 [0.976, 0.978]</td>
</tr>
<tr>
<td valign="middle" align="center">F1 score</td>
<td valign="middle" align="center">0.975 [0.974, 0.977]</td>
<td valign="middle" align="center">0.973 [0.972, 0.974]</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Data was presented as score [95% confidence interval (CI)].</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Performance of tumor segmentation model</title>
<p>The tumor patches identified by the patch classification model not only encompass tumor regions but also contain partial stroma areas. Therefore, a segmentation model is further required to distinguish between tumor and stroma.</p>
<p>In this study, we trained a segmentation model that can distinguish tumor regions from non-tumor regions, which include necrosis and normal epithelium. The model&#x2019;s performance was evaluated using dice coefficient and pixel accuracy metrics. On both the validation and test sets, the model demonstrated high segmentation performance. Notably, it effectively identified non-tumor regions, with all metrics exceeding 97% (<xref ref-type="fig" rid="f3">
<bold>Figures&#xa0;3A, B</bold>
</xref>). Additionally, the segmentation model could accurately distinguish tumor regions from necrotic areas and normal glandular structures (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3C</bold>
</xref>).</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Performance evaluation of tumor segmentation model. The analysis of pixel accuracy and dice coefficient in <bold>(A)</bold> validation set and <bold>(B)</bold> test set. <bold>(C)</bold> An example diagram of the segmentation model result, with the comparison between the segmentation results and the corresponding original patch demonstrating the model&#x2019;s ability to exclude necrotic regions and normal glandular structures. Green curves delineate manually annotated normal glands and necrotic regions; red contours indicate AI-predicted tumor regions.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimmu-16-1614099-g003.tif">
<alt-text content-type="machine-generated">Bar charts and medical images showing tumor and stroma evaluation metrics. Charts A and B display pixel accuracy and Dice coefficient scores for both tumor and stroma, with stroma scoring higher. Image C shows a microscopic view of tissue with segmented tumor regions in red. Zoomed sections compare segmentation results with original images, highlighting normal gland and necrotic regions.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Performance of model on cell detection</title>
<p>Within our PD-L1 expression evaluation pipeline, precise tumor region localization is followed by quantification of PD-L1<sup>+</sup> tumor cells, PD-L1<sup>&#x2212;</sup> tumor cells, and PD-L1<sup>+</sup> immune cells within tumor-associated regions. To accomplish this, we developed a deep learning-based cell detection model utilizing the YOLO framework for identification of these three cellular phenotypes. Since immune cells can infiltrate into the tumor region, the calculation of the CPS requires integration with the output of tumor segmentation model. This allows us to distinguish PD-L1<sup>+</sup> tumor cells, PD-L1<sup>&#x2212;</sup> tumor cells, PD-L1<sup>+</sup> immune cells within the tumor regions, and PD-L1<sup>+</sup> immune cells within the non-tumor regions for final CPS calculation (<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Figure S1</bold>
</xref>).</p>
<p>We evaluated the model&#x2019;s performance using IoU and AP, with an IoU threshold set at 0.5. The trained cell detection model demonstrated strong performance on both the validation and test sets, achieving AP scores close to 0.900 for true positives (0.889 on the validation set and 0.888 on the test set) (<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>).</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Performance of model on cell detection.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" rowspan="2" align="center">Cell type</th>
<th valign="middle" colspan="2" align="center">AP</th>
</tr>
<tr>
<th valign="middle" align="center">Validation</th>
<th valign="middle" align="center">Test</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">TN</td>
<td valign="middle" align="center">0.857</td>
<td valign="middle" align="center">0.866</td>
</tr>
<tr>
<td valign="middle" align="center">TP</td>
<td valign="middle" align="center">0.899</td>
<td valign="middle" align="center">0.888</td>
</tr>
<tr>
<td valign="middle" align="center">IP</td>
<td valign="middle" align="center">0.869</td>
<td valign="middle" align="center">0.864</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>TN, tumor negative cells; TP, tumor positive cells; IP, immune positive cells; AP, average precision.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3_5">
<label>3.5</label>
<title>Comparison of consistency between AI pipeline and pathologists</title>
<p>To validate the accuracy of our AI-based pipeline, we assessed the agreement between CPS-AI (AI-derived CPS) and CPS-Doc (pathologist-evaluated CPS) using Cohen&#x2019;s kappa coefficient in internal and external cohorts. First, we evaluated our pipeline on a held-out internal test cohort (n = 110), which was excluded from model training. To further test generalizability, an independent external cohort (n = 98) was introduced. As shown in <xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>, the internal dataset achieved a kappa value of 0.782, demonstrating substantial agreement between model predictions and pathologist-evaluated scores. While the external dataset exhibited a slightly lower value of 0.737, the kappa value remained clinically meaningful, confirming the model&#x2019;s robustness across diverse datasets.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Consistency evaluation between AI pipeline and pathologists. Measures of concordance of combined positive score (CPS) interpretation results between AI pipelines (with and without classification model) and pathologists using Kappa values, all p&lt;0.001. Statistical comparison revealed no significant difference in predictive performance between models with and without the classification module (Internal cohort: p=0.832; External cohort: p=0.683, two-sided Mann-Whitney U test).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimmu-16-1614099-g004.tif">
<alt-text content-type="machine-generated">Bar chart comparing Cohen's Kappa values for internal and external datasets. Internal data shows values of 0.782 with and 0.764 without a classification model, P = 0.832. External data shows values of 0.737 with and 0.696 without the model, P = 0.683.</alt-text>
</graphic>
</fig>
<p>To further quantify performance, confusion matrices were generated to compare CPS-AI against CPS-Doc. Using CPS-Doc as the reference standard, we evaluated the CPS prediction accuracy of the AI-based pipeline across multiple metrics. In the internal test cohort, AI-based pipeline achieved an accuracy of 0.882 [95% CI = 0.822 - 0.942], sensitivity of 0.964 [95% CI = 0.929 - 0.999], and specificity of 0.800 [95% CI = 0.725 - 0.875], highlighting its strong discriminative capability (<xref ref-type="fig" rid="f5">
<bold>Figures&#xa0;5A, C</bold>
</xref>). Performance remained robust in the external cohort, with retained accuracy and high sensitivity (<xref ref-type="fig" rid="f5">
<bold>Figures&#xa0;5B, C</bold>
</xref>). These results collectively underscore the reliability and generalizability of the AI pipeline.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Performance evaluation of the AI pipeline for combined positive score (CPS) prediction. Comparison between CPS predicted by the AI pipeline (CPS-AI) and by doctors (CPS-Doc) in <bold>(A)</bold> the internal validation cohort and <bold>(B)</bold> the external test cohort. <bold>(C)</bold> Histograms of AI models performance in the internal cohort and the external cohort.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimmu-16-1614099-g005.tif">
<alt-text content-type="machine-generated">Confusion matrices and a bar chart compare the performance of CPS-AI. Panel A shows the internal test with 45 negatives and 53 positives correctly identified. Panel B shows the external test with 41 negatives and 44 positives correctly identified. Panel C displays bar charts of performance metrics, highlighting accuracy, precision, recall, specificity, and F1 score for both internal and external data. Scores range from 0.788 to 0.964.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3_6">
<label>3.6</label>
<title>Evaluating combined effectiveness of classification model and segmentation model</title>
<p>In this study, we combined a patch classification model with a region segmentation model to localize tumor areas, where only tumor regions simultaneously identified by both models would proceed to subsequent cell detection. This design offers dual advantages: On one hand, integrating results from both models may enhance the accuracy of tumor region identification. On the other hand, leveraging the higher efficiency of the patch classification model to first roughly localize tumor regions, followed by region segmentation on these pre-selected patches, significantly improves the overall analysis efficiency. Our findings demonstrate that the integrated pipeline incorporating the patch classification model achieved slightly improved consistency with the pathologists compared to the pipeline using only region segmentation with cell detection (<xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>, <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Figure S2</bold>
</xref>). Regarding efficiency, the integrated workflow showed 25 - 30% improvement in average processing time per sample (<xref ref-type="table" rid="T4">
<bold>Table&#xa0;4</bold>
</xref>). Particularly for samples with small tumor areas relative to the whole tissue section, a more than twofold enhancement in processing efficiency was achieved (<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Figure S3</bold>
</xref>).</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Compare the efficiency of different pipelines in image processing.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Process</th>
<th valign="middle" align="center">Metric</th>
<th valign="middle" align="center">Pipeline with classification</th>
<th valign="middle" align="center">Pipeline without classification</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" rowspan="3" align="center">Internal Cohort</td>
<td valign="top" align="center">Avg Time (s)</td>
<td valign="middle" align="center">136.473</td>
<td valign="middle" align="center">183.761</td>
</tr>
<tr>
<td valign="top" align="center">Avg Time per Tumor Patch (s)</td>
<td valign="middle" align="center">0.119</td>
<td valign="middle" align="center">0.174</td>
</tr>
<tr>
<td valign="top" align="center">Avg Time per Unit Area (s/cm<sup>2</sup>)</td>
<td valign="middle" align="center">166.386</td>
<td valign="middle" align="center">210.471</td>
</tr>
<tr>
<td valign="middle" rowspan="3" align="center">External Cohort</td>
<td valign="top" align="center">Average Time (s)</td>
<td valign="middle" align="center">176.762</td>
<td valign="middle" align="center">263.655</td>
</tr>
<tr>
<td valign="top" align="center">Avg Time per Tumor Patch (s)</td>
<td valign="middle" align="center">0.182</td>
<td valign="middle" align="center">0.059</td>
</tr>
<tr>
<td valign="top" align="center">Avg Time per Unit Area (s/cm<sup>2</sup>)</td>
<td valign="middle" align="center">67.722</td>
<td valign="middle" align="center">101.991</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Avg, average.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>Immunotherapy with ICIs has now become a new and important treatment option for gastric cancer. Accurate assessment of PD-L1 by pathologists provides essential guidance for selecting gastric cancer patients suitable for ICI therapy. However, current evaluations of PD-L1 by pathologists still lack satisfactory consistency and reproducibility (<xref ref-type="bibr" rid="B22">22</xref>). Particularly for assessing the CPS, challenges arise not only from evaluating PD-L1 expression on tumor cells but also on immune cells. Given the vast morphological differences between immune cells and tumor cells, objective CPS quantification poses significant difficulties (<xref ref-type="bibr" rid="B23">23</xref>).</p>
<p>With the rapid advancement of image analysis technology, AI-based digital pathology tools are playing an increasingly critical role in pathological diagnosis (<xref ref-type="bibr" rid="B24">24</xref>). In clinical practice, pathologists are skilled at qualitative tasks such as localizing tumor regions or excluding nonspecific staining regions, but they are less precise in quantitative counting compared to computational methods. Recent studies have applied deep learning algorithms to develop AI models for assisting various quantitative biomarker assessments, including HER2, Ki67 in breast cancer (<xref ref-type="bibr" rid="B25">25</xref>&#x2013;<xref ref-type="bibr" rid="B27">27</xref>), and PD-L1 in lung cancer (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B28">28</xref>), aiming to improve accuracy and reproducibility. Currently, most PD-L1 expression evaluation AI models focus on evaluating tumor cell expression in lung cancer, with limited research on CPS scoring. However, PD-L1 assessment is clinically relevant across various cancer types and requires consideration of immune cell PD-L1 expression. The AI-assisted CPS diagnostic model for gastric cancer developed in this study effectively addresses this gap.</p>
<p>Most previous AI workflows for biomarker quantification typically involve a segmentation model to identify tumor regions, followed by a nucleus segmentation model, and another model for cell type classification (<xref ref-type="bibr" rid="B15">15</xref>&#x2013;<xref ref-type="bibr" rid="B17">17</xref>). These workflows could be time-consuming. Moreover, distinguishing tumor regions solely based on IHC images can be challenging in some cases, leading to suboptimal segmentation accuracy (<xref ref-type="bibr" rid="B29">29</xref>). To mitigate this, some studies align corresponding hematoxylin and eosin (H&amp;E) stained WSIs with IHC images, leveraging the richer structural information in H&amp;E images to map tumor regions onto IHC images (<xref ref-type="bibr" rid="B30">30</xref>). In this study, our AI model innovatively combines a patch classification model and a tumor segmentation model to localize tumor regions, enhancing both performance and processing efficiency. Only regions simultaneously identified by both models are included in subsequent calculations - akin to pathologists prioritizing consensus regions for biomarker assessment in clinical practice.</p>
<p>The integration of patch classification and region segmentation also improves computational efficiency, particularly for samples with small tumor-to-tissue ratios. Since segmentation models process images at the pixel level and are computationally intensive, restricting segmentation to tumor-containing patches (pre-identified by the classification model) significantly reduces processing time. Moreover, if the classification model achieves high accuracy, the segmentation model can focus solely on distinguishing tumor and stromal regions within these patches, bypassing morphologically ambiguous structures like benign lesions or normal epithelium. While this dual-model approach requires training two tumor localization models, patch-level annotations are relatively easier to obtain, potentially reducing overall labeling efforts.</p>
<p>The study still has some limitations: 1) The study included data from only two institutions, with limited sample size (100 samples for model development) and uniform scanner use. This may lead to decreased model performance when deployed across different hospitals. Future efforts should diversify sample sizes and data sources to enhance the model&#x2019;s generalizability. 2) Given potential staining variations across different PD-L1 antibody assays, which may reduce model performance or compromise generalizability, application to other PD-L1 antibodies requires model retraining with expanded samples. 3) Although the AI-based CPS prediction pipeline developed in this study is fully automated and demonstrates close concordance with pathologist assessments, inconsistencies persist, highlighting the need for further optimization. These discrepancies are mainly attributable to two factors: Firstly, weakly stained tumor cells were confirmed as a primary source of inconsistency, as faint or incomplete membranous PD-L1 staining occasionally led to their missed detection by the AI system, whereas pathologists successfully identified them through careful microscopic evaluation. Secondly, background interference caused by necrotic debris, mucin deposits, or staining artifacts occasionally generated false-positive signals in the cell detection algorithm, particularly in challenging histological subtypes. Consequently, this pipeline should currently serve as an adjunct tool to assist pathologists in interpretation. 4) The dual-model for tumor localization strategy has room for refinement. Future studies could train models on distinct datasets to simulate pathologists with varying experience, improving localization accuracy.</p>
<p>In summary, the AI pipeline developed in this study demonstrated high consistency with pathologists in internal and external test cohorts, along with efficient image processing. By enabling precise cell quantification and tumor region delineation on WSIs, these AI pipelines enhance model interpretability and assist pathologists in reviewing and verifying results, minimizing oversights or misjudgments, meanwhile enhancing pathologists&#x2019; trust in the AI model. This approach holds significant potential for clinical adoption in PD-L1 CPS assessment for gastric cancer and beyond.</p>
</sec>
</body>
<back>
<sec id="s5" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material</bold>
</xref>. Further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec id="s6" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>This study received approval from the Ethics Committee of Renji Hospital affiliated to Shanghai Jiao Tong University (LY2022-065-B). The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>TH: Writing &#x2013; original draft, Investigation, Formal analysis. MZ: Methodology, Project administration, Investigation, Writing &#x2013; original draft. ZS: Resources, Writing &#x2013; review &amp; editing, Investigation. PC: Software, Writing &#x2013; original draft, Methodology. SC: Writing &#x2013; original draft, Methodology. WZ: Writing &#x2013; review &amp; editing, Visualization. YZ: Writing &#x2013; original draft, Visualization, Formal analysis. HL: Writing &#x2013; original draft, Data curation, Validation. DZ: Project administration, Writing &#x2013; review &amp; editing. XL: Conceptualization, Methodology, Writing &#x2013; review &amp; editing. ZL: Writing &#x2013; review &amp; editing, Project administration, Resources. XX: Resources, Writing &#x2013; review &amp; editing, Supervision, Conceptualization.</p>
</sec>
<sec id="s8" sec-type="funding-information">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research and/or publication of this article. This study was supported by Shanghai Academician Expert Workstation Project (2023).</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>The authors extend their sincere gratitude to the patients and their families for their invaluable support throughout the study.</p>
</ack>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>All authors affiliated with 3D Medicines Inc. or SODA Data Technology Inc. are current or former employees.</p>
<p>The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s10" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declare that no Generative AI was used in the creation of this manuscript.</p>
</sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s12" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fimmu.2025.1614099/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fimmu.2025.1614099/full#supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document"/>
</sec>
<fn-group>
<title>Abbreviations</title>
<fn fn-type="abbr" id="abbrev1">
<p>ICI, immune checkpoint inhibitor; CPS, combined positive score; WSI, whole slide images; PD-L1, programmed death ligand-1; DL, deep learning; IHC, immunohistochemistry; CNN, convolutional neural network; H&amp;E, hematoxylin and eosin.</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bray</surname> <given-names>F</given-names>
</name>
<name>
<surname>Laversanne</surname> <given-names>M</given-names>
</name>
<name>
<surname>Sung</surname> <given-names>H</given-names>
</name>
<name>
<surname>Ferlay</surname> <given-names>J</given-names>
</name>
<name>
<surname>Siegel</surname> <given-names>RL</given-names>
</name>
<name>
<surname>Soerjomataram</surname> <given-names>I</given-names>
</name>
<etal/>
</person-group>. <article-title>Global cancer statistics 2022: GLOBOCAN estimates of incidence and mortality worldwide for 36 cancers in 185 countries</article-title>. <source>CA Cancer J Clin</source>. (<year>2024</year>) <volume>74</volume>:<page-range>229&#x2013;63</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.3322/caac.21834</pub-id>, PMID: <pub-id pub-id-type="pmid">38572751</pub-id></citation></ref>
<ref id="B2">
<label>2</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>S</given-names>
</name>
<name>
<surname>Zheng</surname> <given-names>R</given-names>
</name>
<name>
<surname>Li</surname> <given-names>J</given-names>
</name>
<name>
<surname>Zeng</surname> <given-names>H</given-names>
</name>
<name>
<surname>Li</surname> <given-names>L</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>R</given-names>
</name>
<etal/>
</person-group>. <article-title>Global, regional, and national lifetime risks of developing and dying from gastrointestinal cancers in 185 countries: a population-based systematic analysis of GLOBOCAN</article-title>. <source>Lancet Gastroenterol Hepatol</source>. (<year>2024</year>) <volume>9</volume>(<issue>3</issue>):<page-range>229&#x2013;37</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S2468-1253(23)00366-7</pub-id>, PMID: <pub-id pub-id-type="pmid">38185129</pub-id></citation></ref>
<ref id="B3">
<label>3</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Janjigian</surname> <given-names>YY</given-names>
</name>
<name>
<surname>Shitara</surname> <given-names>K</given-names>
</name>
<name>
<surname>Moehler</surname> <given-names>MH</given-names>
</name>
<name>
<surname>Garrido</surname> <given-names>M</given-names>
</name>
<name>
<surname>Gallardo</surname> <given-names>C</given-names>
</name>
<name>
<surname>Shen</surname> <given-names>L</given-names>
</name>
<etal/>
</person-group>. <article-title>Nivolumab (NIVO) plus chemotherapy (chemo) vs chemo as first-line (1L) treatment for advanced gastric cancer/gastroesophageal junction cancer/esophageal adenocarcinoma (GC/GEJC/EAC): 3-year follow-up from CheckMate 649</article-title>. <source>Ann Oncol</source>. (<year>2023</year>) <volume>34</volume>:<page-range>S1254&#x2013;335</page-range>.</citation></ref>
<ref id="B4">
<label>4</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname> <given-names>J</given-names>
</name>
<name>
<surname>Jiang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Pan</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Gu</surname> <given-names>K</given-names>
</name>
<name>
<surname>Cang</surname> <given-names>S</given-names>
</name>
<name>
<surname>Han</surname> <given-names>L</given-names>
</name>
<etal/>
</person-group>. <article-title>Sintilimab plus chemotherapy for unresectable gastric or gastroesophageal junction cancer: the ORIENT-16 randomized clinical trial</article-title>. <source>JAMA</source>. (<year>2023</year>) <volume>330</volume>:<page-range>2064&#x2013;74</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1001/jama.2023.19918</pub-id>, PMID: <pub-id pub-id-type="pmid">38051328</pub-id></citation></ref>
<ref id="B5">
<label>5</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moehler</surname> <given-names>M</given-names>
</name>
<name>
<surname>Kato</surname> <given-names>K</given-names>
</name>
<name>
<surname>Arkenau</surname> <given-names>T</given-names>
</name>
<name>
<surname>Oh</surname> <given-names>DY</given-names>
</name>
<name>
<surname>Tabernero</surname> <given-names>J</given-names>
</name>
<name>
<surname>Cruz-Correa</surname> <given-names>M</given-names>
</name>
<etal/>
</person-group>. <article-title>Rationale 305: Phase 3 study of tislelizumab+chemotherapy vs placebo+chemotherapy as first-line treatment of advanced gastric or gastroesophageal junction adenocarcinoma</article-title>. <source>J Clin Oncol</source>. (<year>2023</year>) <volume>41</volume>:<fpage>286</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1200/JCO.2023.41.4_suppl.286</pub-id>
</citation></ref>
<ref id="B6">
<label>6</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bintintan</surname> <given-names>V</given-names>
</name>
<name>
<surname>Burz</surname> <given-names>C</given-names>
</name>
<name>
<surname>Pintea</surname> <given-names>I</given-names>
</name>
<name>
<surname>Muntean</surname> <given-names>A</given-names>
</name>
<name>
<surname>Deleanu</surname> <given-names>D</given-names>
</name>
<name>
<surname>Lupan</surname> <given-names>I</given-names>
</name>
<etal/>
</person-group>. <article-title>Predictive factors of immunotherapy in gastric cancer: A 2024 update</article-title>. <source>Diagnostics (Basel)</source>. (<year>2024</year>) <volume>14</volume>:<fpage>1247</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics14121247</pub-id>, PMID: <pub-id pub-id-type="pmid">38928662</pub-id></citation></ref>
<ref id="B7">
<label>7</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xie</surname> <given-names>T</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X</given-names>
</name>
<name>
<surname>Qi</surname> <given-names>C</given-names>
</name>
<name>
<surname>Shen</surname> <given-names>L</given-names>
</name>
<name>
<surname>Peng</surname> <given-names>Z</given-names>
</name>
</person-group>. <article-title>Appropriate PD-L1 cutoff value for gastric cancer immunotherapy: A systematic review and meta-analysis</article-title>. <source>Front Oncol</source>. (<year>2021</year>) <volume>11</volume>:<elocation-id>646355</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fonc.2021.646355</pub-id>, PMID: <pub-id pub-id-type="pmid">34540656</pub-id></citation></ref>
<ref id="B8">
<label>8</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Noori</surname> <given-names>M</given-names>
</name>
<name>
<surname>Fayyaz</surname> <given-names>F</given-names>
</name>
<name>
<surname>Zali</surname> <given-names>MR</given-names>
</name>
<name>
<surname>Bashash</surname> <given-names>D</given-names>
</name>
</person-group>. <article-title>Predictive value of PD-L1 expression in response to immune checkpoint inhibitors for gastric cancer treatment: a systematic review and meta-analysis</article-title>. <source>Expert Rev Anticancer Ther</source>. (<year>2023</year>) <volume>23</volume>:<page-range>1029&#x2013;39</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1080/14737140.2023.2238896</pub-id>, PMID: <pub-id pub-id-type="pmid">37466449</pub-id></citation></ref>
<ref id="B9">
<label>9</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Robert</surname> <given-names>ME</given-names>
</name>
<name>
<surname>R&#xfc;schoff</surname> <given-names>J</given-names>
</name>
<name>
<surname>Jasani</surname> <given-names>B</given-names>
</name>
<name>
<surname>Graham</surname> <given-names>RP</given-names>
</name>
<name>
<surname>Badve</surname> <given-names>SS</given-names>
</name>
<name>
<surname>Rodriguez-Justo</surname> <given-names>M</given-names>
</name>
<etal/>
</person-group>. <article-title>High interobserver variability among pathologists using combined positive score to evaluate PD-L1 expression in gastric, gastroesophageal junction, and esophageal adenocarcinoma</article-title>. <source>Mod Pathol</source>. (<year>2023</year>) <volume>36</volume>:<fpage>100154</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.modpat.2023.100154</pub-id>, PMID: <pub-id pub-id-type="pmid">36925069</pub-id></citation></ref>
<ref id="B10">
<label>10</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ma</surname> <given-names>J</given-names>
</name>
<name>
<surname>He</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Li</surname> <given-names>F</given-names>
</name>
<name>
<surname>Han</surname> <given-names>L</given-names>
</name>
<name>
<surname>You</surname> <given-names>C</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>B</given-names>
</name>
</person-group>. <article-title>Segment anything in medical images</article-title>. <source>Nat Commun</source>. (<year>2024</year>) <volume>15</volume>:<fpage>654</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41467-024-44824-z</pub-id>, PMID: <pub-id pub-id-type="pmid">38253604</pub-id></citation></ref>
<ref id="B11">
<label>11</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ehteshami Bejnordi</surname> <given-names>B</given-names>
</name>
<name>
<surname>Veta</surname> <given-names>M</given-names>
</name>
<name>
<surname>Johannes van Diest</surname> <given-names>P</given-names>
</name>
<name>
<surname>van Ginneken</surname> <given-names>B</given-names>
</name>
<name>
<surname>Karssemeijer</surname> <given-names>N</given-names>
</name>
<name>
<surname>Litjens</surname> <given-names>G</given-names>
</name>
<etal/>
</person-group>. <article-title>Diagnostic assessment of deep learning algorithms for detection of lymph node metastases in women with breast cancer</article-title>. <source>JAMA</source>. (<year>2017</year>) <volume>318</volume>:<page-range>2199&#x2013;210</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1001/jama.2017.14585</pub-id>, PMID: <pub-id pub-id-type="pmid">29234806</pub-id></citation></ref>
<ref id="B12">
<label>12</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Volinsky-Fremond</surname> <given-names>S</given-names>
</name>
<name>
<surname>Horeweg</surname> <given-names>N</given-names>
</name>
<name>
<surname>Andani</surname> <given-names>S</given-names>
</name>
<name>
<surname>Barkey Wolf</surname> <given-names>J</given-names>
</name>
<name>
<surname>Lafarge</surname> <given-names>MW</given-names>
</name>
<name>
<surname>de Kroon</surname> <given-names>CD</given-names>
</name>
<etal/>
</person-group>. <article-title>Prediction of recurrence risk in endometrial cancer with multimodal deep learning</article-title>. <source>Nat Med</source>. (<year>2024</year>) <volume>30</volume>:<page-range>1962&#x2013;73</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41591-024-02993-w</pub-id>, PMID: <pub-id pub-id-type="pmid">38789645</pub-id></citation></ref>
<ref id="B13">
<label>13</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Echle</surname> <given-names>A</given-names>
</name>
<name>
<surname>Rindtorff</surname> <given-names>NT</given-names>
</name>
<name>
<surname>Brinker</surname> <given-names>TJ</given-names>
</name>
<name>
<surname>Luedde</surname> <given-names>T</given-names>
</name>
<name>
<surname>Pearson</surname> <given-names>AT</given-names>
</name>
<name>
<surname>Kather</surname> <given-names>JN</given-names>
</name>
</person-group>. <article-title>Deep learning in cancer pathology: a new generation of clinical biomarkers</article-title>. <source>Br J Cancer</source>. (<year>2021</year>) <volume>124</volume>:<page-range>686&#x2013;96</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41416-020-01122-x</pub-id>, PMID: <pub-id pub-id-type="pmid">33204028</pub-id></citation></ref>
<ref id="B14">
<label>14</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cheng</surname> <given-names>G</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>F</given-names>
</name>
<name>
<surname>Xing</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>X</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>S</given-names>
</name>
<etal/>
</person-group>. <article-title>Artificial intelligence-assisted score analysis for predicting the expression of the immunotherapy biomarker PD-L1 in lung cancer</article-title>. <source>Front Immunol</source>. (<year>2022</year>) <volume>13</volume>:<elocation-id>893198</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fimmu.2022.893198</pub-id>, PMID: <pub-id pub-id-type="pmid">35844508</pub-id></citation></ref>
<ref id="B15">
<label>15</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname> <given-names>J</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>C</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>X</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>W</given-names>
</name>
<name>
<surname>Li</surname> <given-names>L</given-names>
</name>
<name>
<surname>Gao</surname> <given-names>N</given-names>
</name>
<etal/>
</person-group>. <article-title>Artificial intelligence-assisted system for precision diagnosis of PD-L1 expression in non-small cell lung cancer</article-title>. <source>Mod Pathol</source>. (<year>2022</year>) <volume>35</volume>:<page-range>403&#x2013;11</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41379-021-00904-9</pub-id>, PMID: <pub-id pub-id-type="pmid">34518630</pub-id></citation></ref>
<ref id="B16">
<label>16</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname> <given-names>KS</given-names>
</name>
<name>
<surname>Choi</surname> <given-names>E</given-names>
</name>
<name>
<surname>Cho</surname> <given-names>SI</given-names>
</name>
<name>
<surname>Park</surname> <given-names>S</given-names>
</name>
<name>
<surname>Ryu</surname> <given-names>J</given-names>
</name>
<name>
<surname>Puche</surname> <given-names>AV</given-names>
</name>
<etal/>
</person-group>. <article-title>An artificial intelligence-powered PD-L1 combined positive score (CPS) analyser in urothelial carcinoma alleviating interobserver and intersite variability</article-title>. <source>Histopathology</source>. (<year>2024</year>) <volume>85</volume>:<fpage>81</fpage>&#x2013;<lpage>91</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/his.15176</pub-id>, PMID: <pub-id pub-id-type="pmid">38477366</pub-id></citation></ref>
<ref id="B17">
<label>17</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>J</given-names>
</name>
<name>
<surname>Dong</surname> <given-names>P</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>X</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>J</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>M</given-names>
</name>
<name>
<surname>Shen</surname> <given-names>H</given-names>
</name>
<etal/>
</person-group>. <article-title>Artificial intelligence enhances whole-slide interpretation of PD-L1 CPS in triple-negative breast cancer: A multi-institutional ring study</article-title>. <source>Histopathology</source>. (<year>2024</year>) <volume>85</volume>:<page-range>451&#x2013;67</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/his.15205</pub-id>, PMID: <pub-id pub-id-type="pmid">38747491</pub-id></citation></ref>
<ref id="B18">
<label>18</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>H</given-names>
</name>
<name>
<surname>Li</surname> <given-names>C</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>G</given-names>
</name>
<name>
<surname>Li</surname> <given-names>X</given-names>
</name>
<name>
<surname>Mamunur Rahaman</surname> <given-names>M</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>H</given-names>
</name>
<etal/>
</person-group>. <article-title>GasHis-Transformer: A multi-scale visual transformer approach for gastric histopathological image detection</article-title>. <source>Pattern Recognit</source>. (<year>2022</year>) <volume>130</volume>:<fpage>108827</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.patcog.2022.108827</pub-id>
</citation></ref>
<ref id="B19">
<label>19</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Sandler</surname> <given-names>M</given-names>
</name>
<name>
<surname>Howard</surname> <given-names>A</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>M</given-names>
</name>
<name>
<surname>Zhmoginov</surname> <given-names>A</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>L-C</given-names>
</name>
</person-group>. &#x201c;<article-title>MobileNetV2: Inverted residuals and linear bottlenecks</article-title>,&#x201d; <source>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>, <publisher-loc>Salt Lake City, UT, USA</publisher-loc>. (<year>2018</year>). p. <page-range>4510&#x2013;20</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/CVPR.2018.00474</pub-id>
</citation></ref>
<ref id="B20">
<label>20</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Chollet</surname> <given-names>F</given-names>
</name>
</person-group>. &#x201c;<article-title>Xception: Deep learning with depthwise separable convolutions</article-title>,&#x201d; In: <source>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</source>, <publisher-loc>Honolulu, HI, USA</publisher-loc> (<year>2017</year>), pp. <fpage>1800</fpage>-<lpage>1807</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/CVPR.2017.195</pub-id>
</citation></ref>
<ref id="B21">
<label>21</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ge</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>S</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>F</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>J</given-names>
</name>
</person-group>. <article-title>YOLOX: Exceeding YOLO Series in 2021</article-title>. <source>arXiv</source> (<year>2021</year>). doi:&#xa0;<pub-id pub-id-type="doi">10.48550/ARXIV.2107.08430</pub-id>
</citation></ref>
<ref id="B22">
<label>22</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hirsch</surname> <given-names>FR</given-names>
</name>
<name>
<surname>McElhinny</surname> <given-names>A</given-names>
</name>
<name>
<surname>Stanforth</surname> <given-names>D</given-names>
</name>
<name>
<surname>Ranger-Moore</surname> <given-names>J</given-names>
</name>
<name>
<surname>Jansson</surname> <given-names>M</given-names>
</name>
<name>
<surname>Kulangara</surname> <given-names>K</given-names>
</name>
<etal/>
</person-group>. <article-title>PD-L1 immunohistochemistry assays for lung cancer: results from phase 1 of the blueprint PD-L1 IHC assay comparison project</article-title>. <source>J Thorac Oncol</source>. (<year>2017</year>) <volume>12</volume>:<page-range>208&#x2013;22</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jtho.2016.11.2228</pub-id>, PMID: <pub-id pub-id-type="pmid">27913228</pub-id></citation></ref>
<ref id="B23">
<label>23</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Crosta</surname> <given-names>S</given-names>
</name>
<name>
<surname>Boldorini</surname> <given-names>R</given-names>
</name>
<name>
<surname>Bono</surname> <given-names>F</given-names>
</name>
<name>
<surname>Brambilla</surname> <given-names>V</given-names>
</name>
<name>
<surname>Dainese</surname> <given-names>E</given-names>
</name>
<name>
<surname>Fusco</surname> <given-names>N</given-names>
</name>
<etal/>
</person-group>. <article-title>PD-L1 testing and squamous cell carcinoma of the head and neck: A multicenter study on the diagnostic reproducibility of different protocols</article-title>. <source>Cancers (Basel)</source>. (<year>2021</year>) <volume>13</volume>:<fpage>292</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/cancers13020292</pub-id>, PMID: <pub-id pub-id-type="pmid">33466794</pub-id></citation></ref>
<ref id="B24">
<label>24</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bera</surname> <given-names>K</given-names>
</name>
<name>
<surname>Schalper</surname> <given-names>KA</given-names>
</name>
<name>
<surname>Rimm</surname> <given-names>DL</given-names>
</name>
<name>
<surname>Velcheti</surname> <given-names>V</given-names>
</name>
<name>
<surname>Madabhushi</surname> <given-names>A</given-names>
</name>
</person-group>. <article-title>Artificial intelligence in digital pathology - new tools for diagnosis and precision oncology</article-title>. <source>Nat Rev Clin Oncol</source>. (<year>2019</year>) <volume>16</volume>:<page-range>703&#x2013;15</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41571-019-0252-y</pub-id>, PMID: <pub-id pub-id-type="pmid">31399699</pub-id></citation></ref>
<ref id="B25">
<label>25</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vandenberghe</surname> <given-names>ME</given-names>
</name>
<name>
<surname>Scott</surname> <given-names>ML</given-names>
</name>
<name>
<surname>Scorer</surname> <given-names>PW</given-names>
</name>
<name>
<surname>S&#xf6;derberg</surname> <given-names>M</given-names>
</name>
<name>
<surname>Balcerzak</surname> <given-names>D</given-names>
</name>
<name>
<surname>Barker</surname> <given-names>C</given-names>
</name>
</person-group>. <article-title>Relevance of deep learning to facilitate the diagnosis of HER2 status in breast cancer</article-title>. <source>Sci Rep</source>. (<year>2017</year>) <volume>7</volume>:<fpage>45938</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/srep45938</pub-id>, PMID: <pub-id pub-id-type="pmid">28378829</pub-id></citation></ref>
<ref id="B26">
<label>26</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname> <given-names>S</given-names>
</name>
<name>
<surname>Yue</surname> <given-names>M</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>J</given-names>
</name>
<name>
<surname>Li</surname> <given-names>X</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>H</given-names>
</name>
<etal/>
</person-group>. <article-title>The role of artificial intelligence in accurate interpretation of HER2 immunohistochemical scores 0 and 1+ in breast cancer</article-title>. <source>Mod Pathol</source>. (<year>2023</year>) <volume>36</volume>:<fpage>100054</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.modpat.2022.100054</pub-id>, PMID: <pub-id pub-id-type="pmid">36788100</pub-id></citation></ref>
<ref id="B27">
<label>27</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saha</surname> <given-names>M</given-names>
</name>
<name>
<surname>Chakraborty</surname> <given-names>C</given-names>
</name>
<name>
<surname>Arun</surname> <given-names>I</given-names>
</name>
<name>
<surname>Ahmed</surname> <given-names>R</given-names>
</name>
<name>
<surname>Chatterjee</surname> <given-names>S</given-names>
</name>
</person-group>. <article-title>An advanced deep learning approach for ki-67 stained hotspot detection and proliferation rate scoring for prognostic evaluation of breast cancer</article-title>. <source>Sci Rep</source>. (<year>2017</year>) <volume>7</volume>:<fpage>3213</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-017-03405-5</pub-id>, PMID: <pub-id pub-id-type="pmid">28607456</pub-id></citation></ref>
<ref id="B28">
<label>28</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>J</given-names>
</name>
<name>
<surname>Zheng</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Mu</surname> <given-names>X</given-names>
</name>
<name>
<surname>Zuo</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Xu</surname> <given-names>B</given-names>
</name>
<name>
<surname>Jin</surname> <given-names>Y</given-names>
</name>
<etal/>
</person-group>. <article-title>Automated tumor proportion score analysis for PD-L1 (22C3) expression in lung squamous cell carcinoma</article-title>. <source>Sci Rep</source>. (<year>2021</year>) <volume>11</volume>:<fpage>15907</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-021-95372-1</pub-id>, PMID: <pub-id pub-id-type="pmid">34354151</pub-id></citation></ref>
<ref id="B29">
<label>29</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Zhen</surname> <given-names>T</given-names>
</name>
<name>
<surname>Fu</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y</given-names>
</name>
<name>
<surname>He</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Han</surname> <given-names>A</given-names>
</name>
<etal/>
</person-group>. <article-title>AI-powered segmentation of invasive carcinoma regions in breast cancer immunohistochemical whole-slide images</article-title>. <source>Cancers (Basel)</source>. (<year>2023</year>) <volume>16</volume>:<fpage>167</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/cancers16010167</pub-id>, PMID: <pub-id pub-id-type="pmid">38201594</pub-id></citation></ref>
<ref id="B30">
<label>30</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Feng</surname> <given-names>M</given-names>
</name>
<name>
<surname>Deng</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>L</given-names>
</name>
<name>
<surname>Jing</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Xu</surname> <given-names>L</given-names>
</name>
<etal/>
</person-group>. <article-title>Automated quantitative analysis of Ki-67 staining and HE images recognition and registration based on whole tissue sections in breast carcinoma</article-title>. <source>Diagn Pathol</source>. (<year>2020</year>) <volume>15</volume>:<fpage>65</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13000-020-00957-5</pub-id>, PMID: <pub-id pub-id-type="pmid">32471471</pub-id></citation></ref>
</ref-list>
</back>
</article>