<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Oncol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Oncology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Oncol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2234-943X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fonc.2026.1763859</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Identification of KRAS mutation in rectal cancer based on a 2.5D deep learning model</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Zhang</surname><given-names>Chengmeng</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3304423/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Jinge</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Chen</surname><given-names>Peng</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhou</surname><given-names>Yanyan</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Shen</surname><given-names>Jian</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Chen</surname><given-names>Guanfeng</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Radiology Department of Huzhou Central Hospital, Fifth School of Clinical Medicine of Zhejiang Chinese Medical University</institution>, <city>Huzhou</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>Medical School of Huzhou University</institution>, <city>Huzhou</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff3"><label>3</label><institution>Radiology Department of Changxing County Traditional Chinese Medicine Hospital</institution>, <city>Huzhou</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff4"><label>4</label><institution>Radiology Department of Quanzhou First Hospital Affiliated to Fujian Medical University</institution>, <city>Quanzhou</city>,&#xa0;<country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Jian Shen, <email xlink:href="mailto:noah80@163.com">noah80@163.com</email>; Guanfeng Chen, <email xlink:href="mailto:18005952659@163.com">18005952659@163.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-25">
<day>25</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>16</volume>
<elocation-id>1763859</elocation-id>
<history>
<date date-type="received">
<day>09</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>06</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>27</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Zhang, Li, Chen, Zhou, Shen and Chen.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Zhang, Li, Chen, Zhou, Shen and Chen</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-25">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Objective</title>
<p>To explore the utility of a 2.5D deep transfer learning (DTL) model for distinguishing between Kirsten rat sarcoma viral oncogene (KRAS) mutant and wild-type phenotypes in patients with rectal cancer (RC).</p>
</sec>
<sec>
<title>Methods</title>
<p>We retrospectively analyzed 138 patients with pathologically confirmed RC who underwent next-generation sequencing to detect KRAS mutations. Among these, 43 KRAS mutant and 95 wild-type cases were enrolled and divided randomly into a training set (30 mutant, 66 wild-type) and a validation set (13 mutant, 29 wild-type) in a 7:3 ratio. Tumor regions of interest (ROIs) were delineated manually slice-by-slice in thin-section arterial-phase computed tomography images. DTL and radiomic features were extracted from ROIs using 2.5D deep learning and traditional radiomic approaches, respectively. After feature-dimensionality reduction and selection, six machine learning models were employed to construct radiomic models and 2.5D deep learning models. The diagnostic performance of each model was evaluated using the area under the receiver operating characteristic curve (AUC).</p>
</sec>
<sec>
<title>Results</title>
<p>After feature selection, 10 radiomic features and 17 DTL features were included for model construction. The AUCs for the radiomic models ranged from 0.808&#x2013;0.988 in the training set and 0.521&#x2013;0.672 in the validation set, with the XGBoost classifier achieving the optimal performance (AUC = 0.672) in the validation set. The AUCs for the 2.5D deep learning models ranged from 0.950&#x2013;1.000 in the training set and 0.788&#x2013;0.913 in the validation set, with the support vector machine classifier demonstrating the best diagnostic efficacy (AUC = 0.913) in the validation set.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>A 2.5D deep learning model can effectively distinguish between KRAS mutant and KRAS wild-type RC, outperforming traditional radiomic models. It provides a novel non-invasive approach for the preoperative assessment of KRAS mutation status.</p>
</sec>
</abstract>
<kwd-group>
<kwd>deep transfer learning</kwd>
<kwd>gene mutation</kwd>
<kwd>radiomics</kwd>
<kwd>rectal cancer</kwd>
<kwd>X-ray computed tomography</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research was funded by the Fujian Provincial Natural Science Foundation (2023J011786) and the Quanzhou Science and Technology Program Project (2024NY008).</funding-statement>
</funding-group>
<counts>
<fig-count count="7"/>
<table-count count="3"/>
<equation-count count="0"/>
<ref-count count="29"/>
<page-count count="10"/>
<word-count count="4695"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Gastrointestinal Cancers: Colorectal Cancer</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Rectal cancer (RC) is a malignant gastrointestinal tumor with high morbidity and mortality, posing a significant threat to human health (<xref ref-type="bibr" rid="B1">1</xref>). Numerous previous studies have established the significance of the KRAS (Kirsten rat sarcoma viral oncogene homolog) gene and demonstrated its close correlation with the prognosis of RC (<xref ref-type="bibr" rid="B2">2</xref>). Several studies have also identified KRAS mutation as a negative predictive biomarker for treatment with epidermal growth factor receptor antibodies (<xref ref-type="bibr" rid="B3">3</xref>). Traditionally, KRAS mutations are detected using surgical or needle biopsies; however, these are invasive, carry certain risks, and may not be tolerated by some patients with advanced disease (<xref ref-type="bibr" rid="B4">4</xref>). Exploring non-invasive and efficient methods for KRAS mutation detection has thus become a focus of clinical research.</p>
<p>Computed tomography (CT), as a routine preoperative imaging modality for RC, can clearly display tumor morphology, density, and enhancement patterns, providing abundant biological information. In addition, artificial intelligence, which quantifies tumor characteristics by analyzing quantitative features imperceptible to the human eye, has shown promising potential for tumor classification, prognosis evaluation, and in other fields (<xref ref-type="bibr" rid="B5">5</xref>, <xref ref-type="bibr" rid="B6">6</xref>). Previous studies have suggested associations between KRAS mutation and factors such as tumor N stage and maximum tumor diameter (<xref ref-type="bibr" rid="B7">7</xref>). Additionally, high D* and low D values derived from intravoxel incoherent motion diffusion-weighted imaging have been reported to correlate significantly with KRAS mutation (<xref ref-type="bibr" rid="B8">8</xref>). However, these findings lack consensus, with substantial variations across studies, making it difficult to establish unified standards.</p>
<p>Deep learning, particularly convolutional neural networks, has recently achieved notable progress in terms of disease diagnosis, prognosis prediction, and treatment response assessment (<xref ref-type="bibr" rid="B9">9</xref>). Lui et&#xa0;al. (<xref ref-type="bibr" rid="B10">10</xref>) utilized the MobileNetV2 deep learning network to extract high-dimensional image features, achieving favorable performance in predicting KRAS mutation status in patients with RC. Similar to conventional radiomic methods, however, this method required manual delineation of the three-dimensional (3D) tumor region, resulting in substantial workload and time costs. The present study accordingly aimed to construct a 2.5D deep learning-based predictive model for distinguishing between KRAS mutant and wild-type RC. We also aimed to compare the diagnostic performance of this model with conventional radiomic models to explore the clinical application value of this 2.5D deep learning model for the preoperative, non-invasive assessment of KRAS mutation status.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Data collection</title>
<p>This retrospective study analyzed data for patients with pathologically confirmed RC after surgery, and who underwent KRAS mutation detection via next-generation sequencing at Quanzhou First Affiliated Hospital of Fujian Medical University between April 2022 and February 2025. All patients underwent whole-abdominal plain CT plus dual-phase enhanced scanning within 2 weeks before surgery.</p>
<p>The inclusion criteria were pathologically confirmed RC with gene detection results, complete imaging data with excellent image quality, and patients without intestinal diseases such as ulcerative colitis or Crohn&#x2019;s disease. The exclusion criteria were receipt of anti-tumor treatments (e.g., radiotherapy, chemotherapy) prior to gene detection, tumor involvement extending to the sigmoid colon or descending colon, tumor invasion of surrounding tissues leading to unfeasible region of interest (ROI) delineation, and ROI that failed to fully cover the entire tumor tissue.</p>
<p>After applying the inclusion and exclusion criteria, a total of 138 patients were enrolled in the study (93 men, 45 women; age 33&#x2013;87 years), including 43 KRAS mutant and 95 wild-type cases. This retrospective study was approved by the Ethics Committee of Quanzhou First Affiliated Hospital of Fujian Medical University (Approval No.: Quan-Yi-Lun-2024-K133), in compliance with the Declaration of Helsinki. The need for informed consent was waived for all patients.</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Equipment and scanning protocol</title>
<p>Routine whole-abdominal scanning was carried out using a GE 64-slice CT scanner (Optima CT660, General Electric, USA). Patients were placed in a supine position, with a scanning range from the dome of the diaphragm to the lower edge of the pubic symphysis. The scanning parameters were as follows: tube voltage 120 kV, automatic tube current, slice thickness 5 mm, and slice interval 5 mm. Enhanced scanning was carried out using the time method: after plain scanning, iohexol (iodine concentration: 300 mg/mL) was injected via an antecubital vein cannula using a high-pressure injector at a flow rate of 3.0 mL/s. Arterial- and venous-phase scans were initiated at 30 s and 60 s after the start of injection, respectively.</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>ROI delineation</title>
<p>All arterial-phase CT images were standardized to a voxel spacing of 1 mm &#xd7; 1 mm &#xd7; 1 mm to ensure a consistent resolution. ROIs were delineated by two radiologists (Physician A with &gt; 5 years of diagnostic experience and Physician B with &gt; 15 years of diagnostic experience) using ITK-SNAP software (version 4.2.2). The transverse tumor range was delineated slice-by-slice as the ROI to extract radiomic and 2.5D deep transfer learning (DTL) features (<xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref>).</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Workflow of the key steps in our study.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1763859-g001.tif">
<alt-text content-type="machine-generated">Flowchart illustration showing three sections: tumor segmentation using CT scans with highlighted tumor in red, feature extraction via radiomics and 2.5D deep learning (ResNet-50) using image slices, and feature selection and model construction using LASSO regression and ROC curve analyses.</alt-text>
</graphic></fig>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Extraction of DTL and radiomic features</title>
<p>Radiomic features were extracted from segmented images using the open-source Python library PyRadiomics tool (version 3.0.1). Scikit-Learn (version 1.0.2) was employed to implement machine learning models for classification, regression, and feature selection. Deep learning model development was carried out using PyTorch (version 1.11.0), as a widely adopted deep learning framework. To improve computational efficiency, CUDA (version 11.3.1) and CUDNN (version 8.2.1) were used to reduce the training time and enable more efficient model optimization. Unlike traditional 2D deep learning models that only use the maximum cross-section for training, the 2.5D deep learning approach selected five slices for each lesion: the maximum slice and the adjacent slices at the &#xb1;1 and &#xb1;2 positions. This method retains spatial information to a certain extent and ensures sufficient capture of image structural details. The ResNet50 deep convolutional network architecture was subsequently selected as the basic DTL model in the PyTorch deep learning library. Image features were extracted using convolutional layer modules, and DTL features were finally output through a global average pooling layer. A multiple instance learning (MIL) framework was integrated into our 2.5D deep learning model, enabling flexible adaptation to tumor lesions with variable slice counts without the need for preprocessing steps such as slice padding or truncation. Specifically, for lesions with fewer than 5 slices, the model directly performs feature extraction and aggregation using the available slices, thereby preserving the original imaging information of small lesions to the greatest extent. For lesions with 5 or more slices, 5 consecutive slices covering the tumor core region were preferentially selected as input; this approach maximizes the retention of tumor spatial heterogeneity and ensures the stability of model performance.</p>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Feature dimensionality reduction, selection, and diagnostic model construction</title>
<p>The extracted radiomic features and 2.5D deep learning features were initially standardized using the z-score to ensure consistency across the entire dataset. The dataset was randomly partitioned into a training set and validation set using a ratio of 7:3. Stratified random sampling was employed with tumor mutation status as the stratification criterion to ensure that the distribution of mutation rates was consistent in the training and validation cohorts. This was followed by t-tests to assess the statistical significance of each feature. Only features with a P-value &lt; 0.05 were retained. To mitigate multicollinearity, Pearson&#x2019;s correlation coefficients were used to evaluate the correlation between each pair of features, and one feature from each pair with a correlation coefficient &gt; 0.9 was excluded. The least absolute shrinkage and selection operator (LASSO) was applied for dimensionality reduction and feature selection of the extracted DTL and radiomic features. Diagnostic models were constructed using six classifiers: support vector machine, K-nearest neighbor, RandomForest, ExtraTrees, XGBoost, and LightGBM. Sample size was calculated for an expected area under the receiver operating characteristic curve (AUC) value of 0.85 (<xref ref-type="supplementary-material" rid="SM1"><bold>Supplementary File 1</bold></xref>). The performance of different diagnostic models was evaluated using the AUC. Decision curve analysis (DCA) was used to assess the clinical net benefit of the models. Calibration curves were plotted to evaluate the goodness-of-fit of the models, and the Hosmer&#x2013;Lemeshow test was adopted to verify the consistency of the calibration curves.</p>
</sec>
<sec id="s2_6">
<label>2.6</label>
<title>Statistical analysis</title>
<p>Statistical analyses were performed using Python 3.12, SPSS 26.0, and R 4.2.1 software. The Kolmogorov&#x2013;Smirnov test was used to assess the normality of continuous data. Normally distributed data were expressed as mean &#xb1; standard deviation, while skewed data were presented as quartiles. Categorical data were compared between groups using Pearson&#x2019;s &#x3c7;<sup>2</sup> test, and continuous data were compared using an independent samples t-test (for normally distributed data) or Mann&#x2013;Whitney U test (for skewed distributed data).</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="s3_1">
<label>3.1</label>
<title>Comparison of clinical data</title>
<p>There was no significant difference between the KRAS mutant and wild-type groups in terms of patient age, sex, or tumor markers (carcinoembryonic antigen, carbohydrate antigen 199, or carbohydrate antigen 125) (all P &gt; 0.05) (<xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>).</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Comparison of clinical characteristics between KRAS mutant and wild-type rectal cancer patients.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Factor</th>
<th valign="middle" align="center">KRAS mutant (n=43)</th>
<th valign="middle" align="center">KRAS wild-type (n=95)</th>
<th valign="middle" align="center">Statistic</th>
<th valign="middle" align="center">P-value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Age, y (M &#xb1; SD)</td>
<td valign="middle" align="center">65.49 &#xb1; 11.24</td>
<td valign="middle" align="center">64.18 &#xb1; 11.21</td>
<td valign="middle" align="center">0.635<sup>t</sup></td>
<td valign="middle" align="center">0.527</td>
</tr>
<tr>
<td valign="middle" align="center">Gender, n (Male/Female)</td>
<td valign="middle" align="center">33/10</td>
<td valign="middle" align="center">60/35</td>
<td valign="middle" align="center">2.468<sup>a</sup></td>
<td valign="middle" align="center">0.115</td>
</tr>
<tr>
<td valign="middle" align="center">CEA elevation, n (yes/no)</td>
<td valign="middle" align="center">22/21</td>
<td valign="middle" align="center">34/61</td>
<td valign="middle" align="center">2.901<sup>a</sup></td>
<td valign="middle" align="center">0.089</td>
</tr>
<tr>
<td valign="middle" align="center">CA199 elevation, n (yes/no)</td>
<td valign="middle" align="center">14/29</td>
<td valign="middle" align="center">18/77</td>
<td valign="middle" align="center">3.079<sup>a</sup></td>
<td valign="middle" align="center">0.079</td>
</tr>
<tr>
<td valign="middle" align="center">CA125 elevation, n (yes/no)</td>
<td valign="middle" align="center">8/35</td>
<td valign="middle" align="center">16/79</td>
<td valign="middle" align="center">0.064<sup>a</sup></td>
<td valign="middle" align="center">0.800</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p><sup>t</sup>t-value; <sup>a</sup>Chi-square test.</p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Feature selection and model construction</title>
<p>A total of 960 radiomic features and 2048 DTL features were extracted from the arterial-phase images of each patient. After excluding features with intraclass correlation coefficients &lt; 0.8, followed by dimensionality reduction and selection, 10 radiomic features were used for radiomic model construction and 17 DTL features were used for deep learning model construction (<xref ref-type="fig" rid="f2"><bold>Figures&#xa0;2</bold></xref>, <xref ref-type="fig" rid="f3"><bold>3</bold></xref>).</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Feature dimensionality reduction and selection for radiomic models (<bold>A, B</bold>: Schematic diagrams of feature selection and dimensionality reduction; <bold>C</bold>: Selected features and their weights after dimensionality reduction and selection).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1763859-g002.tif">
<alt-text content-type="machine-generated">Panel (a) shows a line graph of LASSO coefficients versus lambda values with multiple colored lines representing different features. Panel (b) presents a line graph with error bars illustrating mean squared error against lambda values for model selection. Panel (c) displays a horizontal bar chart ranking feature coefficients, labeled by feature names, for feature importance interpretation.</alt-text>
</graphic></fig>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Feature dimensionality reduction and selection for deep transfer learning models (<bold>A, B</bold>: Schematic diagrams of feature selection and dimensionality reduction; <bold>C</bold>: Selected features and their weights after dimensionality reduction and selection).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1763859-g003.tif">
<alt-text content-type="machine-generated">Panel (a) shows a line chart of lasso regression coefficients versus log-scaled lambda values with a vertical dashed line indicating optimal lambda. Panel (b) displays a line chart of mean squared error (MSE) with error bars as a function of lambda, highlighting minimum MSE at the same vertical dashed line. Panel (c) presents a horizontal bar chart of regression coefficients for various feature names, with both positive and negative values visible.</alt-text>
</graphic></fig>
<p>Radiomic and 2.5D deep learning models were established based on the selected radiomic and DTL features, respectively. The AUCs of the radiomic models ranged from 0.808&#x2013;0.988 in the training set and 0.521&#x2013;0.672 in the validation set, and the AUCs of the 2.5D deep learning models ranged from 0.950&#x2013;1.000 in the training set and 0.788&#x2013;0.913 in the validation set. The 2.5D deep learning models exhibited excellent classification performance for identifying KRAS gene mutations in patients with RC, outperforming traditional radiomic models. In the validation set, the optimal classifier for the radiomic models was the XGBoost model (AUC = 0.672), while the best classifier for the deep learning models was the SVM model (AUC = 0.913) (<xref ref-type="table" rid="T2"><bold>Tables&#xa0;2</bold></xref>, <xref ref-type="table" rid="T3"><bold>3</bold></xref>; <xref ref-type="fig" rid="f4"><bold>Figures&#xa0;4</bold></xref>, <xref ref-type="fig" rid="f5"><bold>5</bold></xref>).</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Diagnostic performance of radiomic models.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">Model</th>
<th valign="top" align="center">Cohort</th>
<th valign="top" align="center">AUC (95% CI)</th>
<th valign="top" align="center">Accuracy</th>
<th valign="top" align="center">Sensitivity</th>
<th valign="top" align="center">Specificity</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" rowspan="2" align="center">SVM</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.904(0.845-0.963)</td>
<td valign="top" align="center">0.823</td>
<td valign="top" align="center">0.967</td>
<td valign="top" align="center">0.758</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.592(0.402-0.781)</td>
<td valign="top" align="center">0.595</td>
<td valign="top" align="center">0.769</td>
<td valign="top" align="center">0.517</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">KNN</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.808(0.724-0.893)</td>
<td valign="top" align="center">0.698</td>
<td valign="top" align="center">0.767</td>
<td valign="top" align="center">0.667</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.521(0.316-0.727)</td>
<td valign="top" align="center">0.738</td>
<td valign="top" align="center">0.154</td>
<td valign="top" align="center">1.000</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">RF</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.928(0.878-0.977)</td>
<td valign="top" align="center">0.844</td>
<td valign="top" align="center">0.833</td>
<td valign="top" align="center">0.848</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.566(0.363-0.770)</td>
<td valign="top" align="center">0.714</td>
<td valign="top" align="center">0.385</td>
<td valign="top" align="center">0.862</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">ExtraTrees</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.815(0.721-0.909)</td>
<td valign="top" align="center">0.771</td>
<td valign="top" align="center">0.767</td>
<td valign="top" align="center">0.773</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.618(0.438-0.798)</td>
<td valign="top" align="center">0.524</td>
<td valign="top" align="center">1.000</td>
<td valign="top" align="center">0.310</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">XGBoost</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.988(0.973-1.000)</td>
<td valign="top" align="center">0.948</td>
<td valign="top" align="center">0.933</td>
<td valign="top" align="center">0.955</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.672(0.506-0.839)</td>
<td valign="top" align="center">0.571</td>
<td valign="top" align="center">1.000</td>
<td valign="top" align="center">0.379</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">LightGBM</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.854(0.774-0.934)</td>
<td valign="top" align="center">0.823</td>
<td valign="top" align="center">0.700</td>
<td valign="top" align="center">0.879</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.581(0.387-0.775)</td>
<td valign="top" align="center">0.548</td>
<td valign="top" align="center">0.692</td>
<td valign="top" align="center">0.483</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>AUC, the area under the curve; 95% CI, 95% confidence interval; SVM, support vector machines; KNN, K Nearest Neighbors; RF, Random Forest; ExtraTrees, Extremely Randomized Trees; XGBoost, Extreme Gradient Boosting; LightGBM, Light Gradient Boosting Machine.</p></fn>
</table-wrap-foot>
</table-wrap>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Diagnostic performance of deep learning models.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">Model</th>
<th valign="top" align="center">Cohort</th>
<th valign="top" align="center">AUC (95% CI)</th>
<th valign="top" align="center">Accuracy</th>
<th valign="top" align="center">Sensitivity</th>
<th valign="top" align="center">Specificity</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" rowspan="2" align="center">SVM</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.997(0.991-1.000)</td>
<td valign="top" align="center">0.990</td>
<td valign="top" align="center">0.967</td>
<td valign="top" align="center">1.000</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.913(0.825-1.000)</td>
<td valign="top" align="center">0.881</td>
<td valign="top" align="center">0.846</td>
<td valign="top" align="center">0.897</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">KNN</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.950(0.911-0.988)</td>
<td valign="top" align="center">0.896</td>
<td valign="top" align="center">0.833</td>
<td valign="top" align="center">0.924</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.842(0.709-0.975)</td>
<td valign="top" align="center">0.833</td>
<td valign="top" align="center">0.769</td>
<td valign="top" align="center">0.862</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">RF</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.985(0.967-1.000)</td>
<td valign="top" align="center">0.948</td>
<td valign="top" align="center">0.933</td>
<td valign="top" align="center">0.955</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.822(0.694-0.951)</td>
<td valign="top" align="center">0.762</td>
<td valign="top" align="center">0.923</td>
<td valign="top" align="center">0.690</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">ExtraTrees</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.980(0.956-1.000)</td>
<td valign="top" align="center">0.958</td>
<td valign="top" align="center">0.933</td>
<td valign="top" align="center">0.970</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.788(0.632-0.944)</td>
<td valign="top" align="center">0.833</td>
<td valign="top" align="center">0.462</td>
<td valign="top" align="center">1.000</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">XGBoost</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">1.000(1.000-1.000)</td>
<td valign="top" align="center">1.000</td>
<td valign="top" align="center">1.000</td>
<td valign="top" align="center">1.000</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.825(0.679-0.971)</td>
<td valign="top" align="center">0.810</td>
<td valign="top" align="center">0.692</td>
<td valign="top" align="center">0.862</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="center">LightGBM</td>
<td valign="top" align="center">Training</td>
<td valign="top" align="center">0.968(0.939-0.998)</td>
<td valign="top" align="center">0.906</td>
<td valign="top" align="center">0.967</td>
<td valign="top" align="center">0.879</td>
</tr>
<tr>
<td valign="top" align="center">Validation</td>
<td valign="top" align="center">0.802(0.654-0.951)</td>
<td valign="top" align="center">0.762</td>
<td valign="top" align="center">0.692</td>
<td valign="top" align="center">0.793</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>AUC, the area under the curve; 95% CI, 95% confidence interval; SVM, support vector machines; KNN, K Nearest Neighbors; RF, Random Forest; ExtraTrees, Extremely Randomized Trees; XGBoost, Extreme Gradient Boosting; LightGBM, Light Gradient Boosting Machine.</p></fn>
</table-wrap-foot>
</table-wrap>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>ROC curve results of radiomic diagnostic models (<bold>A</bold>: Training set; <bold>B</bold>: Validation set).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1763859-g004.tif">
<alt-text content-type="machine-generated">Two side-by-side receiver operating characteristic (ROC) curve plots comparing six machine learning classifiers&#x2014;SVM, KNN, RF, ExtraTrees, XGBoost, and LightGBM&#x2014;demonstrate their performance with area under the curve (AUC) values and confidence intervals. Panel (a) shows higher AUC values across all classifiers, with XGBoost performing best. Panel (b) shows substantially lower AUC values, indicating reduced performance for all classifiers compared to panel (a). Legends and color-coded lines distinguish the models.</alt-text>
</graphic></fig>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>ROC curve results of 2.5D deep learning diagnostic models (<bold>A</bold>: Training set; <bold>B</bold>: Validation set).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1763859-g005.tif">
<alt-text content-type="machine-generated">Two side-by-side ROC curve graphs compare the performance of six machine learning models&#x2014;SVM, KNN, RF, ExtraTrees, XGBoost, and LightGBM&#x2014;using different colored lines. Graph (a) shows higher AUC values, with XGBoost achieving perfect AUC, while graph (b) displays lower AUC values across all models. Legends beneath each graph list the models and their AUC scores with confidence intervals. Both axes are labeled sensitivities and 1-specificities.</alt-text>
</graphic></fig>
<p>For the radiomic models, the goodness-of-fit of the XGBoost model was assessed using the Hosmer&#x2013;Lemeshow test, which yielded non-significant statistical results (training set: &#x3c7;&#xb2; = 0.467, P = 0.977; validation set: &#x3c7;&#xb2; = 4.925, P = 0.295). These findings indicate that the model did not significantly deviate from good calibration, and the calibration curves demonstrated favorable calibration performance. DCA revealed that the clinical net benefits of all models were generally comparable in the validation set (<xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>). For the deep learning models, the Hosmer&#x2013;Lemeshow test was performed to evaluate the goodness-of-fit of the SVM model, with non-significant results obtained (training set: &#x3c7;&#xb2; = 1.942, P = 0.747; validation set: &#x3c7;&#xb2; = 3.349, P = 0.501). This suggests that the SVM model was well calibrated. The calibration curves further confirmed its satisfactory calibration performance. DCA further showed that the SVM model achieved superior clinical net benefit compared with other models in the validation set (<xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7</bold></xref>).</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Decision curve analysis (DCA) curves of different radiomics models in the validation set <bold>(A)</bold>; Calibration curve of the radiomics-based XGBoost model in the validation set <bold>(B)</bold>. DCA (Decision curve analysis); Apparent (fitting line); Bias-corrected (deviation correction curve); Ideal (reference line).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1763859-g006.tif">
<alt-text content-type="machine-generated">Panel (a) presents a line graph comparing net benefit versus threshold probability for several machine learning algorithms including SVM, KNN, RF, ExtraTrees, XGBoost, LightGBM, as well as 'All' and 'None' references. Panel (b) displays a calibration plot showing observed probability against predicted probability, with lines representing apparent, bias-corrected, and ideal models for model prediction reliability assessment.</alt-text>
</graphic></fig>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Decision curve analysis (DCA) curves of different 2.5D deep learning models in the validation set <bold>(A)</bold>; Calibration curve of the 2.5D deep learning-based SVM model in the validation set <bold>(B)</bold>. DCA (Decision curve analysis); Apparent (fitting line); Bias-corrected (deviation correction curve); Ideal (reference line).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1763859-g007.tif">
<alt-text content-type="machine-generated">Figure panels show two machine learning evaluation plots. Panel a compares net benefit versus threshold probability for multiple models including SVM, KNN, RF, ExtraTrees, XGBoost, LightGBM, All, and None, each represented by a different colored line. Panel b displays a calibration plot of observed probability versus predicted probability with lines for apparent, bias-corrected, and ideal performance. Both plots include legends identifying each curve.</alt-text>
</graphic></fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>This study explored the construction of diagnostic models based on CT radiomic features and 2.5D deep learning features to distinguish between KRAS mutant and wild-type RC. Six machine learning models were constructed based on radiomic and 2.5D deep learning features. The results showed that the diagnostic performances of the 2.5D deep learning models in the validation set were significantly superior to those of traditional radiomic models (optimal classifier: SVM model with AUC = 0.913), thus providing a novel approach for the preoperative, non-invasive assessment of KRAS mutations in patients with RC.</p>
<p>The KRAS gene is the most commonly mutated oncogene in human cancers, with &gt; 50% of colorectal cancer cases harboring KRAS mutations (<xref ref-type="bibr" rid="B11">11</xref>). KRAS mutations can lead to continuous activation of the Ras/Raf/mitogen-activated protein kinase signaling pathway, promoting tumor cell proliferation and rendering anti-epidermal growth factor receptor monoclonal antibody therapy ineffective (<xref ref-type="bibr" rid="B12">12</xref>). The early identification of KRAS mutations is therefore crucial to support the personalized treatment of patients with RC. However, traditional post-biopsy genetic testing takes time, and some patients cannot tolerate the trauma of biopsy. There is thus an urgent need for non-invasive and efficient methods for assessing KRAS mutations in patients with RC. Li et&#xa0;al. (<xref ref-type="bibr" rid="B13">13</xref>) achieved excellent predictive results for perineural invasion and KRAS mutation in colon cancer using machine learning methods based on preoperative portal venous-phase CT images. This study, however, adopted traditional radiomic methods, which may lack high-dimensional features, thereby affecting model accuracy. In addition, the delineation of 3D tumor regions significantly increases the workload of researchers. In this study, we used 2.5D deep learning technology to construct and validate a model using the maximum tumor slice and two adjacent slices above and below (a total of 5 slices). The resulting 2.5D deep learning model outperformed the traditional 3D radiomic model, in terms of improving the diagnostic performance and reducing the workload of researchers for delineating tumor lesions. Zhao et&#xa0;al. (<xref ref-type="bibr" rid="B14">14</xref>) constructed a habitat radiomic model based on 18F-fluorodeoxyglucose-positron emission tomography images of 62 patients to identify KRAS/NRAS/BRAF mutations in patients with RC. 
The resulting AUC values for the training and validation cohorts were 0.759 and 0.701, respectively. Furthermore, the SHapley Additive exPlanations method indicated that radiomic features derived from the tumor microenvironment had the greatest impact on model prediction. Compared with functional molecular imaging techniques such as 18F-fluorodeoxyglucose-positron emission tomography, CT offers fast imaging, high resolution, and cost-effectiveness.</p>
<p>Magnetic resonance imaging (MRI) can obtain excellent soft tissue resolution and thus performs an important role in evaluating the local infiltration range and lymph node metastasis of RC (<xref ref-type="bibr" rid="B15">15</xref>,&#xa0;<xref ref-type="bibr" rid="B16">16</xref>). Cui et&#xa0;al. (<xref ref-type="bibr" rid="B17">17</xref>) used radiomic methods based on T2-weighted images and applied three classification methods (logistic regression, decision tree, and support vector machine) to identify KRAS mutations in RC, achieving an AUC of 0.714 in the external validation set. Zhang et&#xa0;al. (<xref ref-type="bibr" rid="B18">18</xref>) extracted T2-weighted MRI radiomic features from 83 patients with RC to predict KRAS mutant and wild-type phenotypes, with a C-index of 0.703 in the validation set, which was lower than that in the present study. In a prospective study, Yuan et&#xa0;al. (<xref ref-type="bibr" rid="B19">19</xref>) enrolled 73 patients with RC and collected MRI scans using the intravoxel incoherent motion-diffusion kurtosis imaging sequence. A comparison of the imaging parameters of the KRAS mutant group (apparent diffusion coefficient, true diffusion coefficient, diffusion kurtosis, perfusion fraction, and pseudo-diffusion coefficient) with those of the wild-type group demonstrated that the apparent diffusion coefficient, true diffusion coefficient, and diffusion kurtosis values in the KRAS mutant group were statistically significantly different from those in the wild-type group (P &lt; 0.05). Among these parameters, diffusion kurtosis exhibited the optimal diagnostic performance, with an AUC of 0.779, suggesting that this parameter has a high importance for distinguishing the KRAS gene status. 
In this study, a 2.5D deep learning model constructed based on CT images showed superior performance in predicting the KRAS gene status of RC patients, and its diagnostic AUC was significantly higher than those reported in the MRI-related studies mentioned above. In addition to its superior diagnostic efficacy, CT examination has distinct advantages in clinical application. On the one hand, CT examination has irreplaceable clinical value for patients with contraindications to MRI scanning. On the other hand, in primary and remote areas with relatively limited medical resources, CT equipment has lower maintenance costs and its examination procedures are convenient and efficient. Thus, it is more suitable for widespread deployment and popularization. The establishment of the model developed in this study is expected to meet the clinical demand for non-invasive assessment of RC gene status in primary medical institutions.</p>
<p>DTL features based on the ResNet pre-trained model can effectively capture macro-imaging features such as tumor edge characteristics and overall spatial distribution, thereby accurately identifying imaging differences under different gene mutation states (<xref ref-type="bibr" rid="B20">20</xref>). In addition, this method achieves efficient learning of high-level semantic features in medical images by transferring general features from the field of natural images, which can reduce the risk of overfitting in small-sample medical-data scenarios. This may be the key reason why the deep learning model in this study maintained a stable predictive performance in the validation set. Gan et&#xa0;al. (<xref ref-type="bibr" rid="B21">21</xref>) predicted the <italic>KRAS</italic> mutation status of RC patients using transrectal ultrasound images, and their findings indicated that the diagnostic performance of the deep learning model constructed based on the ResNet50 deep learning network outperformed that of the conventional radiomic model. Yang et&#xa0;al. (<xref ref-type="bibr" rid="B22">22</xref>) successfully predicted lymph node metastasis in RC patients by establishing a deep learning model, whereas Sun et&#xa0;al. (<xref ref-type="bibr" rid="B23">23</xref>) applied a multiparametric MRI-based deep learning model to identify synchronous liver metastases in RC patients. Collectively, these studies have demonstrated that deep learning models have substantial clinical utility for predicting critical clinical indicators in patients with RC, including prognosis and gene mutation status. Currently, relevant research on 2.5D deep learning models for RC patients remains relatively limited. Nevertheless, this technology has exhibited excellent predictive performance in the diagnosis and prognostic evaluation of other clinical lesions, thereby providing a precedent for its application in the field of RC. For example, Cen et&#xa0;al. 
(<xref ref-type="bibr" rid="B24">24</xref>) successfully predicted the early recurrence of hepatocellular carcinoma by constructing a 2.5D deep learning model. Li et&#xa0;al. (<xref ref-type="bibr" rid="B25">25</xref>) constructed a predictive model for diffuse gliomas using preoperative multiparametric magnetic resonance imaging. They adopted the ResNet18 deep learning network and selected images of the maximum tumor diameter and two adjacent slices above and below to construct a 2.5D deep learning framework. They showed that the predictive performance of this 2.5D model was significantly superior to that of traditional radiomic models, with an AUC of 0.85&#x2013;0.89 in the validation set. Huang et&#xa0;al. (<xref ref-type="bibr" rid="B26">26</xref>) also confirmed the advantages of 2.5D deep learning. Their constructed model successfully predicted occult lymph node metastasis in lung adenocarcinoma, with a performance superior to traditional radiomic models, consistent with the conclusions of the current study. The predictive models constructed based on six machine learning models in this study all showed better performance than the radiomic models in the validation set. Further related studies comparing the predictive performances of 2.5D and 3D deep learning models confirmed the diagnostic advantages of 2.5D models (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B28">28</xref>). First, by integrating 2D slices of multiple key layers, 2.5D models effectively retain core spatial correlation information for the ROI without the need to construct a complete 3D structure. Second, compared with 3D models, 2.5D models require less training data, enabling stable model training even with a small number of samples. They also require fewer computational resources, and data delineation only needs to focus on the key layers, thus greatly reducing the time and labor costs of data annotation and preprocessing. 
Third, compared with traditional 2D models, 2.5D models can capture complex anatomical structure details and disease-specific features in images more comprehensively and accurately through multi-slice information fusion, ultimately improving prediction accuracy while ensuring the efficiency and practicality of clinical application.</p>
<p>The results demonstrated that of the radiomics models, the XGBoost model achieved the optimal diagnostic performance, with an AUC of 0.988 in the training set; however, the AUC decreased to 0.672 in the validation set, indicating poor model stability and an elevated risk of overfitting. This phenomenon could be attributed to the relatively small sample size and excessively high dimensionality of the extracted features in the present study. When the feature dimensionality is far greater than the sample size, the data tend to exhibit a sparse distribution in the high-dimensional space, leading to the so-called &#x201c;curse of dimensionality&#x201d; (<xref ref-type="bibr" rid="B29">29</xref>). In contrast, the SVM prediction model constructed based on 2.5D deep learning yielded an AUC of 0.997 in the training set and 0.913 in the validation set, with an extremely small discrepancy in performance between the two sets and no evidence of model overfitting.</p>
<p>This study had several limitations. First, it was a retrospective study and thus inevitably subject to selection bias. Second, it was a single-center, small-sample study, and further validation is needed using large-sample, multi-center data. Third, image delineation was performed manually and was thus inevitably affected by the subjectivity of the delineator. Further studies should apply more advanced artificial intelligence algorithms to improve the efficiency and accuracy of segmentation. Finally, this study used arterial-phase enhanced data for model construction, and future research should verify the ability of multi-parametric imaging data to improve model diagnostic performance.</p>
</sec>
<sec id="s5" sec-type="conclusions">
<label>5</label>
<title>Conclusions</title>
<p>2.5D deep learning models can effectively distinguish between KRAS mutant and wild-type RC, with better diagnostic performance than traditional radiomic models, thus providing a new non-invasive method and insights for the preoperative assessment of patients with RC.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1"><bold>Supplementary Material</bold></xref>. Further inquiries can be directed to the corresponding authors.</p></sec>
<sec id="s7" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Ethics Committee of Quanzhou First Affiliated Hospital of Fujian Medical University (Approval No.: Quan-Yi-Lun-2024-K133). The studies were conducted in accordance with the local legislation and institutional requirements. The ethics committee/institutional review board waived the requirement of written informed consent for participation from the participants or the participants&#x2019; legal guardians/next of kin because this study is a retrospective analysis.</p></sec>
<sec id="s8" sec-type="author-contributions">
<title>Author contributions</title>
<p>CZ: Data curation, Writing &#x2013; review &amp; editing, Methodology, Writing &#x2013; original draft. JL: Writing &#x2013; original draft, Investigation. PC: Writing &#x2013; original draft, Investigation. YZ: Methodology, Investigation, Writing &#x2013; original draft. JS: Writing &#x2013; review &amp; editing, Supervision, Funding acquisition, Project administration. GC: Supervision, Funding acquisition, Project administration, Writing &#x2013; review &amp; editing.</p></sec>
<ack>
<title>Acknowledgments</title>
<p>We thank Susan Furness, PhD, from Liwen Bianji (Edanz) (<ext-link ext-link-type="uri" xlink:href="http://www.liwenbianji.cn">www.liwenbianji.cn</ext-link>) for editing the English text of a draft of this manuscript.</p>
</ack>
<sec id="s10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s11" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s12" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<sec id="s13" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fonc.2026.1763859/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fonc.2026.1763859/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="DataSheet1.pdf" id="SM1" mimetype="application/pdf"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Siegel</surname> <given-names>RL</given-names></name>
<name><surname>Wagle</surname> <given-names>NS</given-names></name>
<name><surname>Cercek</surname> <given-names>A</given-names></name>
<name><surname>Smith</surname> <given-names>RA</given-names></name>
<name><surname>Jemal</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>Colorectal cancer statistics, 2023</article-title>. <source>CA: Cancer J Clin</source>. (<year>2023</year>) <volume>73</volume>:<page-range>233&#x2013;54</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.3322/caac.21772</pub-id>, PMID: <pub-id pub-id-type="pmid">36856579</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<label>2</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zheng</surname> <given-names>S</given-names></name>
<name><surname>You</surname> <given-names>Z</given-names></name>
<name><surname>Guo</surname> <given-names>G</given-names></name>
<name><surname>Lin</surname> <given-names>Z</given-names></name>
<name><surname>Wang</surname> <given-names>S</given-names></name>
<name><surname>Yang</surname> <given-names>G</given-names></name>
<etal/>
</person-group>. 
<article-title>Effect of KRAS mutation status on clinicopathological characteristics and overall survival in patients with rectal cancer</article-title>. <source>BMC Gastroenterol</source>. (<year>2025</year>) <volume>25</volume>:<fpage>37</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12876-025-03615-6</pub-id>, PMID: <pub-id pub-id-type="pmid">39871125</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<label>3</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Van Cutsem</surname> <given-names>E</given-names></name>
<name><surname>Cervantes</surname> <given-names>A</given-names></name>
<name><surname>Adam</surname> <given-names>R</given-names></name>
<name><surname>Sobrero</surname> <given-names>A</given-names></name>
<name><surname>Van Krieken</surname> <given-names>JH</given-names></name>
<name><surname>Aderka</surname> <given-names>D</given-names></name>
<etal/>
</person-group>. 
<article-title>ESMO consensus guidelines for the management of patients with metastatic colorectal cancer</article-title>. <source>Ann Oncol</source>. (<year>2016</year>) <volume>27</volume>:<page-range>1386&#x2013;422</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/annonc/mdw235</pub-id>, PMID: <pub-id pub-id-type="pmid">27380959</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<label>4</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cao</surname> <given-names>Y</given-names></name>
<name><surname>Zhang</surname> <given-names>J</given-names></name>
<name><surname>Huang</surname> <given-names>L</given-names></name>
<name><surname>Zhao</surname> <given-names>Z</given-names></name>
<name><surname>Zhang</surname> <given-names>G</given-names></name>
<name><surname>Ren</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Construction of prediction model for KRAS mutation status of colorectal cancer based on CT radiomics</article-title>. <source>Japanese J Radiol</source>. (<year>2023</year>) <volume>41</volume>:<page-range>1236&#x2013;46</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11604-023-01458-3</pub-id>, PMID: <pub-id pub-id-type="pmid">37311935</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<label>5</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lin</surname> <given-names>CY</given-names></name>
<name><surname>Guo</surname> <given-names>SM</given-names></name>
<name><surname>Lien</surname> <given-names>JJ</given-names></name>
<name><surname>Lin</surname> <given-names>WT</given-names></name>
<name><surname>Liu</surname> <given-names>YS</given-names></name>
<name><surname>Lai</surname> <given-names>CH</given-names></name>
<etal/>
</person-group>. 
<article-title>Combined model integrating deep learning, radiomics, and clinical data to classify lung nodules at chest CT</article-title>. <source>La Radiologia Med</source>. (<year>2024</year>) <volume>129</volume>:<fpage>56</fpage>&#x2013;<lpage>69</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11547-023-01730-6</pub-id>, PMID: <pub-id pub-id-type="pmid">37971691</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<label>6</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>M</given-names></name>
<name><surname>Yuan</surname> <given-names>Y</given-names></name>
<name><surname>Zhou</surname> <given-names>H</given-names></name>
<name><surname>Feng</surname> <given-names>F</given-names></name>
<name><surname>Xu</surname> <given-names>G</given-names></name>
</person-group>. 
<article-title>A multicenter study: predicting KRAS mutation and prognosis in colorectal cancer through a CT-based radiomics nomogram</article-title>. <source>Abdominal Radiol (New York)</source>. (<year>2024</year>) <volume>49</volume>:<page-range>1816&#x2013;28</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00261-024-04218-7</pub-id>, PMID: <pub-id pub-id-type="pmid">38393357</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<label>7</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Jo</surname> <given-names>SJ</given-names></name>
<name><surname>Kim</surname> <given-names>SH</given-names></name>
</person-group>. 
<article-title>Association between oncogenic RAS mutation and radiologic-pathologic findings in patients with primary rectal cancer</article-title>. <source>Quant Imaging Med Surg</source>. (<year>2019</year>) <volume>9</volume>:<page-range>238&#x2013;46</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.21037/qims.2018.12.10</pub-id>, PMID: <pub-id pub-id-type="pmid">30976548</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<label>8</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Shin</surname> <given-names>YR</given-names></name>
<name><surname>Kim</surname> <given-names>KA</given-names></name>
<name><surname>Im</surname> <given-names>S</given-names></name>
<name><surname>Hwang</surname> <given-names>SS</given-names></name>
<name><surname>Kim</surname> <given-names>K</given-names></name>
</person-group>. 
<article-title>Prediction of KRAS mutation in rectal cancer using MRI</article-title>. <source>Anticancer Res</source>. (<year>2016</year>) <volume>36</volume>:<page-range>4799&#x2013;804</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.21873/anticanres.11039</pub-id>, PMID: <pub-id pub-id-type="pmid">27630331</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<label>9</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cui</surname> <given-names>Y</given-names></name>
<name><surname>Zhang</surname> <given-names>J</given-names></name>
<name><surname>Li</surname> <given-names>Z</given-names></name>
<name><surname>Wei</surname> <given-names>K</given-names></name>
<name><surname>Lei</surname> <given-names>Y</given-names></name>
<name><surname>Ren</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>A CT-based deep learning radiomics nomogram for predicting the response to neoadjuvant chemotherapy in patients with locally advanced gastric cancer: A multicenter cohort study</article-title>. <source>EClinicalMedicine</source>. (<year>2022</year>) <volume>46</volume>:<fpage>101348</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.eclinm.2022.101348</pub-id>, PMID: <pub-id pub-id-type="pmid">35340629</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<label>10</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>H</given-names></name>
<name><surname>Yin</surname> <given-names>H</given-names></name>
<name><surname>Li</surname> <given-names>J</given-names></name>
<name><surname>Dong</surname> <given-names>X</given-names></name>
<name><surname>Zheng</surname> <given-names>H</given-names></name>
<name><surname>Zhang</surname> <given-names>T</given-names></name>
<etal/>
</person-group>. 
<article-title>A deep learning model based on MRI and clinical factors facilitates noninvasive evaluation of KRAS mutation in rectal cancer</article-title>. <source>J Magnetic Resonance Imaging</source>. (<year>2022</year>) <volume>56</volume>:<page-range>1659&#x2013;68</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/jmri.28237</pub-id>, PMID: <pub-id pub-id-type="pmid">35587946</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<label>11</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ros</surname> <given-names>J</given-names></name>
<name><surname>Vaghi</surname> <given-names>C</given-names></name>
<name><surname>Baraibar</surname> <given-names>I</given-names></name>
<name><surname>Saoudi Gonz&#xe1;lez</surname> <given-names>N</given-names></name>
<name><surname>Rodr&#xed;guez-Castells</surname> <given-names>M</given-names></name>
<name><surname>Garc&#xed;a</surname> <given-names>A</given-names></name>
<etal/>
</person-group>. 
<article-title>Targeting KRAS G12C mutation in colorectal cancer, A review: new arrows in the quiver</article-title>. <source>Int J Mol Sci</source>. (<year>2024</year>) <volume>25</volume>:<fpage>3304</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/ijms25063304</pub-id>, PMID: <pub-id pub-id-type="pmid">38542278</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<label>12</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Meng</surname> <given-names>M</given-names></name>
<name><surname>Zhong</surname> <given-names>K</given-names></name>
<name><surname>Jiang</surname> <given-names>T</given-names></name>
<name><surname>Liu</surname> <given-names>Z</given-names></name>
<name><surname>Kwan</surname> <given-names>HY</given-names></name>
<name><surname>Su</surname> <given-names>T</given-names></name>
<etal/>
</person-group>. 
<article-title>The current understanding on the impact of KRAS on colorectal cancer</article-title>. <source>BioMed Pharmacother</source>. (<year>2021</year>) <volume>140</volume>:<fpage>111717</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.biopha.2021.111717</pub-id>, PMID: <pub-id pub-id-type="pmid">34044280</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<label>13</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>Y</given-names></name>
<name><surname>Eresen</surname> <given-names>A</given-names></name>
<name><surname>Shangguan</surname> <given-names>J</given-names></name>
<name><surname>Yang</surname> <given-names>J</given-names></name>
<name><surname>Benson</surname> <given-names>AB</given-names><suffix>3rd</suffix></name>
<name><surname>Yaghmai</surname> <given-names>V</given-names></name>
<etal/>
</person-group>. 
<article-title>Preoperative prediction of perineural invasion and KRAS mutation in colon cancer using machine learning</article-title>. <source>J Cancer Res Clin Oncol</source>. (<year>2020</year>) <volume>146</volume>:<page-range>3165&#x2013;74</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00432-020-03354-z</pub-id>, PMID: <pub-id pub-id-type="pmid">32779023</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<label>14</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhao</surname> <given-names>H</given-names></name>
<name><surname>Su</surname> <given-names>Y</given-names></name>
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Lyu</surname> <given-names>Z</given-names></name>
<name><surname>Xu</surname> <given-names>P</given-names></name>
<name><surname>Gu</surname> <given-names>W</given-names></name>
<etal/>
</person-group>. 
<article-title>Using tumor habitat-derived radiomic analysis during pretreatment (18)F-FDG PET for predicting KRAS/NRAS/BRAF mutations in colorectal cancer</article-title>. <source>Cancer Imaging</source>. (<year>2024</year>) <volume>24</volume>:<fpage>26</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s40644-024-00670-2</pub-id>, PMID: <pub-id pub-id-type="pmid">38342905</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<label>15</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wu</surname> <given-names>Y</given-names></name>
<name><surname>Jiang</surname> <given-names>T</given-names></name>
<name><surname>Liu</surname> <given-names>H</given-names></name>
<name><surname>Shi</surname> <given-names>S</given-names></name>
<name><surname>Singh</surname> <given-names>A</given-names></name>
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<etal/>
</person-group>. 
<article-title>Generative adversarial networks: multiparametric, multiregion super-resolution MRI in predicting lymph node metastasis in rectal cancer</article-title>. <source>Insights Imaging</source>. (<year>2026</year>) <volume>17</volume>:<fpage>1</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13244-025-02173-5</pub-id>, PMID: <pub-id pub-id-type="pmid">41491765</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<label>16</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>H</given-names></name>
<name><surname>Zhang</surname> <given-names>H</given-names></name>
<name><surname>Zou</surname> <given-names>Q</given-names></name>
<name><surname>Yang</surname> <given-names>J</given-names></name>
</person-group>. 
<article-title>Intratumoral and peritumoral radiomics of MRI predict pathological differentiation in patients with rectal cancer</article-title>. <source>Oncol Lett</source>. (<year>2025</year>) <volume>31</volume>:<fpage>1</fpage>&#x2013;<lpage>13</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3892/ol.2025.15363</pub-id>, PMID: <pub-id pub-id-type="pmid">41200064</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<label>17</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cui</surname> <given-names>Y</given-names></name>
<name><surname>Liu</surname> <given-names>H</given-names></name>
<name><surname>Ren</surname> <given-names>J</given-names></name>
<name><surname>Du</surname> <given-names>X</given-names></name>
<name><surname>Xin</surname> <given-names>L</given-names></name>
<name><surname>Li</surname> <given-names>D</given-names></name>
<etal/>
</person-group>. 
<article-title>Development and validation of a MRI-based radiomics signature for prediction of KRAS mutation in rectal cancer</article-title>. <source>Eur Radiol</source>. (<year>2020</year>) <volume>30</volume>:<page-range>1948&#x2013;58</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-019-06572-3</pub-id>, PMID: <pub-id pub-id-type="pmid">31942672</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<label>18</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>Z</given-names></name>
<name><surname>Shen</surname> <given-names>L</given-names></name>
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Wang</surname> <given-names>J</given-names></name>
<name><surname>Zhang</surname> <given-names>H</given-names></name>
<name><surname>Xia</surname> <given-names>F</given-names></name>
<etal/>
</person-group>. 
<article-title>MRI radiomics signature as a potential biomarker for predicting KRAS status in locally advanced rectal cancer patients</article-title>. <source>Front Oncol</source>. (<year>2021</year>) <volume>11</volume>:<elocation-id>614052</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fonc.2021.614052</pub-id>, PMID: <pub-id pub-id-type="pmid">34026605</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<label>19</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yuan</surname> <given-names>J</given-names></name>
<name><surname>Yao</surname> <given-names>M</given-names></name>
<name><surname>Wang</surname> <given-names>Z</given-names></name>
<name><surname>Tan</surname> <given-names>W</given-names></name>
<name><surname>Zhang</surname> <given-names>Y</given-names></name>
<name><surname>Yan</surname> <given-names>H</given-names></name>
<etal/>
</person-group>. 
<article-title>Non-invasive identification of KRAS mutation in rectal cancer using hybrid intravoxel incoherent motion and diffusion kurtosis model</article-title>. <source>World J Surg Oncol</source>. (<year>2025</year>) <volume>23</volume>:<fpage>442</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12957-025-04097-2</pub-id>, PMID: <pub-id pub-id-type="pmid">41291770</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<label>20</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Rehman</surname> <given-names>A</given-names></name>
<name><surname>Mahmood</surname> <given-names>T</given-names></name>
<name><surname>Alamri</surname> <given-names>FS</given-names></name>
<name><surname>Saba</surname> <given-names>T</given-names></name>
<name><surname>Naseem</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>Advanced feature learning and classification of microscopic breast abnormalities using a robust deep transfer learning technique</article-title>. <source>Microscopy Res Technique</source>. (<year>2024</year>) <volume>87</volume>:<page-range>1862&#x2013;88</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/jemt.24557</pub-id>, PMID: <pub-id pub-id-type="pmid">38553901</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<label>21</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gan</surname> <given-names>Y</given-names></name>
<name><surname>Hu</surname> <given-names>Q</given-names></name>
<name><surname>Shen</surname> <given-names>Q</given-names></name>
<name><surname>Lin</surname> <given-names>P</given-names></name>
<name><surname>Qian</surname> <given-names>Q</given-names></name>
<name><surname>Zhuo</surname> <given-names>M</given-names></name>
<etal/>
</person-group>. 
<article-title>Comparison of intratumoral and peritumoral deep learning, radiomics, and fusion models for predicting KRAS gene mutations in rectal cancer based on endorectal ultrasound imaging</article-title>. <source>Ann Surg Oncol</source>. (<year>2024</year>) <volume>32</volume>:<page-range>3019&#x2013;30</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1245/s10434-024-16697-5</pub-id>, PMID: <pub-id pub-id-type="pmid">39690384</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<label>22</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yang</surname> <given-names>Y</given-names></name>
<name><surname>Han</surname> <given-names>K</given-names></name>
<name><surname>Xu</surname> <given-names>Z</given-names></name>
<name><surname>Cai</surname> <given-names>Z</given-names></name>
<name><surname>Zhao</surname> <given-names>H</given-names></name>
<name><surname>Hong</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Development and validation of multiparametric MRI-based interpretable deep learning radiomics fusion model for predicting lymph node metastasis and prognosis in rectal cancer: A two-center study</article-title>. <source>Acad Radiol</source>. (<year>2025</year>) <volume>32</volume>:<page-range>2642&#x2013;54</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.acra.2024.11.045</pub-id>, PMID: <pub-id pub-id-type="pmid">39638641</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<label>23</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sun</surname> <given-names>J</given-names></name>
<name><surname>Wu</surname> <given-names>PY</given-names></name>
<name><surname>Shen</surname> <given-names>F</given-names></name>
<name><surname>Chen</surname> <given-names>X</given-names></name>
<name><surname>She</surname> <given-names>J</given-names></name>
<name><surname>Luo</surname> <given-names>M</given-names></name>
<etal/>
</person-group>. 
<article-title>Deep learning models based on multiparametric magnetic resonance imaging and clinical parameters for identifying synchronous liver metastases from rectal cancer</article-title>. <source>BMC Med Imaging</source>. (<year>2025</year>) <volume>25</volume>:<fpage>173</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12880-025-01692-3</pub-id>, PMID: <pub-id pub-id-type="pmid">40389920</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<label>24</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cen</surname> <given-names>Y</given-names></name>
<name><surname>Nong</surname> <given-names>H</given-names></name>
<name><surname>Du</surname> <given-names>D</given-names></name>
<name><surname>Wu</surname> <given-names>Y</given-names></name>
<name><surname>Chen</surname> <given-names>J</given-names></name>
<name><surname>Pan</surname> <given-names>Z</given-names></name>
<etal/>
</person-group>. 
<article-title>CT-based 2.5D deep learning-multi-instance learning for predicting early recurrence of hepatocellular carcinoma and correlating with recurrence-related pathological indicators</article-title>. <source>J Hepatocell Carcinoma</source>. (<year>2025</year>) <volume>12</volume>:<page-range>2095&#x2013;108</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.2147/JHC.S541402</pub-id>, PMID: <pub-id pub-id-type="pmid">40984863</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<label>25</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>Y</given-names></name>
<name><surname>Wei</surname> <given-names>D</given-names></name>
<name><surname>Liu</surname> <given-names>X</given-names></name>
<name><surname>Fan</surname> <given-names>X</given-names></name>
<name><surname>Wang</surname> <given-names>K</given-names></name>
<name><surname>Li</surname> <given-names>S</given-names></name>
<etal/>
</person-group>. 
<article-title>Molecular subtyping of diffuse gliomas using magnetic resonance imaging: comparison and correlation between radiomics and deep learning</article-title>. <source>Eur Radiol</source>. (<year>2021</year>) <volume>32</volume>:<page-range>747&#x2013;58</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-021-08237-6</pub-id>, PMID: <pub-id pub-id-type="pmid">34417848</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<label>26</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Huang</surname> <given-names>X</given-names></name>
<name><surname>Huang</surname> <given-names>X</given-names></name>
<name><surname>Wang</surname> <given-names>K</given-names></name>
<name><surname>Bai</surname> <given-names>H</given-names></name>
<name><surname>Lu</surname> <given-names>X</given-names></name>
<name><surname>Jin</surname> <given-names>G</given-names></name>
<etal/>
</person-group>. 
<article-title>2.5D deep learning radiomics and clinical data for predicting occult lymph node metastasis in lung adenocarcinoma</article-title>. <source>BMC Med Imaging</source>. (<year>2025</year>) <volume>25</volume>:<fpage>225</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12880-025-01759-1</pub-id>, PMID: <pub-id pub-id-type="pmid">40597741</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<label>27</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kim</surname> <given-names>H</given-names></name>
<name><surname>Lee</surname> <given-names>D</given-names></name>
<name><surname>Cho</surname> <given-names>WS</given-names></name>
<name><surname>Lee</surname> <given-names>JC</given-names></name>
<name><surname>Goo</surname> <given-names>JM</given-names></name>
<name><surname>Kim</surname> <given-names>HC</given-names></name>
<etal/>
</person-group>. 
<article-title>CT-based deep learning model to differentiate invasive pulmonary adenocarcinomas appearing as subsolid nodules among surgical candidates: comparison of the diagnostic performance with a size-based logistic model and radiologists</article-title>. <source>Eur Radiol</source>. (<year>2020</year>) <volume>30</volume>:<page-range>3295&#x2013;305</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-019-06628-4</pub-id>, PMID: <pub-id pub-id-type="pmid">32055949</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<label>28</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kim</surname> <given-names>Y</given-names></name>
<name><surname>Kim</surname> <given-names>YG</given-names></name>
<name><surname>Park</surname> <given-names>JW</given-names></name>
<name><surname>Kim</surname> <given-names>BW</given-names></name>
<name><surname>Shin</surname> <given-names>Y</given-names></name>
<name><surname>Kong</surname> <given-names>SH</given-names></name>
<etal/>
</person-group>. 
<article-title>A CT-based deep learning model for predicting subsequent fracture risk in patients with hip fracture</article-title>. <source>Radiology</source>. (<year>2024</year>) <volume>310</volume>:<fpage>e230614</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.230614</pub-id>, PMID: <pub-id pub-id-type="pmid">38289213</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<label>29</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Borah</surname> <given-names>K</given-names></name>
<name><surname>Das</surname> <given-names>HS</given-names></name>
<name><surname>Budhathoki</surname> <given-names>RK</given-names></name>
<name><surname>Aurangzeb</surname> <given-names>K</given-names></name>
<name><surname>Mallik</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>DOMSCNet: a deep learning model for the classification of stomach cancer using multi-layer omics data</article-title>. <source>Brief Bioinform</source>. (<year>2025</year>) <volume>26</volume>:<fpage>bbaf115</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/bib/bbaf115</pub-id>, PMID: <pub-id pub-id-type="pmid">40178281</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1548245">Ting Guo</ext-link>, Peking University, China</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2397543">Zsolt Fekete</ext-link>, University of Medicine and Pharmacy Iuliu Hatieganu Cluj-Napoca, Romania</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2750948">Tingxiu Liu</ext-link>, Beijing University of Chinese Medicine, China</p></fn>
</fn-group>
</back>
</article>