<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="systematic-review">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Med.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Med.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-858X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmed.2025.1731922</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Systematic Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>From radiomics to transformers in pancreatic cancer detection and prognosis</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Almufareh</surname> <given-names>Maram Fahaad</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Tehsin</surname> <given-names>Samabia</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<uri xlink:href="https://loop.frontiersin.org/people/3296486"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Humayun</surname> <given-names>Mamoona</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/2187125"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Kausar</surname> <given-names>Sumaira</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Farooq</surname> <given-names>Asad</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Aldossary</surname> <given-names>Haya</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<uri xlink:href="https://loop.frontiersin.org/people/2931625"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Aljohani</surname> <given-names>Abeer</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Information Systems, College of Computer and Information Sciences, Jouf University</institution>, <city>Sakaka</city>, <country country="SA">Saudi Arabia</country></aff>
<aff id="aff2"><label>2</label><institution>Centre of Excellence in Artificial Intelligence, Bahria University</institution>, <city>Islamabad</city>, <country country="PK">Pakistan</country></aff>
<aff id="aff3"><label>3</label><institution>School of Computing, Engineering and the Built Environment, University of Roehampton</institution>, <city>London</city>, <country country="GB">United Kingdom</country></aff>
<aff id="aff4"><label>4</label><institution>Computer Science Department, College of Science and Humanities, Imam Abdul Rahman Bin Faisal University</institution>, <city>Jubail</city>, <country country="SA">Saudi Arabia</country></aff>
<aff id="aff5"><label>5</label><institution>Department of Computer Science and Information, Applied College, Taibah University</institution>, <city>Madinah</city>, <country country="SA">Saudi Arabia</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Maram Fahaad Almufareh, <email xlink:href="mailto:mfalmufareh@ju.edu.sa">mfalmufareh@ju.edu.sa</email>; Mamoona Humayun, <email xlink:href="mailto:Mamoona.Humayun@roehampton.ac.uk">Mamoona.Humayun@roehampton.ac.uk</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-09">
<day>09</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>12</volume>
<elocation-id>1731922</elocation-id>
<history>
<date date-type="received">
<day>24</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>26</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>04</day>
<month>12</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Almufareh, Tehsin, Humayun, Kausar, Farooq, Aldossary and Aljohani.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Almufareh, Tehsin, Humayun, Kausar, Farooq, Aldossary and Aljohani</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-09">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Pancreatic ductal adenocarcinoma (PDAC) remains one of the deadliest malignancies, primarily due to late diagnosis and poor therapeutic response. Advances in artificial intelligence (AI), particularly in medical imaging and multi-modal data integration, have created new opportunities for improving early detection and personalized prognostication.</p></sec>
<sec>
<title>Methods</title>
<p>This systematic review was conducted according to the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) 2020 statement. The protocol was prospectively registered with the Open Science Framework, covering studies published between 2015 and 2025.</p></sec>
<sec>
<title>Results</title>
<p>Distinct from prior surveys that focus narrowly on specific algorithms or data types, this work introduces a generational taxonomy of AI approaches&#x02014;ranging from classical radiomics-based machine learning to deep learning and contemporary transformer-based models&#x02014;and maps their application to core clinical tasks such as detection, segmentation, classification, and outcome prediction. A key contribution is the integration of diverse datasets across imaging, pathology, and molecular sources; we further assess trends in availability, usage, and sample scale.</p></sec>
<sec>
<title>Discussion</title>
<p>We critically evaluate limitations in generalizability, external validation, model calibration, and translational readiness, and outline recommendations for multi-center validation, standardized reporting, domain adaptation, and clinician-centered interpretability.</p></sec>
<sec>
<title>Systematic review registration</title>
<p><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.17605/OSF.IO/2DVHJ">https://doi.org/10.17605/OSF.IO/2DVHJ</ext-link>.</p></sec></abstract>
<kwd-group>
<kwd>attention</kwd>
<kwd>deep learning</kwd>
<kwd>early detection</kwd>
<kwd>multi-modal fusion</kwd>
<kwd>pancreatic ductal adenocarcinoma</kwd>
<kwd>radiomics</kwd>
<kwd>transformers</kwd>
</kwd-group>
<funding-group>
 <funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was funded by the Deanship of Graduate Studies and Scientific Research at Jouf University under grant No. DGSSR-2025-FC-01029.</funding-statement>
</funding-group>
<counts>
<fig-count count="10"/>
<table-count count="9"/>
<equation-count count="6"/>
<ref-count count="84"/>
<page-count count="25"/>
<word-count count="16076"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Pathology</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Cancer remains one of the world&#x00027;s most formidable public-health challenges. The Global Cancer Observatory reported almost 20 million new diagnoses and 9.7 million deaths in 2022 (<xref ref-type="fig" rid="F1">Figure 1</xref>), translating to an economic burden that exceeds 1% of global gross domestic product each year (<xref ref-type="bibr" rid="B1">1</xref>). Recent market reports reflect the growing clinical interest and research activity in pancreatic cancer. Precedence Research (2024) forecasts strong expansion of the pancreatic cancer market through 2034 driven by rising investment in diagnostics (<xref ref-type="bibr" rid="B2">2</xref>), therapeutics and clinical trials. This commercial growth both supports and mirrors the increased funding for AI based detection and prognostic technologies that are the main focus of this review.</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Projected global pancreatic cancer market growth, 2023&#x02013;2034 (Precedence Research, 2024). The market is expected to rise from approximately United States Dollar (USD) 2.86 billion in 2024 to USD 10.25 billion by 2034 (Compound Annual Growth Rate (CAGR) &#x02248; 13.62%), reflecting expanding investment in diagnostics, therapeutics and clinical research that may accelerate translation of Artificial Intelligence-enabled detection and prognosis systems.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0001.tif">
<alt-text content-type="machine-generated">Chart showing projected growth of the global pancreatic cancer market from 2023 to 2034, rising from approximately USD 2.86 billion in 2024 to USD 10.25 billion by 2034, at a compound annual growth rate of about 13.62%.</alt-text>
</graphic>
</fig>
<p>Despite incremental gains in screening and therapy, the absolute number of cancer-related fatalities continues to rise with population aging and growth (<xref ref-type="bibr" rid="B3">3</xref>). Pancreatic cancer (PC), of which approximately 90% are pancreatic ductal adenocarcinomas (PDAC), deserves special attention because its mortality far exceeds its incidence ranking. Globally it is the twelfth most commonly diagnosed malignancy, yet it already ranks seventh among cancer deaths (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B4">4</xref>). Five-year relative survival in high-income countries has only recently crept into double digits&#x02014;roughly 11%&#x02013;13%&#x02014;making PDAC the deadliest of the major solid tumors (<xref ref-type="bibr" rid="B5">5</xref>). Contributing factors include a stealthy symptom profile, an anatomically concealed primary site and a dense desmoplastic micro-environment that confers intrinsic resistance to cytotoxic therapies (<xref ref-type="bibr" rid="B3">3</xref>). Without intervention, PDAC is projected to become the second leading cause of cancer mortality in North America and parts of Europe by 2030 (<xref ref-type="bibr" rid="B4">4</xref>).</p>
<p>The global burden of pancreatic cancer has continued to rise over the past three decades. According to GLOBOCAN 2022, there were 510,992 new cases worldwide (<xref ref-type="bibr" rid="B1">1</xref>). Estimates from the Global Burden of Disease (GBD) 2019&#x02013;2021 update indicate incident cases increased from about 489,862 in 2019 to 508,533 in 2021, while deaths rose from 486,869 to 505,752 over the same period (<xref ref-type="bibr" rid="B3">3</xref>). The age-standardized incidence rate (ASIR) decreased slightly from 6.04 to 5.96 per 100,000, whereas the age-standardized death rate (ASDR) declined from 6.03 to 5.95 per 100,000 (<xref ref-type="bibr" rid="B3">3</xref>). These aggregate figures mask striking geographic disparities. High-income regions such as North America, Western Europe and high-income Asia&#x02013;Pacific have ASIRs approaching 10 per 100,000, whereas low-SDI countries have rates as low as 1.6 per 100,000 (<xref ref-type="bibr" rid="B3">3</xref>). Case fatality mirrors incidence because PDAC is frequently lethal once symptomatic. The incidence-to-mortality ratio was only &#x02248;1.28 in 2024, and more than 85% of patients are diagnosed at an unresectable or metastatic stage (<xref ref-type="bibr" rid="B3">3</xref>).</p>
<p><xref ref-type="table" rid="T1">Table 1</xref> summarizes recent estimates from high-incidence regions. In the United States, the National Cancer Institute projects 67,440 new cases and 51,980 deaths for 2025. Although pancreatic cancer will account for only 3.3% of new malignancies, it will cause 8.4% of all cancer deaths (<xref ref-type="bibr" rid="B5">5</xref>). The United States has seen incidence climb by about 0.7% per year since 2001, and PDAC is forecast to become the second leading cause of cancer death by 2030 (<xref ref-type="bibr" rid="B4">4</xref>). European countries such as Hungary, the Czech Republic and Finland report age-standardized mortality rates exceeding 8 per 100,000 (<xref ref-type="bibr" rid="B4">4</xref>). In Asia, China already accounts for over 25% of worldwide PDAC deaths, and Japan has experienced one of the steepest rises in incidence among high-income countries (<xref ref-type="bibr" rid="B3">3</xref>). The median age at diagnosis is approximately 70 years, and more than 80% of patients present with unresectable or metastatic disease (<xref ref-type="bibr" rid="B3">3</xref>).</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Recent regional estimates for pancreatic cancer incidence and mortality.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Region</bold></th>
<th valign="top" align="left"><bold>Epidemiological statistics</bold></th>
<th valign="top" align="left"><bold>Notable findings</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><bold>Global (2021&#x02013;2022)</bold></td>
<td valign="top" align="left">510,992 new cases in 2022; 508,533 cases and 505,752 deaths in 2021; ASIR decreased from 6.04 to 5.96 per 100,000 (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B3">3</xref>)</td>
<td valign="top" align="left">Slight decline in age-standardized rates but absolute numbers rising; incidence-to-mortality ratio &#x02248;1.28.</td>
</tr>
<tr>
<td valign="top" align="left"><bold>United States (2025)</bold></td>
<td valign="top" align="left">67,440 new cases and 51,980 deaths projected; pancreatic cancer accounts for 3.3% of new cancers but 8.4% of cancer deaths (<xref ref-type="bibr" rid="B5">5</xref>)</td>
<td valign="top" align="left">Incidence rising at &#x02248;0.7% annually; PDAC projected to be the second leading cause of cancer death by 2030 (<xref ref-type="bibr" rid="B4">4</xref>).</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Europe (2024)</bold></td>
<td valign="top" align="left">Hungary, Czechia and Finland have age-standardized mortality &#x0003E;8 per 100,000 (<xref ref-type="bibr" rid="B4">4</xref>)</td>
<td valign="top" align="left">Mortality rates in some European nations are among the highest globally.</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Asia (2024)</bold></td>
<td valign="top" align="left">China accounts for &#x0003E;25% of PDAC deaths; Japan reports one of the steepest rises in incidence among high-income countries (<xref ref-type="bibr" rid="B3">3</xref>)</td>
<td valign="top" align="left">Demographic shifts and rapid population aging drive a large share of the global burden.</td>
</tr></tbody>
</table>
</table-wrap>
<p>Socio-economic inequalities magnify these differences. A recent analysis using GBD 2021 data showed that countries with high SDI had ASIRs around 10 per 100,000 and ASDRs near 9.4 per 100,000, compared with rates around 1.6&#x02013;1.7 per 100,000 in low-SDI countries (<xref ref-type="bibr" rid="B3">3</xref>). The number of disability-adjusted life-years (DALYs) attributable to pancreatic cancer rose from 10.9 million in 2019 to 11.3 million in 2021, highlighting the growing societal and economic toll (<xref ref-type="bibr" rid="B3">3</xref>). Projection models estimate that incident cases and deaths will both exceed 875,000 by 2044 (<xref ref-type="bibr" rid="B3">3</xref>).</p>
<p>Pancreatic carcinogenesis is multifactorial, with both modifiable and non-modifiable determinants. Modifiable lifestyle factors include tobacco smoking, obesity, diabetes, diet and alcohol consumption. Smoking remains the strongest environmental risk factor: pooled analyses show that current smokers have nearly a two-fold increase in risk compared with never smokers, and heavy smokers (&#x0003E;25 cigarettes day<sup>&#x02212;1</sup>) can experience a 2.7-fold elevation (<xref ref-type="bibr" rid="B6">6</xref>). Risk declines after cessation but may not return to baseline until 15&#x02013;20 years later (<xref ref-type="bibr" rid="B6">6</xref>). A meta-analysis reported that current smokers have a 75% increased risk of pancreatic cancer relative to never smokers and that elevated risk persists for at least a decade after quitting (<xref ref-type="bibr" rid="B6">6</xref>). Second-hand smoke exposure appears to play a minor role (<xref ref-type="bibr" rid="B6">6</xref>).</p>
<p>Obesity and metabolic dysfunction are increasingly recognized contributors. A pooled analysis of cohort studies found that obesity approximately doubles the risk of pancreatic cancer in both men and women, and each 5 kg m<sup>&#x02212;2</sup> increase in body-mass index raises risk by about 12% (<xref ref-type="bibr" rid="B6">6</xref>). Adipose tissue produces pro-inflammatory cytokines and increases insulin resistance, creating a carcinogenic milieu. Type 2 diabetes mellitus (T2DM) is both a risk factor and a consequence of pancreatic cancer: long-standing T2DM increases PDAC risk by 1.5&#x02013;2.0-fold, whereas new-onset diabetes confers a 5&#x02013;8-fold increased risk within one to three years (<xref ref-type="bibr" rid="B6">6</xref>). Indeed, a large proportion of patients with pancreatic cancer have diabetes or impaired glucose tolerance at diagnosis (<xref ref-type="bibr" rid="B6">6</xref>). Other dietary and lifestyle factors&#x02014;such as heavy alcohol consumption, diets rich in red and processed meats, low intake of fruits and vegetables, and chronic pancreatitis&#x02014;have also been implicated, although effect sizes are generally smaller.</p>
<p>Non-modifiable factors include age, sex, ethnicity and genetic predisposition. Incidence increases dramatically with age and peaks between 70 and 74 years (<xref ref-type="bibr" rid="B3">3</xref>). Males tend to have higher age-standardized incidence and mortality rates than females across all age groups (<xref ref-type="bibr" rid="B3">3</xref>). Familial pancreatic cancer accounts for 5&#x02013;10% of cases; germline mutations in <italic>BRCA1/2, CDKN2A, PALB2, STK11/LKB1, TP53</italic> and mismatch-repair genes confer markedly elevated lifetime risks. Hereditary pancreatitis (<italic>PRSS1</italic> mutations), Peutz&#x02013;Jeghers syndrome and familial atypical multiple mole melanoma syndrome are notable syndromes requiring surveillance. Pancreatic cancer risk also varies by ethnicity; for example, African-American populations in the United States experience incidence and mortality rates about 30% higher than those of Caucasian populations, likely reflecting a combination of genetic, metabolic and socio-economic factors (<xref ref-type="bibr" rid="B3">3</xref>).</p>
<p>Early detection remains the cornerstone for improving pancreatic-cancer outcomes. However, current screening modalities lack sensitivity for precursor lesions, and the overall rarity of PDAC precludes population-wide screening. Standard imaging techniques such as abdominal ultrasound and computed tomography (CT) have limited ability to detect small pancreatic tumors. Pre-diagnostic CT scans often fail to reveal abnormalities in more than half of patients, and subtle signs may precede the clinical diagnosis by 3&#x02013;36 months (<xref ref-type="bibr" rid="B7">7</xref>). Consequently, only about 13.6% of PDAC cases are diagnosed while still localized, and over 85% present with locally advanced or metastatic disease (<xref ref-type="bibr" rid="B3">3</xref>). When patients are diagnosed at an early stage and can undergo complete resection followed by multi-modal therapy, median overall survival can exceed 60 months (<xref ref-type="bibr" rid="B7">7</xref>).</p>
<p>Given the low prevalence of PDAC in the general population, surveillance strategies are currently recommended only for high-risk individuals&#x02014;those with strong family histories or pathogenic germline mutations. Ongoing prospective cohorts (for example, the CAPS and Dutch familial pancreatic cancer studies) monitor high-risk participants with annual magnetic resonance imaging (MRI) and endoscopic ultrasound (EUS). A recent update involving approximately 1,700 participants with familial or genetic risk factors reported that surveillance detected tumors at an earlier stage: 38.5% of screen-detected cancers were stage I compared with 10.3% in the general population; 5-year survival reached 50% among the surveillance cohort vs. 9% for non-screened patients (<xref ref-type="bibr" rid="B8">8</xref>). The study underscores the potential of targeted screening to extend survival but also highlights logistical challenges: surveillance requires specialized centers with multidisciplinary expertise, and false-positive results can lead to unnecessary interventions (<xref ref-type="bibr" rid="B8">8</xref>).</p>
<p>The advent of artificial intelligence and machine-learning techniques offers hope for earlier detection and more accurate risk stratification (<xref ref-type="bibr" rid="B9">9</xref>). Radiomics extracts high-dimensional quantitative features from imaging data, capturing subtle textural and morphological patterns that are imperceptible to the human eye. Deep-learning architectures&#x02014;particularly convolutional neural networks (CNNs) and U-Net&#x02013;based segmentation models&#x02014;have demonstrated promising performance in identifying pancreatic lesions and classifying intraductal papillary mucinous neoplasms (IPMNs). A recent systematic review of AI-based IPMN imaging reported classification accuracies ranging from 60% to 99.6%, although heterogeneity in study populations, imaging protocols and analytic pipelines limits direct comparison (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B10">10</xref>). Most studies relied on CT data despite guidelines favoring MRI, and many used small, single-center cohorts without external validation, leading to high risk of bias (<xref ref-type="bibr" rid="B7">7</xref>). Standardized frameworks, large multi-institutional datasets and rigorous external validation are urgently needed.</p>
<p>AI-augmented imaging may also facilitate detection of subtle pre-diagnostic changes. For example, radiomics and deep-learning models can segment the pancreas automatically and identify textural or shape alterations months to years before clinical presentation. In a recent review of AI-augmented imaging, radiomic signatures from routine CT were able to identify early changes that preceded diagnosis by 3&#x02013;36 months (<xref ref-type="bibr" rid="B7">7</xref>). Such models, once validated and integrated into clinical workflows, could trigger further evaluation or enrolment into high-risk surveillance programmes. Nevertheless, adoption of AI in clinical practice raises issues of data privacy, algorithmic fairness, interpretability and regulatory oversight (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B10">10</xref>).</p>
<p><xref ref-type="table" rid="T2">Table 2</xref> summarizes seven recent AI-focused reviews on pancreatic cancer. While these reviews document progress in CNNs, transformers, and radiomics, they consistently reveal gaps in comprehensive attention mechanism surveys, unified dataset visualizations and external validation strategies&#x02014;limitations that our paper addresses.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Summary of recent AI-focused reviews on pancreatic cancer (2023&#x02013;2025).</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>References</bold></th>
<th valign="top" align="left"><bold>Journal/Title</bold></th>
<th valign="top" align="left"><bold>Scope</bold></th>
<th valign="top" align="left"><bold>Advantages</bold></th>
<th valign="top" align="left"><bold>Limitations</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Pod&#x0012D;n&#x00103; et al. (<xref ref-type="bibr" rid="B11">11</xref>)</td>
<td valign="top" align="left">Artificial Intelligence in Pancreatic Imaging: A Systematic Review</td>
<td valign="top" align="left">Systematic review of AI in pancreatic imaging (CT/MRI/EUS)</td>
<td valign="top" align="left">PRISMA-style search; clear overview of CNN/radiomics applications and clinical use cases</td>
<td valign="top" align="left">Imaging-only scope; limited attention/transformer mapping; no comprehensive dataset landscape/visualizations</td>
</tr>
<tr>
<td valign="top" align="left">Yao et al. (<xref ref-type="bibr" rid="B12">12</xref>)</td>
<td valign="top" align="left">Deep learning and radiomics approaches for pancreatic cancer diagnosis from medical imaging</td>
<td valign="top" align="left">Narrative review focused on CT/MRI CAD with DL &#x00026; radiomics</td>
<td valign="top" align="left">Summarizes CNNs; mentions transformers; provides workflow/metric diagrams</td>
<td valign="top" align="left">Imaging-only; brief attention/transformer coverage; lacks broad dataset comparisons/visualizations</td>
</tr>
<tr>
<td valign="top" align="left">Mishra et al. (<xref ref-type="bibr" rid="B13">13</xref>)</td>
<td valign="top" align="left">ML Models for Pancreatic Cancer Risk Prediction Using EHR&#x02014;Systematic Review and Assessment</td>
<td valign="top" align="left">Systematic review of EHR-based risk models</td>
<td valign="top" align="left">Methodological appraisal; figures on model types and validation</td>
<td valign="top" align="left">EHR-only; models mostly logistic regression; no attention/transformer landscape; no dataset visualizations</td>
</tr>
<tr>
<td valign="top" align="left">Qadir et al. (<xref ref-type="bibr" rid="B7">7</xref>)</td>
<td valign="top" align="left">AI in IPMN Imaging: A Systematic Review</td>
<td valign="top" align="left">Systematic review on IPMN (cyst) imaging</td>
<td valign="top" align="left">Breakdown by modality and stage of translation; PRISMA and study distribution figures</td>
<td valign="top" align="left">Narrow to IPMN &#x00026; imaging; small single-center studies; few prospective evaluations; minimal EUS; limited attention coverage; limited dataset visualizations</td>
</tr>
<tr>
<td valign="top" align="left">Zhang et al. (<xref ref-type="bibr" rid="B14">14</xref>)</td>
<td valign="top" align="left">Effectiveness of Radiomics-Based ML for PDAC vs. Mass-Forming Pancreatitis: Systematic Review &#x00026; Meta-analysis</td>
<td valign="top" align="left">Diagnostic performance meta-analysis (primarily radiomics)</td>
<td valign="top" align="left">Pooled sensitivity/specificity; subgroup analyses</td>
<td valign="top" align="left">Single task focus; moderate methodological quality; minimal attention/transformer coverage; no cross-modality dataset landscape/visualizations</td>
</tr>
<tr>
<td valign="top" align="left"><bold>References</bold></td>
<td valign="top" align="left"><bold>Journal/Title</bold></td>
<td valign="top" align="left"><bold>Scope</bold></td>
<td valign="top" align="left"><bold>Advantages</bold></td>
<td valign="top" align="left"><bold>Limitations (gaps our paper covers)</bold></td>
</tr>
<tr>
<td valign="top" align="left">Antony et al. (<xref ref-type="bibr" rid="B10">10</xref>)</td>
<td valign="top" align="left">AI-Augmented Imaging for Early PDAC Detection</td>
<td valign="top" align="left">Narrative review of CT-centric AI for early PDAC</td>
<td valign="top" align="left">Highlights pre-diagnostic detection and segmentation; states barriers clearly</td>
<td valign="top" align="left">CT-focused; lacks comprehensive attention/transformer survey; minimal coverage of non-imaging modalities or dataset visualizations</td>
</tr>
<tr>
<td valign="top" align="left">Yu et al. (<xref ref-type="bibr" rid="B84">84</xref>)</td>
<td valign="top" align="left">Combining Multimodal Medical Imaging and AI for Early Diagnosis of Pancreatic Cancer</td>
<td valign="top" align="left">Perspective/review advocating for multimodal imaging fusion</td>
<td valign="top" align="left">Articulates need for multimodal fusion; summarizes imaging performance and interpretability issues</td>
<td valign="top" align="left">Opinion piece; acknowledges current research is single-modality; does not catalog attention/transformers; no broad dataset visualizations</td>
</tr></tbody>
</table>
</table-wrap>
<p>While recent reviews organize AI studies by <bold>imaging modality</bold> [Podina et al. (<xref ref-type="bibr" rid="B11">11</xref>), Yao et al. (<xref ref-type="bibr" rid="B12">12</xref>)], <bold>clinical task</bold> (detection, segmentation, classification), <bold>data type</bold> [EHR-only: Mishra et al. (<xref ref-type="bibr" rid="B13">13</xref>)], or <bold>clinical subtype</bold> [IPMN: Qadir et al. (<xref ref-type="bibr" rid="B7">7</xref>); PDAC vs. pancreatitis: Zhang et al. (<xref ref-type="bibr" rid="B14">14</xref>)]&#x02014;approaches that effectively catalog method&#x02013;modality pairings for specific protocols&#x02014;our <bold>generational taxonomy</bold> provides a distinct meta-level view by organizing studies into three methodological waves: conventional machine-learning/radiomics pipelines (Generation 1, 2015&#x02013;2020), deep-learning CNNs (Generation 2, 2017&#x02013;2023), and attention/transformer-based multi-modal fusion (Generation 3, 2020&#x02013;present). This temporal framework enables us to <bold>quantify performance evolution</bold> [AUC 0.84&#x02013;0.98 in Generation 1 &#x02192; 0.92&#x02013;0.99 in Generation 2 &#x02192; 0.996 in Generation 3 (<xref ref-type="bibr" rid="B15">15</xref>); segmentation Dice 0.19&#x02013;0.70 &#x02192; 0.57&#x02013;0.87], synthesize findings <bold>across all modalities</bold> (CT/MRI, histopathology, genomics, biomarkers) within a single coherent framework, reveal <bold>validation trends</bold> (external validation 50% &#x02192; 57% &#x02192; 67% across generations; multi-center validation 0% &#x02192; 29% &#x02192; 22%), and identify the <bold>research frontier</bold> (attention-augmented, domain-adaptive architectures) by extrapolating from the architectural trajectory&#x02014;advantages not accessible through modality-specific or task-specific organization.
As detailed in <xref ref-type="table" rid="T2">Table 2</xref>, this generational perspective complements existing reviews by revealing <bold>temporal, cross-modality, and performance-evolution patterns</bold> that become visible only when viewing AI methods as successive generations building on architectural innovations.</p>
<p><xref ref-type="fig" rid="F2">Figure 2</xref> illustrates global search interest in &#x0201C;Artificial Intelligence&#x0201D; from 2015 to 2025, highlighting sharp public attention peaks that coincide with major AI advances (<xref ref-type="bibr" rid="B16">16</xref>). Such public and media surges often track research investment and adoption cycles that accelerate translation of AI techniques into healthcare applications.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Global Google Trends interest for &#x0201C;Artificial Intelligence,&#x0201D; 2015&#x02013;2025 (Google Trends, 2025).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0002.tif">
<alt-text content-type="machine-generated">Chart of global Google Trends search interest for &#x0201C;Artificial Intelligence&#x0201D; from 2015 to 2025, showing overall growth in interest with sharp peaks that coincide with major AI advances.</alt-text>
</graphic>
</fig>
<p>This review makes several unique contributions to the growing literature on AI-enabled pancreatic cancer detection and prognosis. First, it conducts a rigorous and reproducible search across PubMed and Google Scholar covering 2015&#x02013;2025 to capture more than sixty peer-reviewed studies that apply machine learning, deep learning or attention-based methods to imaging, histopathology and molecular data. The search strategy reduces the risk of missed early-stage or conference works by including theses and preprints in addition to indexed journals.</p>
<list list-type="bullet">
<list-item><p><bold>Comprehensive literature scope:</bold> Unlike prior reviews that focus exclusively on electronic health record risk models or histopathology, we synthesize results from multi-modal data sources&#x02014;radiomics, CT/MRI imaging, endoscopic ultrasound, histopathology, genomics, and proteomics&#x02014;spanning more than 60 studies published between 2015 and 2025.</p></list-item>
<list-item><p><bold>Structured classification of methods and tasks:</bold> We introduce a clear taxonomy that groups AI approaches into three methodological generations&#x02014;classical machine-learning/radiomic pipelines, deep neural networks and attention- or transformer-enhanced architectures&#x02014;and map them to clinical tasks (detection, segmentation, classification/subtyping, and prognosis). This framework facilitates cross-comparison of algorithmic advances and reveals trends across modalities.</p></list-item>
<list-item><p><bold>Critical appraisal of methodological quality:</bold> Beyond reporting accuracy, AUROC, and Dice scores, we assess whether studies used patient-level splits, external validation cohorts and proper prevalence reporting. Our synthesis highlights that many models are retrospective and single-center with limited robustness, underscoring the need for prospective, multi-center evaluation.</p></list-item>
<list-item><p><bold>Evidence for attention and multi-modal fusion:</bold> We show that attention-augmented and transformer-based models achieve consistent improvements in diagnostic accuracy and segmentation performance across tasks. By fusing imaging with biomarkers and clinical variables, multi-modal networks outperform single-modality baselines, demonstrating a path toward earlier, non-invasive detection.</p></list-item>
<list-item><p><bold>Identifying gaps and future research directions:</bold> Our review discusses issues rarely addressed in previous surveys&#x02014;domain shift, algorithmic fairness, data governance and integration into clinical workflows&#x02014;and proposes a forward-looking research agenda. Recommendations include conducting prospective trials, adopting common reporting standards, investing in domain generalization and federated learning, and co-designing interpretable algorithms with clinicians.</p></list-item>
<list-item><p><bold>Bridging disparate literatures:</bold> By contextualizing machine-learning advances alongside epidemiological and risk-factor data and comparing AI methods across modalities, we provide a holistic understanding of how AI can support early pancreatic cancer detection and personalized management. This integrative perspective is largely missing from existing domain-specific reviews.</p></list-item>
</list>
<p>This set of contributions positions our work as a comprehensive, methodologically rigorous and forward-looking synthesis that highlights both the promise of attention-based, multi-modal AI and the steps required for safe and equitable clinical translation.</p>
<p>This review is organized to walk the reader from clinical motivation to actionable research priorities (<xref ref-type="fig" rid="F3">Figure 3</xref>): Section 1 provides the clinical and epidemiological background (incidence, mortality, key risk factors) and motivates the need for improved surveillance and AI-augmented detection, with focused subparts on risk factors, surveillance, and the role of emerging AI methods; Section 2 details the literature search and selection strategy (databases, search queries, deduplication, title/abstract triage, inclusion/exclusion criteria, full-text review and synthesis protocol) so readers can reproduce the corpus assembly; Section 3 synthesizes the surveyed AI work by methodological generation&#x02014;Section 3.1 conventional machine-learning and radiomics pipelines, Section 3.2 deep-learning models for detection/segmentation/classification, and Section 3.3 attention- and transformer-based architectures and multi-modal fusion&#x02014;highlighting representative studies and performance patterns; Section 4 catalogs the data sources that underpin the field (Section 4.1 two-dimensional imaging, Section 4.2 three-dimensional CT/MRI volumes, Section 4.3 radiomics, Section 4.4 clinical/registry data, Section 4.5 genomic/molecular assays, and Section 4.6 biofluid/biomarker panels) and links data regimes to suitable model classes; Section 5 discusses cross-cutting issues, methodological gaps, and interpretation (e.g., patient-level splitting, external validation, and domain shift); Section 6 outlines emerging directions and concrete recommendations (prospective trials, reporting standards, domain generalization, fairness, and federated learning); and Section 7 concludes with priorities for clinical translation.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Schematic overview of the review framework outlining the progression from clinical motivation to emerging Artificial Intelligence research directions in pancreatic cancer detection and prognosis (2015&#x02013;2025).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0003.tif">
<alt-text content-type="machine-generated">Flowchart summarizing the review framework for advances in pancreatic cancer detection and prognosis (2015&#x02013;2025) with seven sections: Introduction, Literature Search and Selection, AI Methods, Data Sources, Discussion, Emerging Directions, and Conclusion. Each section lists key topics, such as clinical burden, AI methods, and prospective trials.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2">
<label>2</label>
<title>Literature search and selection strategy</title>
<p>We performed a structured, reproducible literature search and selection procedure to identify primary studies that applied machine-learning (ML), deep-learning (DL), or attention-based methods to pancreatic cancer imaging, pathology or molecular data. The goal was to capture methods that directly address detection, segmentation, classification or prognostication in pancreatic disease using computational approaches. The overall workflow is illustrated in <xref ref-type="fig" rid="F4">Figure 4</xref>.</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA)-style flow diagram summarizing the study selection process for this review.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0004.tif">
<alt-text content-type="machine-generated">PRISMA-style flow diagram of study selection: approximately 3,000 records identified from PubMed and Google Scholar (2015&#x02013;2025); about 1,100 duplicates removed; roughly 1,900 unique records screened by title and abstract; 100 reports assessed in full text; and the final set of studies retained for synthesis.</alt-text>
</graphic>
</fig>
<p>This systematic review was conducted according to the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) 2020 statement. The protocol was prospectively registered with the Open Science Framework (OSF; DOI: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.17605/OSF.IO/2DVHJ">https://doi.org/10.17605/OSF.IO/2DVHJ</ext-link>).</p>
<sec>
<label>2.1</label>
<title>Data sources and search strategy</title>
<p>Searches were run in PubMed and Google Scholar for the period 2015&#x02013;2025. We selected these two resources because PubMed provides comprehensive coverage of biomedical journal literature and indexing (including MEDLINE), while Google Scholar expands coverage to conference proceedings, theses, preprints, and other scholarly material that may report novel computational methods not yet indexed in PubMed. The combined use reduces the chance of missing relevant methodological or early-stage engineering reports.</p>
<p>The search combined keywords and simple Boolean logic to balance sensitivity and precision. Example search queries used (adapted to each database syntax) were:</p>
<preformat>
&#x000A0;&#x000A0;(&#x0201C;Pancreatic&#x000A0;Cancer&#x000A0;Detection&#x0201D;&#x000A0;OR
&#x000A0;&#x000A0;&#x0201C;Pancreatic&#x000A0;Cancer&#x000A0;Detection&#x000A0;Using&#x000A0;AI&#x0201D;&#x000A0;OR
&#x000A0;&#x000A0;&#x0201C;Attention&#x000A0;on&#x000A0;Pancreatic&#x000A0;Cancer&#x0201D;)
</preformat>
<p>Searches were restricted to records published between 2015 and 2025 (inclusive). No language restriction was applied at the search stage, but non-English records were screened on title/abstract and translated where necessary for eligibility assessment.</p></sec>
<sec>
<label>2.2</label>
<title>Deduplication and initial yield</title>
<p>The initial combined search returned approximately 3,000 records. We performed automatic deduplication using reference-management software followed by manual inspection; roughly 1,100 duplicate records were removed, leaving about 1,900 unique records for title and abstract screening.</p></sec>
<sec>
<label>2.3</label>
<title>Title and abstract screening</title>
<p>Title and abstract screening was adopted as the principal triage step to exclude clearly irrelevant records prior to full-text review. This approach was necessary because the literature volume is large and many records that mention &#x0201C;pancreatic cancer&#x0201D; are peripheral (for example, biomarker discovery, basic biology, or therapeutic studies) rather than studies that develop or evaluate computational detection or image-based models; screening titles and abstracts therefore provides an efficient way to prioritize papers that explicitly describe ML/DL/attention methods or algorithmic evaluation. Equally important, title/abstract screening increases methodological focus: many biomedical papers use AI-related terms in passing, and the abstract is usually the first reliable source for whether a paper reports model architectures, training data, evaluation metrics or genuine algorithmic contributions rather than superficial references. Finally, documenting and applying explicit title/abstract criteria improves practical reproducibility by making the triage decisions auditable and repeatable. Two reviewers independently screened all titles and abstracts against the pre-specified eligibility criteria; disagreements were resolved through discussion and, where necessary, by arbitration from a senior reviewer. Following this procedure the corpus was reduced to 100 studies selected for full-text evaluation.</p></sec>
<sec>
<label>2.4</label>
<title>Inclusion and exclusion criteria</title>
<sec>
<label>2.4.1</label>
<title>Inclusion criteria</title>
<list list-type="order">
<list-item><p>Studies that apply ML, DL, or attention-based methods to pancreatic cancer imaging, histopathology, or molecular data for tasks such as detection, segmentation, classification, subtype discrimination or prognostication.</p></list-item>
<list-item><p>Original research articles reporting methods and evaluation (i.e., not purely review articles or opinion pieces).</p></list-item>
<list-item><p>Studies that report quantitative performance metrics (e.g., accuracy, AUROC, Dice, sensitivity/specificity) on defined datasets.</p></list-item>
</list></sec>
<sec>
<label>2.4.2</label>
<title>Exclusion criteria</title>
<list list-type="order">
<list-item><p>Purely biological or wet-lab studies without computational model development or evaluation.</p></list-item>
<list-item><p>Reviews, editorials, commentaries, perspective pieces and protocols without primary experimental results.</p></list-item>
<list-item><p>Papers that do not provide enough methodological detail to interpret the model (for example, a short abstract only with no methods or results).</p></list-item>
</list></sec></sec>
<sec>
<label>2.5</label>
<title>Full-text review and final selection</title>
<p>Full texts of the 100 candidate studies identified during title and abstract screening were retrieved and assessed in detail. During this stage we confirmed that the studies (i) presented sufficient methodological detail (data sources, preprocessing, model architecture, training, and validation strategy), (ii) used appropriate evaluation procedures (e.g., train/test splits, cross-validation, patient-level separation where applicable), and (iii) reported metrics relevant to the tasks claimed. Studies that failed to meet these standards or were duplicative (e.g., extended conference abstract later published as a journal article) were excluded. The final included set for synthesis is described in Section 3 and in <xref ref-type="table" rid="T3">Table 3</xref>.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Comprehensive methodological quality analysis: validation strategies across model generations.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Model generation</bold></th>
<th valign="top" align="center"><bold>Single-center internal only</bold></th>
<th valign="top" align="center"><bold>Multi-center studies</bold></th>
<th valign="top" align="left"><bold>External validation</bold></th>
<th valign="top" align="left"><bold>Performance metrics</bold></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Conventional ML (8 studies)</bold></td>
</tr>
<tr>
<td valign="top" align="left">Internal validation only</td>
<td valign="top" align="center">3/8 (37.5%)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">0/8</td>
<td valign="top" align="left">&#x02014;</td>
</tr>
<tr>
<td valign="top" align="left">Single-center &#x0002B; external</td>
<td valign="top" align="center">4/8 (50%)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">4/8</td>
<td valign="top" align="left">AUC 0.84&#x02013;0.98</td>
</tr>
<tr>
<td valign="top" align="left">Large-scale/Registry</td>
<td valign="top" align="center">1/8 (12.5%)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">&#x02014;</td>
<td valign="top" align="left">99.97% acc.</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Subtotal:</bold></td>
<td valign="top" align="center"><bold>3/8 internal</bold></td>
<td valign="top" align="center"><bold>0/8 (0%)</bold></td>
<td valign="top" align="left"><bold>4/8 (50%)</bold></td>
<td/>
</tr>
<tr>
<td valign="top" align="left"><italic>Notable studies:</italic></td>
<td valign="top" align="center" colspan="4">(<xref ref-type="bibr" rid="B26">26</xref>) (ext. AUC 0.98); (<xref ref-type="bibr" rid="B22">22</xref>) (EHR 29,230 cases, AUC 0.84);</td>
</tr>
<tr>
<td/>
<td valign="top" align="center" colspan="4">PancRISK (ext. AUC 0.94); SEER registry (99.97% acc.)</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Deep learning (7 studies)</bold></td>
</tr>
<tr>
<td valign="top" align="left">Internal validation only</td>
<td valign="top" align="center">3/7 (43%)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">0/7</td>
<td valign="top" align="left">Acc. 99.8%</td>
</tr>
<tr>
<td valign="top" align="left">Single-center &#x0002B; external</td>
<td valign="top" align="center">2/7 (29%)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">2/7</td>
<td valign="top" align="left">AUC 0.95&#x02013;0.99</td>
</tr>
<tr>
<td valign="top" align="left">Multi-center &#x0002B; external</td>
<td valign="top" align="center">2/7 (29%)</td>
<td valign="top" align="center">2/7 (29%)</td>
<td valign="top" align="left">2/7</td>
<td valign="top" align="left">AUC 0.92</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Subtotal:</bold></td>
<td valign="top" align="center"><bold>3/7 internal</bold></td>
<td valign="top" align="center"><bold>2/7 (29%)</bold></td>
<td valign="top" align="left"><bold>4/7 (57%)</bold></td>
<td/>
</tr>
<tr>
<td valign="top" align="left"><italic>Notable studies:</italic></td>
<td valign="top" align="center" colspan="4">(<xref ref-type="bibr" rid="B46">46</xref>) (Taiwan&#x0002B;US multi-center, ext. AUC 0.92); Chen et al. (2023) (nationwide ext. AUC 0.95);</td>
</tr>
<tr>
<td/>
<td valign="top" align="center" colspan="4">(<xref ref-type="bibr" rid="B60">60</xref>) (Korean multi-center, <bold>performance drop 94.3% &#x02192; 82.5%</bold>)</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Attention/transformer (9 studies)</bold></td>
</tr>
<tr>
<td valign="top" align="left">Internal validation only</td>
<td valign="top" align="center">3/9 (33%)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">0/9</td>
<td valign="top" align="left">Acc. 92%&#x02013;94%</td>
</tr>
<tr>
<td valign="top" align="left">Single-center &#x0002B; external</td>
<td valign="top" align="center">4/9 (44%)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">4/9</td>
<td valign="top" align="left">Dice 0.80&#x02013;0.87</td>
</tr>
<tr>
<td valign="top" align="left">Multi-center &#x0002B; external</td>
<td valign="top" align="center">2/9 (22%)</td>
<td valign="top" align="center">2/9 (22%)</td>
<td valign="top" align="left">2/9</td>
<td valign="top" align="left">AUC 0.96&#x02013;0.99</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Subtotal:</bold></td>
<td valign="top" align="center"><bold>3/9 internal</bold></td>
<td valign="top" align="center"><bold>2/9 (22%)</bold></td>
<td valign="top" align="left"><bold>6/9 (67%)</bold></td>
<td/>
</tr>
<tr>
<td valign="top" align="left"><italic>Notable studies:</italic></td>
<td valign="top" align="center" colspan="4">PANDA (<bold>9-center study</bold>, ext. AUC 0.987 lesion detection, 0.984 across 9 external sites);</td>
</tr>
<tr>
<td/>
<td valign="top" align="center" colspan="4">DA-TransUNet (6 datasets validation); (<xref ref-type="bibr" rid="B77">77</xref>) (multi-center LNM, AUC 0.83)</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Overall (24 studies)</bold></td>
</tr>
<tr>
<td valign="top" align="left">Single-center, internal only</td>
<td valign="top" align="center">9/24 (37.5%)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">&#x02014;</td>
<td valign="top" align="left">&#x02014;</td>
</tr>
<tr>
<td valign="top" align="left">Single-center &#x0002B; external val.</td>
<td valign="top" align="center">10/24 (41.7%)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">10/24</td>
<td valign="top" align="left">&#x02014;</td>
</tr>
<tr>
<td valign="top" align="left">Multi-center studies</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="center">4/24 (16.7%)</td>
<td valign="top" align="left">&#x02014;</td>
<td valign="top" align="left">&#x02014;</td>
</tr>
<tr>
<td valign="top" align="left">External validation (total)</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="center">&#x02014;</td>
<td valign="top" align="left">14/24 (58.3%)</td>
<td valign="top" align="left">&#x02014;</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Summary:</bold></td>
<td valign="top" align="center"><bold>37.5% internal</bold></td>
<td valign="top" align="center"><bold>16.7% multi-ctr</bold></td>
<td valign="top" align="left"><bold>58.3% external</bold></td>
<td/>
</tr></tbody>
</table>
<table-wrap-foot>
<p><bold>Key findings from comprehensive analysis:</bold></p>
<p>&#x02022; 37.5% of studies relied exclusively on single-center, internal-only validation, risking overfitting to local protocols.</p>
<p>&#x02022; Only 16.7% employed multi-center designs, limiting assessment of cross-institutional generalizability.</p>
<p>&#x02022; 58.3% reported external validation, with Attention/Transformer models showing highest adoption (67%).</p>
<p>&#x02022; Multi-center studies documented performance degradation: (<xref ref-type="bibr" rid="B60">60</xref>) showed <bold>11.8% accuracy drop</bold> (94.3% &#x02192; 82.5%) from internal Korean cohort to external U.S. validation, confirming domain shift effects.</p>
<p>&#x02022; PANDA (9-center study) maintained strong performance (AUC 0.984&#x02013;0.987) through large-scale training (3,208 cases).</p>
<p>&#x02022; Large-scale studies [(<xref ref-type="bibr" rid="B22">22</xref>): 29,230 cases; SEER registry] showed robust performance but lacked geographic diversity.</p>
<p><bold>Bias implications:</bold> Single-center designs introduce sampling bias; limited multi-center validation restricts clinical translation.</p>
</table-wrap-foot>
</table-wrap></sec>
<sec>
<label>2.6</label>
<title>Classification and synthesis</title>
<p>Included studies were grouped by method class (radiomics &#x0002B; classical ML, CNN-based, transformer/attention-based, and hybrid) and by task (detection, segmentation, classification/subtyping, and prognosis). We synthesized results narratively and, where sufficient homogeneous data existed, reported ranges of key metrics stratified by task and method. Particular attention was paid to the presence or absence of patient-level splits, external validation, and prevalence reporting, since these factors strongly influence apparent performance.</p>
<p>In summary, the search and selection pipeline combined a sensitive database search, careful deduplication, a two-stage screening (title/abstract triage followed by full-text review), dual independent assessment at key stages, structured data extraction and quality appraisal. The emphasis on title/abstract screening as a triage step is intentional and pragmatic: given the large volume of literature on pancreatic cancer, it enables efficient identification of studies that explicitly present computational methods of interest while minimizing time spent on clearly out-of-scope biomedical reports.</p></sec>
<sec>
<label>2.7</label>
<title>PRISMA 2020 compliance</title>
<p><xref ref-type="table" rid="T4">Table 4</xref> provides a detailed mapping of manuscript content to PRISMA 2020 checklist items, demonstrating transparent adherence to systematic review reporting standards.</p>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>PRISMA 2020 Checklist Mapping to Manuscript Sections.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>PRISMA item</bold></th>
<th valign="top" align="left"><bold>Checklist requirement</bold></th>
<th valign="top" align="left"><bold>Location in manuscript</bold></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="3"><bold>TITLE &#x00026; ABSTRACT</bold></td>
</tr>
<tr>
<td valign="top" align="left">1. Title</td>
<td valign="top" align="left">Identify as systematic review</td>
<td valign="top" align="left">Title page: &#x0201C;From Radiomics to Transformers...&#x0201D;</td>
</tr>
<tr>
<td valign="top" align="left">2. Abstract</td>
<td valign="top" align="left">Structured abstract with PRISMA elements</td>
<td valign="top" align="left">Abstract: includes background, methods, results, conclusion</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="3"><bold>INTRODUCTION</bold></td>
</tr>
<tr>
<td valign="top" align="left">3. Rationale</td>
<td valign="top" align="left">Context of existing knowledge</td>
<td valign="top" align="left">Section 1: epidemiology, surveillance gaps, AI opportunities</td>
</tr>
<tr>
<td valign="top" align="left">4. Objectives</td>
<td valign="top" align="left">Explicit review objectives</td>
<td valign="top" align="left">Section 1 (Research Contributions): taxonomy, critical appraisal, gaps</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="3"><bold>METHODS</bold></td>
</tr>
<tr>
<td valign="top" align="left">5. Eligibility</td>
<td valign="top" align="left">Inclusion/exclusion criteria</td>
<td valign="top" align="left">Section 2.3: ML/DL/attention studies; Section 2.4: criteria lists</td>
</tr>
<tr>
<td valign="top" align="left">6. Information sources</td>
<td valign="top" align="left">Databases, dates searched</td>
<td valign="top" align="left">Section 2.1: PubMed &#x0002B; Google Scholar, 2015&#x02013;2025</td>
</tr>
<tr>
<td valign="top" align="left">7. Search strategy</td>
<td valign="top" align="left">Full search strings</td>
<td valign="top" align="left">Section 2.1: Boolean queries provided verbatim</td>
</tr>
<tr>
<td valign="top" align="left">8. Selection process</td>
<td valign="top" align="left">Screening methods, reviewers</td>
<td valign="top" align="left">Section 2.2&#x02013;2.3: dual independent screening, 100 &#x02192; 64 studies</td>
</tr>
<tr>
<td valign="top" align="left">9. Data collection</td>
<td valign="top" align="left">Data extraction methods</td>
<td valign="top" align="left">Section 2.6: grouped by method/task, metrics extracted</td>
</tr>
<tr>
<td valign="top" align="left">10a. Outcomes</td>
<td valign="top" align="left">Outcomes sought</td>
<td valign="top" align="left">Section 2.6: AUC, Dice, F1, accuracy, sensitivity, specificity</td>
</tr>
<tr>
<td valign="top" align="left">10b. Variables</td>
<td valign="top" align="left">Other variables</td>
<td valign="top" align="left"><xref ref-type="table" rid="T4">Tables 4</xref>&#x02013;<xref ref-type="table" rid="T6">6</xref>: model architectures, datasets, sample sizes</td>
</tr>
<tr>
<td valign="top" align="left">11. Risk of bias</td>
<td valign="top" align="left">Bias assessment methods</td>
<td valign="top" align="left">Section 5 (Discussion): patient-level splits, external validation assessed</td>
</tr>
<tr>
<td valign="top" align="left">12. Effect measures</td>
<td valign="top" align="left">Metrics used</td>
<td valign="top" align="left">Section 2.6, <xref ref-type="table" rid="T4">Tables 4</xref>&#x02013;<xref ref-type="table" rid="T6">6</xref>: AUC, Dice, accuracy ranges</td>
</tr>
<tr>
<td valign="top" align="left">13a. Synthesis eligibility</td>
<td valign="top" align="left">Study grouping decisions</td>
<td valign="top" align="left">Section 2.6: method class (ML/DL/attention) &#x0002B; task</td>
</tr>
<tr>
<td valign="top" align="left">13b. Data preparation</td>
<td valign="top" align="left">Handling missing data</td>
<td valign="top" align="left">Section 2.6: narrative synthesis for heterogeneous metrics</td>
</tr>
<tr>
<td valign="top" align="left">13c. Tabulation</td>
<td valign="top" align="left">Visual display methods</td>
<td valign="top" align="left"><xref ref-type="fig" rid="F2">Figure 2</xref> (PRISMA flow), <xref ref-type="table" rid="T4">Tables 4</xref>&#x02013;<xref ref-type="table" rid="T6">6</xref> (model summaries), <xref ref-type="table" rid="T7">Table 7</xref> (aggregation)</td>
</tr>
<tr>
<td valign="top" align="left">13d. Synthesis methods</td>
<td valign="top" align="left">Synthesis approach</td>
<td valign="top" align="left">Section 2.6: narrative synthesis; no meta-analysis due to heterogeneity</td>
</tr>
<tr>
<td valign="top" align="left">13e. Heterogeneity</td>
<td valign="top" align="left">Causes of heterogeneity</td>
<td valign="top" align="left">Section 5: dataset size, validation strategy, prevalence differences</td>
</tr>
<tr>
<td valign="top" align="left">13f. Sensitivity</td>
<td valign="top" align="left">Sensitivity analyses</td>
<td valign="top" align="left">Section 5: stratification by patient-level splits, external validation</td>
</tr>
<tr>
<td valign="top" align="left">14. Reporting bias</td>
<td valign="top" align="left">Missing results assessment</td>
<td valign="top" align="left">Section 5: publication bias noted; limited prospective studies</td>
</tr>
<tr>
<td valign="top" align="left">15. Certainty</td>
<td valign="top" align="left">Evidence certainty methods</td>
<td valign="top" align="left">Sections 5&#x02013;6: methodological quality, generalizability limitations discussed</td>
</tr>
<tr>
<td valign="top" align="left">16a. Study selection</td>
<td valign="top" align="left">Search results &#x0002B; flow</td>
<td valign="top" align="left"><xref ref-type="fig" rid="F2">Figure 2</xref>: 3,000 &#x02192; 1,900 &#x02192; 100 &#x02192; 64 studies</td>
</tr>
<tr>
<td valign="top" align="left">16b. Exclusions</td>
<td valign="top" align="left">Excluded studies</td>
<td valign="top" align="left">Section 2.5: duplicates, insufficient detail, non-computational</td>
</tr>
<tr>
<td valign="top" align="left">17. Characteristics</td>
<td valign="top" align="left">Study characteristics</td>
<td valign="top" align="left"><xref ref-type="table" rid="T4">Tables 4</xref>&#x02013;<xref ref-type="table" rid="T6">6</xref>: 24 representative studies with full details</td>
</tr>
<tr>
<td valign="top" align="left">18. Risk of bias</td>
<td valign="top" align="left">Bias assessments</td>
<td valign="top" align="left">Section 5: patient-level splits, single-center vs. multi-center</td>
</tr>
<tr>
<td valign="top" align="left">19. Individual results</td>
<td valign="top" align="left">Study-level results</td>
<td valign="top" align="left"><xref ref-type="table" rid="T4">Tables 4</xref>&#x02013;<xref ref-type="table" rid="T6">6</xref>: performance metrics per study</td>
</tr>
<tr>
<td valign="top" align="left">20a. Synthesis characteristics</td>
<td valign="top" align="left">Contributing studies</td>
<td valign="top" align="left">Section 3.1&#x02013;3.3: 60&#x0002B; studies grouped by method generation</td>
</tr>
<tr>
<td valign="top" align="left">20b. Synthesis results</td>
<td valign="top" align="left">Summary estimates</td>
<td valign="top" align="left"><xref ref-type="table" rid="T7">Table 7</xref>: AUC 0.84&#x02013;0.996, Dice 0.19&#x02013;0.87, F1 0.92&#x02013;0.97</td>
</tr>
<tr>
<td valign="top" align="left">20c. Heterogeneity causes</td>
<td valign="top" align="left">Heterogeneity investigations</td>
<td valign="top" align="left">Section 5: dataset heterogeneity, task differences explained</td>
</tr>
<tr>
<td valign="top" align="left">20d. Sensitivity results</td>
<td valign="top" align="left">Robustness assessments</td>
<td valign="top" align="left">Section 5: recommendations for patient-level validation</td>
</tr>
<tr>
<td valign="top" align="left">21. Reporting bias</td>
<td valign="top" align="left">Bias assessments</td>
<td valign="top" align="left">Sections 5&#x02013;6: retrospective bias, single-center limitations</td>
</tr>
<tr>
<td valign="top" align="left">22. Certainty</td>
<td valign="top" align="left">Evidence certainty</td>
<td valign="top" align="left">Sections 5&#x02013;6: moderate certainty; external validation needed</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="3"><bold>DISCUSSION</bold></td>
</tr>
<tr>
<td valign="top" align="left">23a. Interpretation</td>
<td valign="top" align="left">Results in context</td>
<td valign="top" align="left">Section 5: progression from ML to transformers contextualized</td>
</tr>
<tr>
<td valign="top" align="left">23b. Evidence limitations</td>
<td valign="top" align="left">Study limitations</td>
<td valign="top" align="left">Section 5: small datasets, slice-level leakage, prevalence issues</td>
</tr>
<tr>
<td valign="top" align="left">23c. Process limitations</td>
<td valign="top" align="left">Review limitations</td>
<td valign="top" align="left">Section 2.3: title/abstract screening rationale, no meta-analysis</td>
</tr>
<tr>
<td valign="top" align="left">23d. Implications</td>
<td valign="top" align="left">Practice/policy/research</td>
<td valign="top" align="left">Section 6: prospective trials, reporting standards, federated learning</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="3"><bold>OTHER INFORMATION</bold></td>
</tr>
<tr>
<td valign="top" align="left">24a. Registration</td>
<td valign="top" align="left">Protocol registration</td>
<td valign="top" align="left">Section 2: OSF registration (doi: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.17605/OSF.IO/2DVHJ">10.17605/OSF.IO/2DVHJ</ext-link>)</td>
</tr>
<tr>
<td valign="top" align="left">24b. Protocol access</td>
<td valign="top" align="left">Protocol availability</td>
<td valign="top" align="left">Section 2: OSF link provided</td>
</tr>
<tr>
<td valign="top" align="left">24c. Amendments</td>
<td valign="top" align="left">Protocol deviations</td>
<td valign="top" align="left">Not applicable; protocol followed as registered</td>
</tr>
<tr>
<td valign="top" align="left">25. Support</td>
<td valign="top" align="left">Funding sources</td>
<td valign="top" align="left">Acknowledgments/Funding section (if present)</td>
</tr>
<tr>
<td valign="top" align="left">26. Competing interests</td>
<td valign="top" align="left">Conflicts of interest</td>
<td valign="top" align="left">Declarations section (journal requirement)</td>
</tr>
<tr>
<td valign="top" align="left">27. Data availability</td>
<td valign="top" align="left">Materials availability</td>
<td valign="top" align="left"><xref ref-type="table" rid="T4">Tables 4</xref>&#x02013;<xref ref-type="table" rid="T6">6</xref>, Section 4: datasets cataloged; code not released</td>
</tr></tbody>
</table>
</table-wrap>
</sec></sec>
<sec id="s3">
<label>3</label>
<title>AI methods in pancreatic cancer detection and prognosis</title>
<p>This section synthesizes more than sixty peer-reviewed studies on artificial-intelligence methods for pancreatic-cancer detection, prognosis and treatment monitoring published between 2015 and 2025. We group the papers into three methodological generations&#x02014;conventional machine-learning (ML) pipelines, deep-learning (DL) models, and attention- or transformer-enhanced frameworks&#x02014;while emphasizing representative contributions and situating related work in context (<xref ref-type="fig" rid="F5">Figure 5</xref>).</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>Distribution of Artificial Intelligence methodologies in reviewed pancreatic cancer studies (2015&#x02013;2025).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0005.tif">
<alt-text content-type="machine-generated">Pie chart illustrating the distribution of pancreatic cancer research papers by category. Conventional and ML: 37.2%, DL Based: 32.1%, Attention Based: 30.8%. Each category is represented by a distinct color.</alt-text>
</graphic>
</fig>
<p><xref ref-type="fig" rid="F6">Figure 6</xref> shows the global trend of AI publications by country and region between 2000 and 2025 (<xref ref-type="bibr" rid="B17">17</xref>). China&#x00027;s rapid ascent in research output, combined with continued contributions from the United States and the EU, helps explain why deep-learning and transformer approaches have become dominant in biomedical imaging and diagnostics.</p>
<fig position="float" id="F6">
<label>Figure 6</label>
<caption><p>Global trend of Artificial Intelligence publications (%) by country/region, 2000&#x02013;2025 Organization for Economic Co-operation and Development (OECD.AI), OpenAlex/Scopus.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0006.tif">
<alt-text content-type="machine-generated">Line chart showing the trend of AI publications by country from 2000 to 2025. China shows a steep increase, surpassing others after 2005. The EU27 and the United States decrease over time. Other countries like Japan, India, and the UK show relatively stable or slight declining trends.</alt-text>
</graphic>
</fig>
<sec>
<label>3.1</label>
<title>Conventional machine-learning approaches</title>
<p>Machine learning (ML) represents a family of algorithms that can automatically discover patterns in data and make predictions without being explicitly programmed with fixed rules. Unlike traditional statistical models that rely heavily on handcrafted assumptions, ML systems can flexibly learn complex relationships from structured (e.g., electronic health records, genomics) and unstructured (e.g., imaging, text) data. Commonly used paradigms include supervised learning, where models are trained on labeled datasets (e.g., classification or regression tasks), and unsupervised learning, which focuses on uncovering hidden structures (e.g., clustering, dimensionality reduction). In healthcare, ML offers the ability to integrate diverse data modalities, identify early biomarkers, and support clinical decision-making by providing scalable and adaptive predictive frameworks.</p>
<p>To ground this in formalism, consider supervised learning where the goal is to learn a function <inline-formula><mml:math id="M1"><mml:mrow><mml:mi>h</mml:mi><mml:mo>:</mml:mo><mml:mrow><mml:mi mathvariant="script">X</mml:mi></mml:mrow><mml:mo>&#x02192;</mml:mo><mml:mrow><mml:mi mathvariant="script">Y</mml:mi></mml:mrow></mml:mrow></mml:math></inline-formula> from labeled data <inline-formula><mml:math id="M2"><mml:mrow><mml:msubsup><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup></mml:mrow></mml:math></inline-formula>. A <bold>loss function</bold> <italic>L</italic>(<italic>h</italic>(<italic>x</italic><sub><italic>i</italic></sub>), <italic>y</italic><sub><italic>i</italic></sub>) quantifies prediction error, guiding optimization via empirical risk minimization:</p>
<disp-formula id="E1"><mml:math id="M3"><mml:mrow><mml:msub><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mtext>emp</mml:mtext></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:mi>L</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>h</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>The optimal hypothesis is</p>
<disp-formula id="E2"><mml:math id="M4"><mml:mrow><mml:msup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mo class="qopname">arg</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">min</mml:mo></mml:mrow><mml:mrow><mml:mi>h</mml:mi><mml:mo>&#x02208;</mml:mo><mml:mrow><mml:mi mathvariant="script">H</mml:mi></mml:mrow></mml:mrow></mml:munder></mml:mstyle><mml:msub><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mtext>emp</mml:mtext></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>where <inline-formula><mml:math id="M5"><mml:mrow><mml:mrow><mml:mi mathvariant="script">H</mml:mi></mml:mrow></mml:mrow></mml:math></inline-formula> denotes the hypothesis space (e.g., decision trees, support vector machines). Common losses include mean squared error for regression and hinge loss for margin-based classifiers such as SVMs. Training typically proceeds using optimization techniques such as gradient descent, adapting parameters &#x003B8; iteratively to reduce <italic>R</italic><sub>emp</sub>.</p>
<p>Early investigations relied on structured data such as transcriptomics, biofluids, registries and hand-crafted radiomic descriptors. Ojha et al. (<xref ref-type="bibr" rid="B18">18</xref>) presented <italic>Gap-App</italic>, a sex-specific web tool that predicts 3-year survival for pancreatic-ductal adenocarcinoma (PDAC) directly from RNA-Seq profiles. Separate Random-Forest models for men and women achieved training accuracies of 90.33% and 90.40%, with independent-test accuracies of 81.25% and 89.47%, consistently outperforming a pooled model. Urine-biomarker studies followed: CatBoost reached 91.89% overall accuracy on the LYVE1&#x02013;creatinine&#x02013;REG1B&#x02013;TFF1 panel and achieved 1.00 recall for the pancreatic-cancer class, eclipsing Random Forest and LightGBM baselines (<xref ref-type="bibr" rid="B19">19</xref>). A multi-omics decision system that stacked XGBoost with AdaBoost similarly attained the highest F<sub>1</sub> among nine competing classifiers on a large protein&#x02013;gene dataset (<xref ref-type="bibr" rid="B20">20</xref>).</p>
<p>Large registries enabled population-scale modeling. Using 31,000 cleaned cases from SEER, Decision-Tree models predicted tumor stage with 99.97% test accuracy and survivability with 92.1%, although the authors flagged over-fitting risk given perfect training scores for certain ensembles (<xref ref-type="bibr" rid="B21">21</xref>). On 29,000,000 electronic health-record rows, an XGBoost model that distilled 18,220 variables to 582 predictors identified 58% of late-stage cancers a median 24 months early at 90% specificity (AUC 0.84) (<xref ref-type="bibr" rid="B22">22</xref>). Logistic-activation ANNs trained on NHIS and PLCO surveys (800,114 participants) achieved AUC 0.85 and enabled a three-tier risk stratification that misclassified fewer than 1% of cancers into the lowest-risk group (<xref ref-type="bibr" rid="B23">23</xref>). A separate Taiwanese claims analysis built a 4-year risk model for type-2 diabetes patients; Linear Discriminant Analysis delivered AUROC 0.9073 with 84.3% accuracy (<xref ref-type="bibr" rid="B24">24</xref>). Complementary approaches fused an MLP feature extractor with an SVM to create the <italic>AI-Powered Pancreas Navigator</italic>, posting 98.41% accuracy on NHIS data and earmarked for EMR deployment (<xref ref-type="bibr" rid="B25">25</xref>).</p>
<p>Radiomics advanced conventional pipelines toward imaging. Daily delta-radiomics of non-contrast CT predicted chemoradiation response with CV-AUC 0.94 and external AUC 0.98 after only 2&#x02013;4 weeks, using a Bayesian-regularized neural network and three key features (kurtosis&#x02013;coarseness&#x02013;NESTD) (<xref ref-type="bibr" rid="B26">26</xref>). Radiomics-based ML (volumetric pancreas segmentation &#x02192; 88 radiomic features, LASSO &#x02192; 32 selected) with an SVM classifier detected prediagnostic PDAC up to &#x02248;386&#x02013;398 days before clinical diagnosis (AUC = 0.98; sensitivity = 95.5%, specificity = 90.3%), substantially outperforming radiologists (mean AUC &#x02248;0.66) (<xref ref-type="bibr" rid="B27">27</xref>). Pretreatment FDG-PET radiomics confirmed GLZLM-GLNU heterogeneity as an independent one-year survival factor, outperforming clinical staging alone (<xref ref-type="bibr" rid="B28">28</xref>). Mucin-promoter methylation fed to SVMs and shallow NNs remained prognostic beyond standard clinicopathologic covariates (<xref ref-type="bibr" rid="B29">29</xref>). Hand-crafted features also bolstered niche applications: age-stratified CAD on endoscopic ultrasound improved sensitivity by 4&#x02013;6 pp in each age band (<xref ref-type="bibr" rid="B30">30</xref>), and IANFIS models with Bayesian hyper-parameter search reached 99.95% CT accuracy while simultaneously segmenting pancreas and tumor (<xref ref-type="bibr" rid="B31">31</xref>).</p>
<p>Biofluid and liquid-biopsy work flourished in parallel. Logistic regression on urine biomarkers yielded <italic>PancRISK</italic> with AUC 0.94; when combined with CA19-9 the strategy delivered 96% sensitivity and 96% specificity (<xref ref-type="bibr" rid="B32">32</xref>). A six-amino-acid plasma index achieved validation AUCs of 0.86 for all PDAC and 0.81 for stage IIA&#x02013;IIB tumors (<xref ref-type="bibr" rid="B33">33</xref>). Lightweight 1D CNN&#x02013;LSTM models diagnosed PDAC from urine proteomics with 97% accuracy and AUC 0.98, surpassing MLP and classical ML baselines by over 20 pp (<xref ref-type="bibr" rid="B34">34</xref>). Digital PCR detection of KRAS mutations in tissue, circulating DNA or exosomes consistently predicted worse survival (<xref ref-type="bibr" rid="B35">35</xref>&#x02013;<xref ref-type="bibr" rid="B39">39</xref>). Circulating-tumor-cell enumeration on NanoVelcro chips provided 75.0% sensitivity and 96.4% specificity for diagnosis and discriminated metastatic disease when counts exceeded three CTCs per 4 mL (<xref ref-type="bibr" rid="B40">40</xref>). Pre-diagnostic CA19-9 elevations heralded cancer up to two years prior and correlated with poorer prognosis; CA125 added value in CA19-9-negative cases (<xref ref-type="bibr" rid="B41">41</xref>). Exosomal protein&#x02013;miRNA panels achieved validation sensitivity/specificity of 1.00/0.80 across benign and malignant controls (<xref ref-type="bibr" rid="B42">42</xref>), while Lewis-negative subgroups benefited from alternative serum markers CEA and CA125 (AUCs 0.89 and 0.85) (<xref ref-type="bibr" rid="B43">43</xref>).</p>
<p>Radiologic context for true early lesions emerged from a 14-center Japanese cohort, which highlighted main-duct dilatation and pancreatic-juice cytology as pivotal for diagnosing stage 0 and I disease, translating into &#x0003E;90% ten-year survival after resection (<xref ref-type="bibr" rid="B44">44</xref>). Exploratory therapeutics such as plasma-activated medium induced ROS-mediated apoptosis and cut xenograft volume by two-thirds without harming normal tissues (<xref ref-type="bibr" rid="B45">45</xref>).</p></sec>
<sec>
<label>3.2</label>
<title>Deep learning approaches</title>
<p>Deep learning (DL) is a branch of machine learning that uses deep, multi-layer neural networks to learn hierarchical feature representations directly from raw data (images, volumes, signals, or text). Unlike classical methods that depend on handcrafted features, DL discovers progressively abstract patterns through stacked nonlinear layers, enabling powerful end-to-end pipelines for detection, segmentation, and classification across modalities and patient cohorts.</p>
<p>Formally, a deep network implements a parameterised mapping <inline-formula><mml:math id="M6"><mml:mrow><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow></mml:msub><mml:mo>:</mml:mo><mml:msup><mml:mrow><mml:mi>&#x0211D;</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi></mml:mrow></mml:msup><mml:mo>&#x02192;</mml:mo><mml:mrow><mml:mi mathvariant="script">Y</mml:mi></mml:mrow></mml:mrow></mml:math></inline-formula>, where <inline-formula><mml:math id="M7"><mml:mrow><mml:mi>&#x003B8;</mml:mi><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msubsup></mml:mrow></mml:math></inline-formula> are layer-wise weights and biases. With <italic>h</italic><sup>[0]</sup> &#x0003D; <italic>x</italic> the input, a typical feedforward layer is</p>
<disp-formula id="E3"><mml:math id="M8"><mml:mrow><mml:msup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mi>&#x003C3;</mml:mi><mml:mo>(</mml:mo><mml:msup><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>l</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>&#x0002B;</mml:mo><mml:msup><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>)</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x02003;&#x000A0;</mml:mtext><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mi>L</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>for a nonlinear activation &#x003C3; (e.g., ReLU, sigmoid). Given training pairs <inline-formula><mml:math id="M9"><mml:mrow><mml:msubsup><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup></mml:mrow></mml:math></inline-formula>, learning minimizes the empirical risk</p>
<disp-formula id="E4"><mml:math id="M10"><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mrow><mml:mi mathvariant="script">R</mml:mi></mml:mrow></mml:mrow><mml:mo>^</mml:mo></mml:mover><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:mrow><mml:mi mathvariant="script">L</mml:mi></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>where <inline-formula><mml:math id="M11"><mml:mrow><mml:mrow><mml:mi mathvariant="script">L</mml:mi></mml:mrow></mml:mrow></mml:math></inline-formula> is a task-dependent loss (cross-entropy, Dice loss, etc.). In practice optimisation uses stochastic (mini-batch) gradient methods; for a minibatch <inline-formula><mml:math id="M12"><mml:mrow><mml:mrow><mml:mi mathvariant="script">B</mml:mi></mml:mrow></mml:mrow></mml:math></inline-formula>,</p>
<disp-formula id="E5"><mml:math id="M13"><mml:mrow><mml:mi>&#x003B8;</mml:mi><mml:mo>&#x02190;</mml:mo><mml:mi>&#x003B8;</mml:mi><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mrow><mml:mi mathvariant="script">B</mml:mi></mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>&#x02208;</mml:mo><mml:mrow><mml:mi mathvariant="script">B</mml:mi></mml:mrow></mml:mrow></mml:munder></mml:mstyle><mml:msub><mml:mrow><mml:mo>&#x02207;</mml:mo></mml:mrow><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mi mathvariant="script">L</mml:mi></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x0002B;</mml:mo><mml:mtext>&#x003BB;</mml:mtext><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>with learning rate &#x003B7;<sub><italic>t</italic></sub> (possibly scheduled) and optional weight-decay &#x003BB;. The Universal Approximation Theorem guarantees that sufficiently large networks can approximate a wide class of continuous functions on compact domains, giving a theoretical basis for DL&#x00027;s representational power; however, generalization in practice depends on optimisation, regularization, data diversity and inductive biases rather than approximation alone. This representational strength&#x02014;combined with transfer learning, data augmentation and modern regularisers&#x02014;explains the rapid adoption of DL in medical imaging, where networks can be trained (or fine-tuned) to integrate detection, segmentation and classification within robust clinical pipelines.</p>
<p>The shift toward representation learning began with patch-based CNN screening of contrast-enhanced CT. A modified VGG network exceeded radiologist sensitivity (0.983 vs. 0.929) on Taiwanese data and maintained AUC 0.920 on a U.S. external cohort despite domain shift (<xref ref-type="bibr" rid="B46">46</xref>). Similar pipelines fine-tuned NASNet via Cat-Swarm optimisation and then classified with Glowworm-tuned Elman NNs, yielding 99.60% average accuracy across six independent runs (<xref ref-type="bibr" rid="B47">47</xref>). Hybrid stacks that incorporated denoising, segmentation, and Deep-Belief Networks reached 99.8% accuracy and perfect sensitivity on 1,800 CT images (<xref ref-type="bibr" rid="B48">48</xref>). Graph-derived features from Harris corners lifted k-NN F<sub>1</sub> to 92.74% after whale-based hyper-parameter optimisation of DenseNet descriptors (<xref ref-type="bibr" rid="B49">49</xref>), while stage-specific CNNs (ResNet50) classified four pancreatic-tumor stages with 97.88% accuracy (<xref ref-type="bibr" rid="B50">50</xref>).</p>
<p>Transfer learning on other modalities also matured. EfficientNetB0 and ResNet50 each secured 92% accuracy on a 12,000-image histopathology corpus, with ResNet50 climbing to 96% on higher-resolution subsets (<xref ref-type="bibr" rid="B51">51</xref>). Graph-causality ideas migrated to imaging: a Causality-Informed Graph Intervention Model suppressed spurious patch correlations, returning mean cross-validation AUC 0.942 and maintaining external accuracies of 86% to 82% across three centers (<xref ref-type="bibr" rid="B52">52</xref>). A successor adaptive-metric GNN delivered AUC 0.954 at only 0.44 M parameters and &#x0003C; 7 ms inference per study (<xref ref-type="bibr" rid="B53">53</xref>). End-to-end CT workflows paired CNN classifiers with Faster R-CNN detectors or YOLOv3 heads, posting 94.6% accuracy vs. 92.4% for the detection-only baseline (<xref ref-type="bibr" rid="B54">54</xref>, <xref ref-type="bibr" rid="B55">55</xref>). Coarse-to-fine cascades that combine duct segmentation with tumor masks pushed AUROC to 0.99 and retained 0.97 sensitivity for lesions &#x0003C; 2 cm after external validation (<xref ref-type="bibr" rid="B56">56</xref>). nnU-Net pipelines now detect cystic lesions with 78.8% sensitivity at just 0.48 false positives per case, rivaling radiologists for cysts &#x02265;220 mm<sup>3</sup> (<xref ref-type="bibr" rid="B57">57</xref>).</p>
<p>Segmentation networks grew in diversity. SMANet exploited feature-fusion and attention blocks to reach mDice 0.769 on five tissue types in whole-slide images (<xref ref-type="bibr" rid="B58">58</xref>). MSCA-UNet replaced initial convolutions with multi-scale branches and lifted tumor Dice from 68.0% to 80.1% on MSD data when paired with HU windowing and ROI cropping (<xref ref-type="bibr" rid="B59">59</xref>). Annotation-efficient paradigms pre-trained on pseudo-lesions boosted ShuffleNet-V2 external accuracy from 62.0% (10% data) to 82.5% and improved sensitivity by 37.0 pp (<xref ref-type="bibr" rid="B60">60</xref>). Image-reconstruction networks (DLIR-H) enhanced resectability assessment AUC from 0.75 to 0.91 while halving inter-reader variance (<xref ref-type="bibr" rid="B61">61</xref>). Comparative studies routinely showed MobileNet or InceptionV3 topping ML baselines on Kaggle CT sets with &#x02265;97% accuracy (<xref ref-type="bibr" rid="B62">62</xref>, <xref ref-type="bibr" rid="B63">63</xref>). Parameter-efficient MMPU-Net balanced performance (Dice 88.6% on MSD) and speed (4 &#x000D7; faster training) using mean-max pooling and hybrid convolutions (<xref ref-type="bibr" rid="B64">64</xref>).</p></sec>
<sec>
<label>3.3</label>
<title>Attention and transformer-based models</title>
<p>Attention techniques let a model emphasize the most relevant parts of an input by computing data-dependent weights between elements (pixels, patches, tokens). Rather than treating all locations equally, attention reweights features so the network can aggregate global context where needed and focus on small but important structures&#x02014;a behavior particularly useful in medical imaging for both classification and fine-grained segmentation. The basic architecture is shown in <xref ref-type="fig" rid="F7">Figure 7</xref>.</p>
<fig position="float" id="F7">
<label>Figure 7</label>
<caption><p>Basic encoder&#x02013;decoder structure of the Transformer architecture, consisting of stacked multi-head attention, Feed-Forward Network (FFN) layers, Fully Connected (FC) layers, and positional encodings.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0007.tif">
<alt-text content-type="machine-generated">Diagram of the Transformer encoder&#x02013;decoder architecture. The encoder stack and decoder stack each contain multi-head attention blocks followed by Feed-Forward Network (FFN) layers; positional encodings are added to the input embeddings, and Fully Connected (FC) layers produce the final output.</alt-text>
</graphic>
</fig>
<p>Attention mechanisms compute data-dependent, pairwise interactions between tokens or spatial locations so the model can reweight features by relevance rather than by fixed convolutional or local rules. Conceptually, the model transforms input features <italic>X</italic> into three components&#x02014;queries (<italic>Q</italic>), keys (<italic>K</italic>), and values (<italic>V</italic>)&#x02014;through learned linear projections. The core self-attention operation computes similarity scores between queries and keys, normalizes them via softmax to produce attention weights &#x003B1;<sub><italic>ij</italic></sub>, and then combines the values as a weighted sum:</p>
<disp-formula id="EQ6"><mml:math id="M14"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>A</mml:mi><mml:mi>t</mml:mi><mml:mi>t</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>Q</mml:mi><mml:mo>,</mml:mo><mml:mi>K</mml:mi><mml:mo>,</mml:mo><mml:mi>V</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mtext>softmax</mml:mtext><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>Q</mml:mi><mml:msup><mml:mrow><mml:mi>K</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x022A4;</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:msqrt><mml:mrow><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msqrt></mml:mrow></mml:mfrac></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mi>V</mml:mi><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(1)</label></disp-formula>
<p>Multi-head attention runs <italic>h</italic> parallel attention heads and concatenates their outputs, allowing the model to attend to different representation subspaces simultaneously. Positional encodings are added to preserve spatial or sequential information. Computationally, dense self-attention costs <inline-formula><mml:math id="M15"><mml:mrow><mml:mrow><mml:mi mathvariant="script">O</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mi>d</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> time and memory, which motivates local/windowed, sparse, or linearized attention variants that reduce complexity. In medical imaging, the attention map &#x003B1; can be interpreted as a soft, differentiable importance mask that both improves interpretability and lets the network focus on small, clinically relevant structures while aggregating global context.</p>
<p>Attention mechanisms refine both classification and segmentation by focusing the model on informative regions or features. Kernel Attention Networks (KANs) improved urine-biomarker classification to 94.44% accuracy and F<sub>1</sub> 0.97, surpassing gradient boosting and XGBoost (<xref ref-type="bibr" rid="B65">65</xref>). A Swin Transformer trained on CT achieved 83% test accuracy, modest yet superior to CNN baselines (<xref ref-type="bibr" rid="B66">66</xref>). TED-STGN combined graph attention and temporal transformers on sequential imaging, realizing 94.7% accuracy and cutting false positives relative to ViT and Swin benchmarks (<xref ref-type="bibr" rid="B67">67</xref>).</p>
<p>Segmentation benefitted markedly. MDAG-Net inserted multi-dimensional gates into U-Net skip paths, improving Dice by 5.3 pp and recall by 12.5 pp, particularly for tiny tumors (<xref ref-type="bibr" rid="B68">68</xref>). AMFF-Net paired residual depthwise attention with hybrid transformers to outscore nnUNetv2 on MSD (pancreas Dice 82.12%, tumor Dice 57.00%) (<xref ref-type="bibr" rid="B69">69</xref>). Triple-attention MAEU-Net pushed NIH pancreas Dice to 87.16% but at a cost of 325 M parameters (<xref ref-type="bibr" rid="B70">70</xref>). SCPMan injected shape-context memory and an active-shape prior, raising NIH Dice to 91.0% and MSD to 92.25%, outperforming both CNN and transformer baselines (<xref ref-type="bibr" rid="B71">71</xref>). DA-TransUNet merged positional and channel attention blocks around ViT cores, lifting Synapse Dice from 77.48% to 79.80% while trimming Hausdorff distance by 8.2 mm (<xref ref-type="bibr" rid="B72">72</xref>). Lightweight RDAM and hybrid transformer modules allowed Pancreas Dice 80.55% and FAH tumor Dice 55.17% in AMFF-Net with only 25.8 M parameters (<xref ref-type="bibr" rid="B69">69</xref>). Anatomical attention guided duct-segmentation FCNs to 55.7% Dice, a meaningful gain for such small tubular structures (<xref ref-type="bibr" rid="B73">73</xref>).</p>
<p>Large-scale diagnostic suites now embed attention throughout. The PANDA triple-stage pipeline localized pancreas, detected lesions at 99% specificity and then applied a dual-path memory transformer for subtype diagnosis, achieving PDAC identification AUC 0.987 internally and 0.957 externally across nine centers; PANDA Plus attained 99.9% real-world specificity after model iteration (<xref ref-type="bibr" rid="B15">15</xref>). PancreasNet fused progressive residuals, Swin blocks and enhanced feature reweighting to yield 92.4% accuracy and Dice 0.87 on 290 CTs, outstripping earlier CAD systems by 5&#x02013;7 pp (<xref ref-type="bibr" rid="B74">74</xref>). DenseNet-161 augmented with CBAM and clinical features separated serous from mucinous cystic neoplasms on MRI with AUC 0.971 (<xref ref-type="bibr" rid="B75">75</xref>). Uncertainty-Aware Attention captured both mean and variance of attention weights, improving AUROC and calibration on national EHRs while enabling reliable <italic>I-don&#x00027;t-know</italic> deferrals (<xref ref-type="bibr" rid="B76">76</xref>). Multi-modal frameworks fused dual-phase CT with eleven biomarkers through self-attention, increasing lymph-node-metastasis AUC from 0.72 to 0.83 (<xref ref-type="bibr" rid="B77">77</xref>). Differentiable-search MobileViT backbones combined with graph representations and XGBoost attained 97.33% accuracy on CT, underscoring the synergy between architecture search and hybrid classifiers (<xref ref-type="bibr" rid="B78">78</xref>). Novel, non-invasive fundus-image PANet achieved AUC 0.96 for pancreatic cancer, hinting at systemic ocular biomarkers (<xref ref-type="bibr" rid="B79">79</xref>).</p>
<p>The body of work surveyed (2015&#x02013;2025) demonstrates a clear methodological progression from classical machine-learning pipelines&#x02014;anchored in hand-crafted features, radiomics and population registries&#x02014;to deep representation learning and, most recently, attention- and transformer-based architectures (<xref ref-type="table" rid="T5">Tables 5</xref>&#x02013;<xref ref-type="table" rid="T7">7</xref>). This evolution has delivered substantial gains in diagnostic accuracy, segmentation performance and multi-modal fusion, and has unlocked promising avenues for non-invasive early detection (imaging &#x0002B; liquid biopsy) and scalable diagnostic suites. At the same time, persistent gaps constrain clinical translation: many studies remain retrospective, single-center or under-powered; reporting and evaluation are heterogeneous; external validation and prospective trials are limited; and issues such as class imbalance, domain shift, overfitting, calibration, interpretability, and deployment efficiency are often insufficiently addressed.</p>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Representative AI models&#x02014;<bold>Conventional ML</bold>.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>References</bold></th>
<th valign="top" align="left"><bold>Data source</bold></th>
<th valign="top" align="left"><bold>Modality task</bold></th>
<th valign="top" align="left"><bold>Method/model</bold></th>
<th valign="top" align="left"><bold>Key results</bold></th>
<th valign="top" align="left"><bold>Limitations</bold></th>
<th valign="top" align="left"><bold>Strengths</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Ojha et al. (<xref ref-type="bibr" rid="B18">18</xref>)</td>
<td valign="top" align="left">RNA-Seq cohort</td>
<td valign="top" align="left">3-yr survival</td>
<td valign="top" align="left">Sex-specific Random Forest with feature filtering &#x00026; probability calibration; independent cohort validation; deployed as <italic>Gap-App</italic> web tool</td>
<td valign="top" align="left">Test acc. 81.25% (M) / 89.47% (F)</td>
<td valign="top" align="left">Small validation cohort; Genomic predictors may overshadow clinical factors; Complex non-linear interactions not fully captured</td>
<td valign="top" align="left">Sex-specific modeling improves accuracy; Web-based tool for clinical deployment; External validation with independent cohort</td>
</tr>
<tr>
<td valign="top" align="left">Modi et al. (<xref ref-type="bibr" rid="B19">19</xref>)</td>
<td valign="top" align="left">Urine biomarkers (Kaggle)</td>
<td valign="top" align="left">Multi-class diagnosis</td>
<td valign="top" align="left">CatBoost after feature selection (handles categorical via ordered boosting); 10-fold CV; compared vs. RF/LGB</td>
<td valign="top" align="left">Overall acc. 91.89%; pancreatic recall 1.00</td>
<td valign="top" align="left">Limited to Kaggle dataset; Lacks external validation on real-world clinical data; Class imbalance not explicitly addressed</td>
<td valign="top" align="left">CatBoost handles categorical features efficiently; Perfect pancreatic cancer recall (1.00); Comprehensive comparison with RF/LGB</td>
</tr>
<tr>
<td valign="top" align="left">Pandey et al. (<xref ref-type="bibr" rid="B20">20</xref>)</td>
<td valign="top" align="left">Multi-omics proteins&#x0002B;genes</td>
<td valign="top" align="left">Cancer prediction</td>
<td valign="top" align="left">Hybrid XGBoost&#x0002B;AdaBoost ensemble on curated features (redundancy removal &#x00026; imputation); cross-val. F<sub>1</sub> lead vs. 9 models</td>
<td valign="top" align="left">Highest F<sub>1</sub> of all 9 models</td>
<td valign="top" align="left">Dataset diversity unclear; No external validation reported; Hybrid ensemble complexity may hinder interpretability</td>
<td valign="top" align="left">Hybrid XGBoost&#x0002B;AdaBoost achieves highest F<sub>1</sub> score; Handles multi-omics data (proteins&#x0002B;genes); Feature curation reduces redundancy</td>
</tr>
<tr>
<td valign="top" align="left">Hasan et al. (<xref ref-type="bibr" rid="B21">21</xref>)</td>
<td valign="top" align="left">SEER registry</td>
<td valign="top" align="left">Stage &#x00026; survival</td>
<td valign="top" align="left">Decision Tree after extensive preprocessing (drop &#x0003E;80% missing, one-hot/label encoding); top model for survivability; high stage acc.</td>
<td valign="top" align="left">Survivability acc. 92.1%; stage 99.97%</td>
<td valign="top" align="left">AdaBoost and Gaussian NB show poor performance; Model interpretability not discussed; SEER data may not generalize globally</td>
<td valign="top" align="left">Decision Tree achieves 99.97% stage accuracy; 92.1% survivability prediction; Extensive preprocessing improves data quality</td>
</tr>
<tr>
<td valign="top" align="left">Nasief et al. (<xref ref-type="bibr" rid="B26">26</xref>)</td>
<td valign="top" align="left">Daily CT radiomics</td>
<td valign="top" align="left">Early response</td>
<td valign="top" align="left">Bayesian-regularized ANN trained on delta-radiomics; selected DRFs (kurtosis, coarseness, NESTD); LOOCV &#x0002B; external validation</td>
<td valign="top" align="left">External AUC 0.98</td>
<td valign="top" align="left">Small patient cohort; Image acquisition variations affect reproducibility; Limited to single-institution data</td>
<td valign="top" align="left">Bayesian ANN achieves 0.98 external AUC; Delta-radiomics capture temporal changes; Motion-independent features enhance robustness</td>
</tr>
<tr>
<td valign="top" align="left">Chen et al. (<xref ref-type="bibr" rid="B22">22</xref>)</td>
<td valign="top" align="left">18,220-var EHR window</td>
<td valign="top" align="left">Early detection</td>
<td valign="top" align="left">XGBoost on 13 &#x02192; 1 mo pre-dx window; 582 predictors retained from 1,947; operating-point trade-offs quantified</td>
<td valign="top" align="left">AUC 0.84; median 24-mo lead at 90% SP</td>
<td valign="top" align="left">Retrospective study design; Requires extensive EHR infrastructure; Trade-off between sensitivity and specificity</td>
<td valign="top" align="left">Large-scale EHR dataset (18,220 variables); 24-month median lead time at 90% specificity; XGBoost identifies 582 key predictors</td>
</tr>
<tr>
<td valign="top" align="left">Blyuss et al. (<xref ref-type="bibr" rid="B32">32</xref>)</td>
<td valign="top" align="left">Urine LYVE1/REG1B/TFF1</td>
<td valign="top" align="left">Risk score (PancRISK)</td>
<td valign="top" align="left">Logistic Regression (PancRISK) over LYVE1/REG1B/TFF1&#x0002B;creatinine&#x0002B;age; compared with RF/SVM/NN; optional CA19-9 <italic>OR</italic> rule</td>
<td valign="top" align="left">AUC 0.94; SN/SP 0.81/0.90</td>
<td valign="top" align="left">Half of cases are late-stage patients; Limited to three urine biomarkers; Requires additional validation in screening populations</td>
<td valign="top" align="left">Non-invasive urine-based biomarkers; PancRISK achieves 0.94 AUC; Simple logistic regression outperforms complex models</td>
</tr>
<tr>
<td valign="top" align="left">Kinugasa et al. (<xref ref-type="bibr" rid="B35">35</xref>)</td>
<td valign="top" align="left">ctDNA KRAS vs. tissue</td>
<td valign="top" align="left">Prognosis</td>
<td valign="top" align="left">Digital PCR KRAS in ctDNA vs. tissue; ctDNA mutations (esp. G12V) prognostic; tissue&#x02013;ctDNA concordance 77.3%</td>
<td valign="top" align="left">ctDNA &#x0002B; KRAS linked to shorter OS</td>
<td valign="top" align="left">77.3% tissue-ctDNA concordance indicates discrepancies; Small sample size (22 patients); ctDNA detection sensitivity varies</td>
<td valign="top" align="left">Liquid biopsy approach is minimally invasive; ctDNA KRAS mutations predict survival; Digital PCR enables sensitive detection</td>
</tr></tbody>
</table>
</table-wrap>
<table-wrap position="float" id="T6">
<label>Table 6</label>
<caption><p>Representative AI models&#x02014;<bold>Deep learning</bold>.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>References</bold></th>
<th valign="top" align="left"><bold>Data source</bold></th>
<th valign="top" align="left"><bold>Modality task</bold></th>
<th valign="top" align="left"><bold>Method/model</bold></th>
<th valign="top" align="left"><bold>Key results</bold></th>
<th valign="top" align="left"><bold>Limitations</bold></th>
<th valign="top" align="left"><bold>Strengths</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Liu et al. (<xref ref-type="bibr" rid="B46">46</xref>)</td>
<td valign="top" align="left">CECT patches (TW &#x0002B;US)</td>
<td valign="top" align="left">Cancer detection</td>
<td valign="top" align="left">Modified VGG CNN on patch crops with patient-level aggregation thresholding; weighted loss; multi-cohort external validation</td>
<td valign="top" align="left">Acc. 0.986 (local); ext. AUC 0.920</td>
<td valign="top" align="left">Retrospective study design; Patch-based approach may miss global context; Requires manual ROI selection for preprocessing</td>
<td valign="top" align="left">Cross-racial external validation (Taiwan &#x0002B; US); Modified VGG achieves 0.986 local accuracy; Multi-cohort validation demonstrates generalizability</td>
</tr>
<tr>
<td valign="top" align="left">Shnawa et al. (<xref ref-type="bibr" rid="B47">47</xref>)</td>
<td valign="top" align="left">CT (250 &#x0002B;250)</td>
<td valign="top" align="left">Binary detection</td>
<td valign="top" align="left">NASNet feature extractor (Cat Swarm optimized) &#x0002B; Elman NN (Glowworm Swarm tuned); end-to-end ETEPCC-MDTL pipeline</td>
<td valign="top" align="left">Acc. 99.60%</td>
<td valign="top" align="left">Limited dataset size (250&#x0002B;250); Swarm optimization adds computational complexity; Single-institution data may limit generalization</td>
<td valign="top" align="left">Cat Swarm &#x0002B; Glowworm Swarm optimization for hyperparameter tuning; Elman NN handles temporal dependencies; 99.60% accuracy achieved</td>
</tr>
<tr>
<td valign="top" align="left">Bhargavi et al. (<xref ref-type="bibr" rid="B48">48</xref>)</td>
<td valign="top" align="left">CT (PCCD 1800)</td>
<td valign="top" align="left">Early prediction</td>
<td valign="top" align="left">Preproc. (HSV &#x0002B; diffusion) &#x02192; Fuzzy K-NN Equality segmentation &#x02192; DCNN&#x0002B;DBN classifier with HOG fusion</td>
<td valign="top" align="left">Acc. 99.8%; SN 100%</td>
<td valign="top" align="left">Complex multi-stage pipeline may be difficult to reproduce; HOG feature fusion increases computational cost; Limited external validation</td>
<td valign="top" align="left">DCNN&#x0002B;DBN fusion leverages complementary features; HSV color space preprocessing enhances contrast; Achieves 99.8% accuracy with 100% sensitivity</td>
</tr>
<tr>
<td valign="top" align="left">Kavak et al. (<xref ref-type="bibr" rid="B51">51</xref>)</td>
<td valign="top" align="left">H&#x00026;E (12k / 4k img.)</td>
<td valign="top" align="left">Histology Dx</td>
<td valign="top" align="left">Transfer learning across CNNs (ResNet50, EfficientNetB0, etc.); curated patching/augmentation; ResNet50 best on 512 &#x000D7; 512</td>
<td valign="top" align="left">Acc. 96% (512 &#x000D7; 512)</td>
<td valign="top" align="left">Histology-based approach requires tissue samples; Patch size (512 &#x000D7; 512) may affect performance on different resolutions; Computational cost of transfer learning</td>
<td valign="top" align="left">Comprehensive CNN comparison (ResNet50, EfficientNetB0, etc.); Transfer learning reduces training time; 96% accuracy on 512 &#x000D7; 512 patches with curated augmentation</td>
</tr>
<tr>
<td valign="top" align="left">Ramaekers et al. (<xref ref-type="bibr" rid="B56">56</xref>)</td>
<td valign="top" align="left">Contrast CT</td>
<td valign="top" align="left">Det. &#x0002B; localization</td>
<td valign="top" align="left">Anatomy-guided ensemble: pancreas/duct segmentation &#x02192; tumor segmentation using secondary signs; bootstrapped folds</td>
<td valign="top" align="left">AUROC 0.99; SN 0.97; SP 1.00</td>
<td valign="top" align="left">Relies on secondary signs which may be subtle; Bootstrap validation may overestimate performance; Requires accurate pancreas segmentation</td>
<td valign="top" align="left">Anatomy-guided approach improves interpretability; Ensemble of segmentation models enhances robustness; Achieves 0.99 AUROC with 0.97 sensitivity and 1.00 specificity</td>
</tr>
<tr>
<td valign="top" align="left">Viriyasaranon et al. (<xref ref-type="bibr" rid="B60">60</xref>)</td>
<td valign="top" align="left">Multi-center CT</td>
<td valign="top" align="left">Classification</td>
<td valign="top" align="left">Annotation-efficient pretraining via pseudo-lesion segmentation; fine-tuned ShuffleNetV2/PVT; robust even with 10% labels</td>
<td valign="top" align="left">Acc. 94.3% internal; 82.5% ext. (10% data)</td>
<td valign="top" align="left">Pseudo-lesion segmentation may introduce artifacts; External validation shows performance drop (82.5% vs. 94.3%); Multi-center variability affects consistency</td>
<td valign="top" align="left">Annotation-efficient pretraining reduces labeling burden; Robust with only 10% labeled data; ShuffleNetV2/PVT balance accuracy and efficiency</td>
</tr>
<tr>
<td valign="top" align="left">Chen et al. (<xref ref-type="bibr" rid="B82">82</xref>)</td>
<td valign="top" align="left">1,473 CECT</td>
<td valign="top" align="left">Full CAD pipeline</td>
<td valign="top" align="left">Segmentation-driven CAD followed by 5 &#x000D7; 3D CNN ensemble; no manual preprocessing; real-world cohort validated</td>
<td valign="top" align="left">Real-world AUC 0.95; SN 74.7% &#x0003C; 2 cm</td>
<td valign="top" align="left">Small tumor detection remains challenging (74.7% SN &#x0003C; 2 cm); Segmentation-driven approach requires accurate organ delineation; Ensemble complexity increases inference time</td>
<td valign="top" align="left">Nationwide population-based study (1,473 CECT); No manual preprocessing required; 5 &#x000D7; 3D CNN ensemble improves robustness; Real-world validation achieves 0.95 AUC</td>
</tr></tbody>
</table>
</table-wrap>
<table-wrap position="float" id="T7">
<label>Table 7</label>
<caption><p>Representative AI models&#x02014;<bold>Attention/transformer</bold>.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>References</bold></th>
<th valign="top" align="left"><bold>Data source</bold></th>
<th valign="top" align="left"><bold>Modality task</bold></th>
<th valign="top" align="left"><bold>Method/model</bold></th>
<th valign="top" align="left"><bold>Key results</bold></th>
<th valign="top" align="left"><bold>Limitations</bold></th>
<th valign="top" align="left"><bold>Strengths</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Vinod et al. (<xref ref-type="bibr" rid="B65">65</xref>)</td>
<td valign="top" align="left">Urine biomarkers</td>
<td valign="top" align="left">3-class Dx</td>
<td valign="top" align="left">Kernel Attention Network (KAN) with EBM-based feature selection; attention grid (g = 4, k = 2); L-BFGS optimization</td>
<td valign="top" align="left">Acc. 94.44%; F1 0.97</td>
<td valign="top" align="left">Kernel Attention Networks are relatively new with limited validation; L-BFGS optimization may be sensitive to initialization; Urine biomarker approach requires standardized collection</td>
<td valign="top" align="left">Kernel Attention Network (KAN) provides interpretable attention mechanisms; EBM-based feature selection improves robustness; Achieves 94.44% accuracy with 0.97 F1 score</td>
</tr>
<tr>
<td valign="top" align="left">Cao et al. (<xref ref-type="bibr" rid="B68">68</xref>)</td>
<td valign="top" align="left">CT (Task07)</td>
<td valign="top" align="left">Pancreas &#x0002B;tumor seg.</td>
<td valign="top" align="left">MDAG-Net: multi-dimensional attention gates in U-Net skips; WML loss (weighted CE&#x0002B;MIoU) for small targets</td>
<td valign="top" align="left">Dice &#x0002B;5.3% vs. U-Net</td>
<td valign="top" align="left">Multi-dimensional attention gates increase model complexity; WML loss requires careful hyperparameter tuning; Limited to Task07 dataset</td>
<td valign="top" align="left">Multi-dimensional attention gates in U-Net capture cross-scale features; WML loss addresses class imbalance for small targets; 5.3% Dice improvement over standard U-Net</td>
</tr>
<tr>
<td valign="top" align="left">Cao et al. (<xref ref-type="bibr" rid="B15">15</xref>)</td>
<td valign="top" align="left">3,208 non-contrast CT</td>
<td valign="top" align="left">Det. &#x0002B;subtype</td>
<td valign="top" align="left">PANDA: nnU-Net localization &#x02192; high-SP lesion detect &#x02192; dual-path memory Transformer (prototype/context) for subtype/PDAC</td>
<td valign="top" align="left">PDAC AUC 0.987; 9-center SP 95.7%</td>
<td valign="top" align="left">Dual-path memory Transformer requires substantial computational resources; High specificity may come at cost of sensitivity; Complex pipeline with multiple stages</td>
<td valign="top" align="left">Large-scale validation (3,208 non-contrast CT); PANDA framework achieves 0.987 PDAC AUC; 9-center validation with 95.7% specificity demonstrates generalizability</td>
</tr>
<tr>
<td valign="top" align="left">Dong et al. (<xref ref-type="bibr" rid="B69">69</xref>)</td>
<td valign="top" align="left">CT (MSD)</td>
<td valign="top" align="left">Segmentation</td>
<td valign="top" align="left">AMFF-Net: RDAM (GateAttn) in shallow layers &#x0002B; Hybrid Transformer at deepest stage; decoder multiscale fusion</td>
<td valign="top" align="left">Dice 82.12% (pancreas); 57.00% (tumor)</td>
<td valign="top" align="left">Hybrid Transformer at deepest stage increases memory requirements; Tumor Dice (57%) lower than pancreas (82.12%); Decoder multiscale fusion adds complexity</td>
<td valign="top" align="left">RDAM (GateAttn) in shallow layers preserves fine details; Hybrid Transformer captures long-range dependencies; Multiscale fusion improves boundary delineation</td>
</tr>
<tr>
<td valign="top" align="left">Sun et al. (<xref ref-type="bibr" rid="B72">72</xref>)</td>
<td valign="top" align="left">Synapse &#x0002B;5 sets</td>
<td valign="top" align="left">Segmentation</td>
<td valign="top" align="left">DA-TransUNet: ViT&#x0002B;U-Net with Dual Attention blocks (position&#x0002B;channel) in encoder &#x00026; skip connections</td>
<td valign="top" align="left">Dice 79.80% ( &#x0002B;2.3%)</td>
<td valign="top" align="left">ViT component requires large amounts of training data; Dual attention increases inference time; Performance gain (&#x0002B;2.3% Dice) may not justify added complexity</td>
<td valign="top" align="left">ViT&#x0002B;U-Net architecture combines global and local features; Dual attention (position&#x0002B;channel) enhances feature representation; Validated on Synapse &#x0002B;5 datasets; 79.80% Dice (&#x0002B;2.3%)</td>
</tr>
<tr>
<td valign="top" align="left">Li et al. (<xref ref-type="bibr" rid="B77">77</xref>)</td>
<td valign="top" align="left">Dual-phase CT &#x0002B;11 labs</td>
<td valign="top" align="left">LN metastasis</td>
<td valign="top" align="left">Dual-channel ResNet18 for CT features with attention-based multimodal fusion of clinical biomarkers; non-linear correlations</td>
<td valign="top" align="left">AUC 0.83 (vs. 0.72 radiomics)</td>
<td valign="top" align="left">Requires both dual-phase CT and 11 lab biomarkers; Multimodal fusion complexity may hinder deployment; Limited to lymph node metastasis prediction</td>
<td valign="top" align="left">Dual-channel ResNet18 processes CT features efficiently; Attention-based multimodal fusion captures non-linear correlations; AUC 0.83 outperforms radiomics-only (0.72)</td>
</tr>
<tr>
<td valign="top" align="left">Heo et al. (<xref ref-type="bibr" rid="B76">76</xref>)</td>
<td valign="top" align="left">National EHR</td>
<td valign="top" align="left">Risk prediction</td>
<td valign="top" align="left">Uncertainty-aware Attention (variational): learns attention mean/variance; RETAIN-style with improved calibration (lower ECE)</td>
<td valign="top" align="left">Higher AUROC; lower ECE</td>
<td valign="top" align="left">Variational attention adds training complexity; RETAIN-style architecture may not generalize to non-EHR data; Requires large-scale national EHR</td>
<td valign="top" align="left">Uncertainty-aware attention improves model calibration; Learns attention mean and variance for reliability; Lower ECE and higher AUROC than deterministic attention</td>
</tr>
<tr>
<td valign="top" align="left">Tian et al. (<xref ref-type="bibr" rid="B75">75</xref>)</td>
<td valign="top" align="left">MRI cysts</td>
<td valign="top" align="left">SCN vs. MCN</td>
<td valign="top" align="left">CBAM-DenseNet161 fused with 11 clinical features; channel &#x0002B; spatial attention before FC fusion</td>
<td valign="top" align="left">AUC 0.971; Acc. 92.44%</td>
<td valign="top" align="left">Limited to MRI cystic tumor classification; Requires 11 clinical features in addition to imaging; CBAM attention adds computational overhead</td>
<td valign="top" align="left">CBAM-DenseNet161 combines channel and spatial attention; Fusion with 11 clinical features improves accuracy; Achieves 0.971 AUC and 92.44% accuracy for SCN vs. MCN</td>
</tr>
<tr>
<td valign="top" align="left">Mahendran et al. (<xref ref-type="bibr" rid="B74">74</xref>)</td>
<td valign="top" align="left">CT (290 vols.)</td>
<td valign="top" align="left">Detection</td>
<td valign="top" align="left">PancreasNet: Swin-based progressive residual Transformer with Enhanced Feature Reweighting &#x00026; Regulated Fusion; LSTM-like retention</td>
<td valign="top" align="left">Acc. 92.4%; Dice 0.87</td>
<td valign="top" align="left">Swin Transformer requires careful patch size selection; LSTM-like retention increases model parameters; Dataset size (290 volumes) relatively modest</td>
<td valign="top" align="left">Swin-based progressive residual Transformer scales efficiently; Enhanced Feature Reweighting improves important feature emphasis; Regulated Fusion with LSTM-like retention; 92.4% accuracy and 0.87 Dice</td>
</tr></tbody>
</table>
</table-wrap>
</sec></sec>
<sec id="s4">
<label>4</label>
<title>Data sources</title>
<p>A panoramic view of the data landscape clarifies <italic>why</italic> pancreatic-cancer AI has shifted from classical machine-learning to deep-learning and, most recently, to attention or transformer architectures. Instead of enumerating studies, the material below pools the datasets <italic>actually used</italic> by the short-listed papers (<xref ref-type="table" rid="T8">Table 8</xref>) by the primary data modality that governs network design and evaluation. Three trends emerge. First, modern imaging pipelines lean on ever larger, mostly private three-dimensional CT archives. Second, prognosis-oriented radiomics and -omics projects still depend on mid-sized, often single-center cohorts. Third, a handful of public benchmarks act as &#x0201C;connective tissue&#x0201D; for cross-paper comparison and pre-training.</p>
<table-wrap position="float" id="T8">
<label>Table 8</label>
<caption><p>Summary of data sources in pancreatic cancer AI research.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Dataset</bold></th>
<th valign="top" align="left"><bold>Modality &#x00026; description</bold></th>
<th valign="top" align="left"><bold>Sample size</bold></th>
<th valign="top" align="left"><bold>Public</bold></th>
<th valign="top" align="left"><bold>References</bold></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Two-dimensional imaging datasets</bold></td>
</tr>
<tr>
<td valign="top" align="left">Histology H&#x00026;E crops</td>
<td valign="top" align="left">WSI (PDAC vs. chronic pancreatitis)</td>
<td valign="top" align="left">12,000 &#x0002B; 4,000 images</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B51">51</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Balanced CT slices</td>
<td valign="top" align="left">Two-class abdominal CT slices</td>
<td valign="top" align="left">500 slices</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B47">47</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">PCCD CT images</td>
<td valign="top" align="left">Portal-venous CT slices</td>
<td valign="top" align="left">1,800 slices</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B48">48</xref>)</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Three-dimensional imaging datasets</bold></td>
</tr>
<tr>
<td valign="top" align="left">Taiwan CECT &#x0002B; TCIA</td>
<td valign="top" align="left">Portal-venous CT (internal &#x0002B; external)</td>
<td valign="top" align="left">370&#x0002B;320 / 281&#x0002B;82</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B46">46</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Dutch CECT &#x0002B; MSD</td>
<td valign="top" align="left">Local 197 CECT &#x0002B; public 281 MSD volumes</td>
<td valign="top" align="left">478 volumes</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B56">56</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Korean multi-center</td>
<td valign="top" align="left">Multi-center non-contrast CT</td>
<td valign="top" align="left">4,287 &#x0002B; 361 pts</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B60">60</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Chinese registry</td>
<td valign="top" align="left">Multi-hospital contrast CT</td>
<td valign="top" align="left">1,473 studies</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B82">82</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">PANDA multi-center</td>
<td valign="top" align="left">Non-contrast CT from 9 hospitals</td>
<td valign="top" align="left">3,208 &#x0002B; 5,337</td>
<td valign="top" align="left">Partly<sup><italic>a</italic></sup></td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B15">15</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">MSD Task07</td>
<td valign="top" align="left">3-D CT segmentation challenge</td>
<td valign="top" align="left">281 volumes</td>
<td valign="top" align="left">Yes</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B68">68</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">NIH-82 pancreas CT</td>
<td valign="top" align="left">TCIA abdominal benchmark</td>
<td valign="top" align="left">82 volumes</td>
<td valign="top" align="left">Yes</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B69">69</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Synapse abdominal CT</td>
<td valign="top" align="left">Multi-organ segmentation set</td>
<td valign="top" align="left">&#x0007E;377 volumes</td>
<td valign="top" align="left">Yes</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B72">72</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">PancreasNet CT</td>
<td valign="top" align="left">Progressive-residual Swin study</td>
<td valign="top" align="left">290 volumes</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B74">74</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Dual-phase CT</td>
<td valign="top" align="left">Arterial&#x0002B;venous CT &#x00026; biomarkers</td>
<td valign="top" align="left">202 pts</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B77">77</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">MRI cystic-neoplasm</td>
<td valign="top" align="left">T1/T2 ROIs (SCN vs. MCN)</td>
<td valign="top" align="left">314 pts / 1,761 ROIs</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B75">75</xref>)</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Imaging-derived tabular radiomics</bold></td>
</tr>
<tr>
<td valign="top" align="left">Serial non-contrast CT</td>
<td valign="top" align="left">Daily CT during chemoradiation</td>
<td valign="top" align="left">90 pts / 2,520 scans</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B26">26</xref>)</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Clinical tabular sources</bold></td>
</tr>
<tr>
<td valign="top" align="left">SEER registry</td>
<td valign="top" align="left">Cancer registry (1975&#x02013;2016)</td>
<td valign="top" align="left">&#x0007E;31,000 cases</td>
<td valign="top" align="left">Yes</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B21">21</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Optum<sup>&#x000AE;</sup> EHR</td>
<td valign="top" align="left">De-identified claims / clinical text</td>
<td valign="top" align="left">3,322 &#x0002B; 25,908</td>
<td valign="top" align="left">Commercial</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B22">22</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">NHIS insurance</td>
<td valign="top" align="left">National health-service claims</td>
<td valign="top" align="left">NR</td>
<td valign="top" align="left">Restricted</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B76">76</xref>)</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Molecular and liquid-biopsy datasets</bold></td>
</tr>
<tr>
<td valign="top" align="left">Sex-distinct RNA-Seq</td>
<td valign="top" align="left">Tumor RNA-Seq (FPKM)</td>
<td valign="top" align="left">NR</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B18">18</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Multi-omics panel</td>
<td valign="top" align="left">Proteins &#x0002B; gene expression</td>
<td valign="top" align="left">NR</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B20">20</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">ctDNA vs. tissue KRAS</td>
<td valign="top" align="left">Serum/tissue sequencing</td>
<td valign="top" align="left">75 &#x0002B; 66 pts</td>
<td valign="top" align="left">No</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B35">35</xref>)</td>
</tr>
<tr style="background-color:#dee1e1;">
<td valign="top" align="left" colspan="5"><bold>Biofluid biomarker panels</bold></td>
</tr>
<tr>
<td valign="top" align="left">Kaggle urinary 2020</td>
<td valign="top" align="left">LYVE1, REG1B, TFF1, creatinine</td>
<td valign="top" align="left">590 samples</td>
<td valign="top" align="left">Yes</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B19">19</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">PancRISK urine</td>
<td valign="top" align="left">LYVE1, REG1B, TFF1, creatinine</td>
<td valign="top" align="left">379 samples</td>
<td valign="top" align="left">On request</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B32">32</xref>)</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p><sup><italic>a</italic></sup>Planned for public release. NR, Not reported; pts, patients; ROIs, regions of interest.</p>
</table-wrap-foot>
</table-wrap>
<sec>
<label>4.1</label>
<title>Two-dimensional imaging datasets</title>
<p>Two-dimensional inputs&#x02014;either whole slides or axial slices&#x02014;were an early compromise between GPU memory limits and the need for more training examples. Histology work by Kavak et al. built a corpus of 12,000 low-resolution and 4,000 high-resolution H&#x00026;E crops from 119 surgical slides, later used to benchmark transfer-learning CNNs and to illustrate that ResNet50 and EfficientNetB0 outperform custom models on balanced pathology data (<xref ref-type="bibr" rid="B51">51</xref>). Liu et al. followed a similar patch strategy on imaging, extracting 224 &#x000D7; 224 arterial-phase CECT tiles from 690 Taiwanese CT volumes and validating on 281 PDAC and 82 control scans from TCIA; the redundancy of thousands of overlapping patches lifted patient-level accuracy beyond 0.98 on the internal test set (<xref ref-type="bibr" rid="B46">46</xref>). Beyond the abdomen, Wu et al. repurposed ophthalmic photographs, assembling 1,300 fundus images from 194 patients to train PANet&#x02014;a ResNet34 backbone with multi-scale and channel attention&#x02014;that reached an AUC of 0.96 for pancreatic-cancer prediction (<xref ref-type="bibr" rid="B79">79</xref>). Finally, Chen et al. created the PCPI set by tiling whole-slide pathology images into 224-pixel squares and showed that a plug-and-play channel-plus-spatial self-attention block boosts mDice to 74% on five tissue classes (<xref ref-type="bibr" rid="B80">80</xref>). Collectively, these two-dimensional resources demonstrate how high annotation density can offset limited patient numbers when memory or data-sharing constraints prohibit full-volume learning.</p></sec>
<sec>
<label>4.2</label>
<title>Three-dimensional imaging volumes</title>
<p>Full-volume learning has now become the default for detection, segmentation and staging tasks. Several compact single-center datasets still underpin proof-of-concept work: a balanced 500-slice abdominal CT collection drives the NASNet&#x02013;Elman hybrid that reports 99.6% accuracy (<xref ref-type="bibr" rid="B47">47</xref>); the so-called PCCD set of 1,800 portal-venous slices fuels a DCNN&#x02013;DBN pipeline that reaches 99.8% accuracy (<xref ref-type="bibr" rid="B48">48</xref>); and a 290-volume non-contrast series underlies PancreasNet, a Swin-based progressive residual network that attains a Dice of 0.87 (<xref ref-type="bibr" rid="B74">74</xref>). Multi-center cohorts offer greater diversity: Ramaekers et al. combined 197 contrast-enhanced exams from the Netherlands with the public Medical Segmentation Decathlon (MSD) Task07 pancreas-tumor set of 281 volumes, showing that anatomy-aware ensembles generalize with an AUROC of 0.99 (<xref ref-type="bibr" rid="B56">56</xref>). Viriyasaranon et al. pushed self-supervised pre-training on 4,287 Asian and 361 US studies, a scale that revealed cross-ethnicity gaps and how pseudo-lesions can bridge them (<xref ref-type="bibr" rid="B60">60</xref>). Public benchmarks remain indispensable: MSD Task07, NIH-82 and the Synapse multi-organ set (&#x02248;377 volumes) form the backbone of attention U-Nets, MDAG-Net, AMFF-Net, and DA-TransUNet, letting authors claim consistent Dice gains of 2&#x02013;6 percentage points over U-Net or TransUNet baselines (<xref ref-type="bibr" rid="B68">68</xref>, <xref ref-type="bibr" rid="B69">69</xref>, <xref ref-type="bibr" rid="B72">72</xref>, <xref ref-type="bibr" rid="B81">81</xref>). 
At the high end of scale, the PANDA consortium aggregated 3,208 non-contrast CTs with 5,337 external validations drawn from nine hospitals; its memory transformer discriminates PDAC subtypes with an AUC of 0.987 and often outperforms radiologists in reader studies (<xref ref-type="bibr" rid="B15">15</xref>). Chen et al. added a 1,473-study nationwide registry for five-network ensemble testing, reporting real-world AUC 0.95 and 74.7% sensitivity for tumors smaller than two centimeters (<xref ref-type="bibr" rid="B82">82</xref>). A hybrid modality appears in the dual-phase arterial-plus-venous collection of 202 patients that, when fused with 11 laboratory variables via multi-head attention, yields an AUC of 0.83 for lymph-node-metastasis prediction (<xref ref-type="bibr" rid="B77">77</xref>). Together these resources illustrate how volume-level diversity, not merely sample count, drives external validity.</p>
<p>Magnetic-resonance datasets are far scarcer. Tian et al. gathered 314 cystic-neoplasm exams&#x02014;1,761 two-dimensional tumor ROIs&#x02014;and showed that inserting a CBAM hybrid attention block into DenseNet161, then concatenating eleven clinical factors, pushes patient-level AUC to 0.97 for distinguishing serous from mucinous cystic lesions (<xref ref-type="bibr" rid="B75">75</xref>). The study underscores both the promise and current scarcity of MRI data for pancreatic AI.</p></sec>
<sec>
<label>4.3</label>
<title>Radiomics data</title>
<p>Despite the rise of end-to-end CNNs, hand-engineered features remain influential in prognosis and treatment-response modeling. Nasief et al. extracted more than 1,300 delta-features from 2,520 daily non-contrast CTs in 90 chemoradiation patients, finding that a three-feature Bayesian neural network predicted response with an external AUC of 0.98 (<xref ref-type="bibr" rid="B26">26</xref>). Toyama et al. linked gray-level non-uniformity on FDG-PET to survival in 161 cases, showing that radiomic heterogeneity complements clinical stage and surgical status (<xref ref-type="bibr" rid="B28">28</xref>). Such studies highlight radiomics&#x00027; role where serial imaging or functional tracers exist but deep labels do not.</p></sec>
<sec>
<label>4.4</label>
<title>Clinical and registry data sources</title>
<p>Large administrative or registry data still power many risk-stratification efforts. The Optum<sup>&#x000AE;</sup> inverse-cohort strategy linked 3,322 early-stage and 25,908 late-stage PDAC cases to de-identified claims, letting XGBoost anticipate a median 24-month diagnostic lead at 90% specificity (<xref ref-type="bibr" rid="B22">22</xref>). The SEER registry, cleaned down to roughly 31,000 PDAC records, remains a staple for stage and survival classification (<xref ref-type="bibr" rid="B21">21</xref>). A Taiwanese three-center EMR of 66,384 diabetic patients supports a four-year risk score with an AUROC of 0.91 (<xref ref-type="bibr" rid="B24">24</xref>), while Korean NHIS claims underpin uncertainty-aware attention that yields better calibration than deterministic baselines (<xref ref-type="bibr" rid="B76">76</xref>). Structured, population-scale tables thus remain irreplaceable for longitudinal prediction.</p></sec>
<sec>
<label>4.5</label>
<title>Genomic and molecular data</title>
<p>Genomic, proteomic and cell-free assays enrich modeling with biological mechanism, albeit at smaller scale. Ojha et al. built sex-distinct three-year survival predictors on paired tumor RNA-Seq, although the sample count is undisclosed (<xref ref-type="bibr" rid="B18">18</xref>). Patel et al. merged protein abundances and gene expression into a mixed multi-omics panel but left accession details unspecified (<xref ref-type="bibr" rid="B20">20</xref>). Kinugasa et al. sequenced <italic>KRAS</italic> in 75 paired tissue and 66 serum samples, confirming that circulating mutations predict poorer outcome (<xref ref-type="bibr" rid="B35">35</xref>). Follow-up work by Cohen, Allenson, Pietrasz, and Hadano refined plasma assays that combine ctDNA with proteins or exosomes, generally trading sample size for earlier detection or better prognostication (<xref ref-type="bibr" rid="B36">36</xref>&#x02013;<xref ref-type="bibr" rid="B39">39</xref>).</p></sec>
<sec>
<label>4.6</label>
<title>Biomarker and biofluid data sources</title>
<p>Urine and serum immunoassays remain attractive for low-cost screening. The 2020 Kaggle urine dataset&#x02014;590 samples measuring LYVE1, REG1B, TFF1 plus creatinine&#x02014;supports both CatBoost classification and the kernel-attention network that attains a 0.97 F1 score for the cancer class (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B65">65</xref>). Blyuss et al. contributed the 379-sample PancRISK panel, where logistic regression yields an AUC of 0.94 and sensitivity of 0.81 at 90% specificity (<xref ref-type="bibr" rid="B32">32</xref>). Serum CA19-9, CA125, CEA and exosomal miRNAs&#x02014;explored in nested case&#x02013;control and Lewis-negative subcohorts&#x02014;extend liquid-biopsy utility, particularly when conventional antigens fail (<xref ref-type="bibr" rid="B41">41</xref>&#x02013;<xref ref-type="bibr" rid="B43">43</xref>). These panels demonstrate that modest sample sets, when molecularly rich and clinically accessible, still underpin competitive AI models.</p>
<p>Across modalities, one message is constant: <italic>data variety, not merely data volume, dictates architectural choice</italic>. Two-dimensional patch-based CNNs excel where dense annotation is feasible; three-dimensional CNN&#x02013;Transformer hybrids dominate in volumetric CT; and attention mechanisms increasingly fuse mixed signals&#x02014;dual-phase imaging with laboratory values, or claims data with temporal patterns&#x02014;to deliver clinically actionable insight.</p>
<p><xref ref-type="fig" rid="F8">Figure 8</xref> presents modality-wise distributions using boxplots on a log scale, with mean markers shown for each group. In <xref ref-type="fig" rid="F9">Figure 9</xref>, a horizontal bar chart displays the per-dataset sample sizes on a logarithmic x-axis, sorted in descending order and labeled with exact values, while <xref ref-type="fig" rid="F10">Figure 10</xref> depicts a modality &#x000D7; availability heatmap, where cell colors encode dataset counts and overlaid text provides the precise numbers. All three figures were generated in Python using Matplotlib.</p>
<fig position="float" id="F8">
<label>Figure 8</label>
<caption><p>Distribution of dataset sample sizes by modality (log scale). Boxes show interquartile range with whiskers; dots mark the mean for each modality.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0008.tif">
<alt-text content-type="machine-generated">Flowchart depicting a research process for pancreatic cancer detection using AI. It starts with database search in PubMed and Google Scholar from 2015 to 2025, using specific keywords. Out of 3,000 articles, around 1,100 duplicates are removed. After screening titles and abstracts, 64 are excluded, leaving 1,900 articles for analysis. The distribution involves categorizing into ML-based, DL-based, and attention-based methods.</alt-text>
</graphic>
</fig>
<fig position="float" id="F9">
<label>Figure 9</label>
<caption><p>Dataset sample sizes (log scale), sorted by size. Each bar corresponds to one dataset from our inventory and is labeled with its sample count.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0009.tif">
<alt-text content-type="machine-generated">Diagram of a Transformer architecture showing the encoder and decoder components. The encoder contains multiple layers of &#x0201C;Add and Norm,&#x0201D; &#x0201C;Positionwise FFN,&#x0201D; and &#x0201C;Multi-head attention.&#x0201D; The decoder includes &#x0201C;Masked multi-head attention,&#x0201D; additional &#x0201C;Add and Norm&#x0201D; and &#x0201C;Multi-head attention&#x0201D; layers, with final feed into &#x0201C;FC.&#x0201D; Both encoder and decoder use embeddings with positional encoding. Arrows indicate data flow.</alt-text>
</graphic>
</fig>
<fig position="float" id="F10">
<label>Figure 10</label>
<caption><p>Counts of datasets by modality and availability category. Color encodes count and cell annotations give exact values.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-12-1731922-g0010.tif">
<alt-text content-type="machine-generated">Line graph depicting data trends from January 1, 2016, to January 1, 2024. The yellow line shows some fluctuations with notable peaks around early 2020 and late 2023. Vertical lines labeled &#x0201C;Note&#x0201D; appear at November 1, 2020, and January 1, 2022.</alt-text>
</graphic>
</fig>
</sec></sec>
<sec sec-type="discussion" id="s5">
<label>5</label>
<title>Discussion</title>
<p>The past decade has witnessed a rapid transition from conventional machine-learning pipelines to end-to-end deep networks and, most recently, to transformer-enhanced architectures for pancreatic cancer. Early studies relied on hand-crafted radiomic and clinical variables coupled with support-vector machines, random forests or shallow neural networks. These models provided useful proof-of-concepts but were limited by the size and heterogeneity of available datasets and required expert feature engineering (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B26">26</xref>). Deep-learning approaches alleviated some of these constraints by learning hierarchical features directly from images. Modified VGG and ResNet models achieved internal accuracies around 98%&#x02013;99% for binary detection when trained on balanced CT or histology datasets (<xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B48">48</xref>). Hybrid stacks that combined denoising, segmentation and deep belief networks further pushed sensitivity to nearly 100% on curated cohorts (<xref ref-type="bibr" rid="B48">48</xref>). More recent work incorporates multi-dimensional attention and transformer components to better capture long-range context and focus the network on the small, heterogeneous pancreas and tumor region. For instance, the Multi-Dimensional Attention Gate network (MDAG-Net) introduces spatial, channel and multi-input attention gates; compared with a U-Net baseline on the Task07 Pancreas dataset it improved Dice, precision, recall and mean IoU by 5.3, 1.5, 12.7, and 7.6 percentage points, respectively. This shift illustrates how attention helps filter redundant background features and recalibrates convolution kernels to emphasize tumor regions.</p>
<p>Progress has been driven not only by algorithmic innovation but also by access to larger and more diverse datasets. Early efforts were restricted to two-dimensional slices or small, single-center cohorts; for example, the PCCD dataset comprised 1,800 portal-venous slices, and several urine-biomarker studies contained fewer than 600 samples. Contemporary studies leverage full three-dimensional volumes and multi-center registries. Large-scale cohorts such as PANDA aggregated more than 3,000 non-contrast CTs with external validation across nine hospitals, enabling robust training and external assessment (<xref ref-type="bibr" rid="B15">15</xref>). Public benchmarks like the NIH, MSD Task07, and Synapse datasets now provide standardized segmentation tasks, fostering fair comparison and transfer learning. Meanwhile, new modalities&#x02014;fundus photography, plasma-omics, and multi-phase CT fused with clinical biomarkers&#x02014;have diversified the data landscape (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B79">79</xref>). This variety has underscored the importance of modality-specific architectures: two-dimensional patch CNNs remain effective for dense histology or fundus imagery, whereas three-dimensional U-Nets and transformers excel on volumetric CT, and attention-based fusion layers integrate imaging with lab tests or clinical variables.</p>
<p>The model-summary (<xref ref-type="table" rid="T5">Tables 5</xref>&#x02013;<xref ref-type="table" rid="T7">7</xref>) and the data-source (<xref ref-type="table" rid="T8">Table 8</xref>) together highlight a consistent pattern: (i) radiomics and conventional ML are strongest for prognosis and longitudinal monitoring, where repeated measures (delta-radiomics on serial CT or pre/post-RT PET/CT) capture treatment-induced change and heterogeneity (<xref ref-type="bibr" rid="B26">26</xref>&#x02013;<xref ref-type="bibr" rid="B28">28</xref>); (ii) CNNs dominate lesion detection/segmentation on volumetric contrast CT&#x02014;especially when anatomical context (pancreas/duct masks, secondary signs) is injected&#x02014;yielding high AUROC with clinically interpretable by-products (masks, heat-maps) (<xref ref-type="bibr" rid="B56">56</xref>, <xref ref-type="bibr" rid="B57">57</xref>); and (iii) transformer/attention components are most compelling when fusing modalities (dual-phase CT with laboratory values, or imaging with clinical covariates) and when global context matters (e.g., multi-center non-contrast CT) (<xref ref-type="bibr" rid="B15">15</xref>, <xref ref-type="bibr" rid="B75">75</xref>, <xref ref-type="bibr" rid="B77">77</xref>). In short, the &#x0201C;what each method is good at&#x0201D; in <xref ref-type="table" rid="T5">Tables 5</xref>&#x02013;<xref ref-type="table" rid="T7">7</xref> maps directly to the data regimes cataloged in <xref ref-type="table" rid="T8">Table 8</xref>: hand-engineered features plus survival models for serial/prognostic settings; CNNs for voxel-level localization; and attention/transformers for multi-modal fusion and long-range dependencies.</p>
<p>Recent transformer-enhanced models demonstrate state-of-the-art performance on both detection and segmentation tasks. Hybrid DenseNet models incorporating channel and spatial attention and eleven clinical variables achieved 92.44% accuracy, AUC 0.971 and F1-score 0.936 for differentiating serous vs. mucinous cystic neoplasms on MRI. The dual-attention TransUNet (DA-TransUNet) integrates positional and channel attention blocks into a U-shaped transformer; on the multi-organ Synapse dataset it improved mean Dice by 2.32 percentage points and reduced the Hausdorff distance by 8.21 mm relative to TransUNet, with a pancreas-specific Dice gain of 5.73 percentage points. Such gains come at minimal computational cost&#x02014;only a 2.54% increase in parameters. Similarly, the PancreasNet progressive residual transformer network combines Swin blocks, enhanced feature reweighting and regulated fusion; it reported 92.4% accuracy, 93.1% recall, 90.7% specificity, and Dice 0.87 on a 290-volume cohort, outperforming earlier convolutional CAD systems by 5&#x02013;7 percentage points. Multi-modal attention further enhances classification: a dual-phase CT plus clinical biomarker model achieved 80% accuracy, 0.82 precision, 0.86 specificity, 0.74 recall, and AUC 0.83 for pre-operative lymph-node metastasis prediction, representing improvements of 0.10&#x02013;0.16 points over radiomics baselines. At the extreme, the DeepOptimalNet cascade achieved a reported 99.3% accuracy, 99.1% sensitivity, and 99.5% specificity for pancreatic tumor classification in CT imaging (<xref ref-type="bibr" rid="B83">83</xref>), though such perfect scores on limited datasets warrant cautious interpretation and external validation. Collectively, these results indicate that attention-augmented architectures provide tangible benefits in focusing on subtle pancreatic features, balancing precision and recall, and reducing false positives.</p>
<p>To provide a comprehensive quantitative overview, <xref ref-type="table" rid="T9">Table 9</xref> aggregates reported performance metrics across the representative studies in <xref ref-type="table" rid="T5">Tables 5</xref>&#x02013;<xref ref-type="table" rid="T7">7</xref>. Across conventional machine-learning approaches, AUC values ranged from 0.84 to 0.98, with accuracy spanning 81%&#x02013;99.97%; deep-learning models improved these ranges to AUC 0.92&#x02013;0.99 and accuracy 82.5%&#x02013;99.8%, while also introducing segmentation capabilities (Dice 0.19&#x02013;0.70). Attention and transformer-based architectures achieved the highest overall discrimination [AUC up to 0.996 for PANDA (<xref ref-type="bibr" rid="B15">15</xref>)] and best segmentation performance [Dice 0.87 for PancreasNet (<xref ref-type="bibr" rid="B74">74</xref>)], with F1 scores consistently exceeding 0.92 across all generations and peaking at 0.97 for the kernel attention network (<xref ref-type="bibr" rid="B65">65</xref>). This progression demonstrates measurable performance gains from conventional ML through deep learning to attention/transformer architectures, particularly for complex imaging tasks requiring spatial reasoning and multimodal data fusion. However, it is important to note that these metrics reflect heterogeneous datasets, tasks (detection vs. segmentation vs. prognosis) and validation strategies; therefore, direct cross-study comparisons must account for differences in sample size, prevalence, imaging protocols and patient-level vs. slice-level evaluation.</p>
<table-wrap position="float" id="T9">
<label>Table 9</label>
<caption><p>Quantitative performance aggregation across AI model generations.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Metric</bold></th>
<th valign="top" align="left"><bold>Conventional ML</bold></th>
<th valign="top" align="left"><bold>Deep learning</bold></th>
<th valign="top" align="left"><bold>Attention/transformer</bold></th>
<th valign="top" align="left"><bold>Best overall</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><bold>AUC</bold></td>
<td valign="top" align="left">0.84&#x02013;0.98</td>
<td valign="top" align="left">0.92&#x02013;0.99</td>
<td valign="top" align="left">0.83&#x02013;0.996</td>
<td valign="top" align="left">0.996 [PANDA (<xref ref-type="bibr" rid="B15">15</xref>)]</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Dice</bold></td>
<td valign="top" align="left">N/A<sup><italic>a</italic></sup></td>
<td valign="top" align="left">0.19&#x02013;0.70</td>
<td valign="top" align="left">0.57&#x02013;0.87</td>
<td valign="top" align="left">0.87 [PancreasNet (<xref ref-type="bibr" rid="B74">74</xref>)]</td>
</tr>
<tr>
<td valign="top" align="left"><bold>F1</bold></td>
<td valign="top" align="left">&#x0003E;0.92<sup><italic>b</italic></sup></td>
<td valign="top" align="left">0.92&#x02013;0.96</td>
<td valign="top" align="left">0.936&#x02013;0.97</td>
<td valign="top" align="left">0.97 [KAN (<xref ref-type="bibr" rid="B65">65</xref>)]</td>
</tr>
<tr>
<td valign="top" align="left"><bold>Accuracy</bold></td>
<td valign="top" align="left">81%&#x02013;99.97%</td>
<td valign="top" align="left">82.5%&#x02013;99.8%</td>
<td valign="top" align="left">80%&#x02013;94.4%</td>
<td valign="top" align="left">99.97% [Decision Tree (<xref ref-type="bibr" rid="B21">21</xref>)]</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p><sup><italic>a</italic></sup>ML models focused on classification tasks without segmentation.</p>
<p><sup><italic>b</italic></sup>Exact F1 value not reported; described as &#x0201C;highest&#x0201D; among compared models (<xref ref-type="bibr" rid="B20">20</xref>).</p>
<p>Ranges reflect heterogeneous datasets, tasks, and validation strategies; direct cross-study comparisons should account for these differences. AUC, Area Under ROC Curve; N/A, Not Applicable.</p>
</table-wrap-foot>
</table-wrap>
<p>A few entries in <xref ref-type="table" rid="T5">Tables 5</xref>&#x02013;<xref ref-type="table" rid="T7">7</xref> (and related reports) claim near-perfect performance&#x02014;e.g., 99.8% for a DCNN&#x0002B;DBN pipeline on the 1,800-slice PCCD set (<xref ref-type="bibr" rid="B48">48</xref>) and 99.6% for an NASNet &#x02192; Elman hybrid on a 500-image two-class CT dataset (<xref ref-type="bibr" rid="B47">47</xref>). Based on the dataset properties summarized in <xref ref-type="table" rid="T8">Table 8</xref> and the authors&#x00027; own descriptions, several factors likely contribute.</p>
<p>First, both studies used relatively small, balanced research sets (hundreds to low thousands of <italic>images</italic>, not <italic>patients</italic>), often from one or few scanners/centers. Such homogeneity reduces nuisance variation and can inflate apparent performance. Second, PCCD and similar corpora are 2D slice collections where many slices from the <italic>same</italic> patient share textures, field-of-view and reconstruction kernels. If slices (or overlapping patches) from a given patient appear in both train and test folds, leakage occurs, and models learn patient/scanner signatures rather than disease biology, driving metrics toward 99%&#x02013;100%. Third, several pipelines framed a binary &#x0201C;tumor vs. healthy&#x0201D; task with tumor-centric crops or <italic>post-hoc</italic> ROI selection, simplifying the decision surface and limiting challenging negatives (e.g., pancreatitis, cysts, post-operative change). Finally, neither study reports robust, multi-center external testing at the <italic>patient</italic> level; metrics were typically computed on balanced sets and may not reflect screening prevalence or case-mix.</p>
<p>These considerations help explain why 99%&#x0002B; results are rare in larger, multi-institutional cohorts or prospective settings. In line with the trends in <xref ref-type="table" rid="T8">Table 8</xref>, when studies scale to diverse volumes and enforce patient-level splits with external validation, reported AUROC/accuracy typically settle into the 0.83&#x02013;0.96 range for CAD, and Dice for pancreas/tumor segmentation into the 0.57&#x02013;0.88 range&#x02014;credible but not &#x0201C;perfect&#x0201D; (<xref ref-type="bibr" rid="B56">56</xref>, <xref ref-type="bibr" rid="B69">69</xref>, <xref ref-type="bibr" rid="B82">82</xref>).</p>
<p>To close the gap between <xref ref-type="table" rid="T5">Tables 5</xref>&#x02013;<xref ref-type="table" rid="T8">8</xref> and clinical reality, we recommend that future work:</p>
<list list-type="order">
<list-item><p>Enforce <bold>patient-level</bold> train/validation/test segregation (no slice/patch overlap),</p></list-item>
<list-item><p>Report both <italic>image-level</italic> and <italic>patient-level</italic> metrics with <bold>case-mix</bold> and <bold>prevalence</bold> made explicit,</p></list-item>
<list-item><p>Include <bold>external, multi-center</bold> testing and, when possible, <bold>prospective</bold> evaluation,</p></list-item>
<list-item><p>Stratify results by lesion size (&#x0003C; 2 cm), stage, and scanner/protocol,</p></list-item>
<list-item><p>Provide <bold>calibration</bold> (reliability) curves and decision-curve analysis in addition to accuracy/AUC, and</p></list-item>
<list-item><p>Release preprocessing code and split manifests to mitigate inadvertent leakage.</p></list-item>
</list>
<p>Where feasible, anatomically informed attention (ducts, vessels) and multi-modal fusion should be prioritized for tasks that intrinsically require global context (e.g., lymph-node metastasis or subtype attribution) (<xref ref-type="bibr" rid="B15">15</xref>, <xref ref-type="bibr" rid="B77">77</xref>).</p>
<p><xref ref-type="table" rid="T3">Table 3</xref> presents a comprehensive methodological quality assessment across 24 studies from three model generations (Machine Learning, Deep Learning, and Attention/Transformer architectures), systematically analyzing single-center vs. multi-center designs and internal vs. external validation strategies&#x02014;key quality indicators that directly impact model generalizability and clinical translation.</p>
<p>The evolution of AI models has increasingly emphasized holistic patient context. Sex-specific RNA-Seq survival predictors illustrate how biological sex influences transcriptomic signatures and survival modeling (<xref ref-type="bibr" rid="B18">18</xref>). Multi-omics panels combining proteins and gene expression have outperformed single-omics classifiers, highlighting the synergy of heterogeneous data sources (<xref ref-type="bibr" rid="B20">20</xref>). The lymph-node metastasis study cited above fused dual-phase CT and eleven laboratory variables via an attention-based fusion module; this integration raised AUC from 0.72 (radiomics alone) to 0.83. Similar improvements are observed when fusing imaging with risk factors: logistic regression on urine biomarkers plus CA19-9 improved sensitivity and specificity to 96%, and multi-head fusion of dual-phase CT with clinical biomarkers increased lymph-node detection accuracy by over 10 percentage points. Beyond imaging, end-to-end frameworks now incorporate self-supervision and pseudo-lesion pretext tasks to reduce annotation burdens; models pre-trained on synthetic lesions achieved external accuracies up to 82.5% despite using only 10% of labeled data (<xref ref-type="bibr" rid="B60">60</xref>).</p></sec>
<sec id="s6">
<label>6</label>
<title>Emerging directions and future challenges</title>
<p>Although remarkable progress has been made, several challenges remain. Many studies are retrospective and single-center; prospective, multi-institutional trials are needed to verify generalisability and clinical impact. Domain shift across scanners, patient populations and acquisition protocols can degrade performance; attention-based causal regularization and uncertainty estimation may mitigate such shifts. Interpretation and fairness are critical: anatomically guided attention and uncertainty-aware mechanisms provide more transparent saliency maps and allow models to defer to clinicians when confidence is low. In addition, counterfactual explanations should be incorporated to further enhance interpretability and strengthen clinician trust. Early detection remains an open frontier. Delta-radiomics and longitudinal EHR models have shown that subtle texture changes or clinical patterns can predict pancreatic cancer 24 months before diagnosis (<xref ref-type="bibr" rid="B22">22</xref>). Recent work suggests that radiomic signatures in apparently normal pancreas can precede diagnosis by over a year. Translating such findings into practice will require precise volumetric segmentation and integration with high-risk cohort selection. Combining imaging with genomics, proteomics, and metabolomics may uncover prodromal signatures, while non-invasive modalities such as fundus photography already hint at systemic biomarkers. Finally, data privacy, algorithmic bias and the integration of AI outputs into clinical workflows require careful consideration. The next decade will likely see the convergence of self-supervised learning, causal graph modeling, multi-modal fusion and federated training to deliver interpretable and equitable AI tools for the early detection, staging, and management of pancreatic cancer. 
To ensure clinical translation, future studies must also adopt prospective, population-based trial designs that directly evaluate real-world screening and triage performance.</p>
<p>Among the surveyed studies, <bold>PANDA</bold> (9-center, 3,208 training CTs, 5,337 external validation cases, AUC 0.984&#x02013;0.987) (<xref ref-type="bibr" rid="B15">15</xref>) and <bold>Chen et al.&#x00027;s nationwide study</bold> (1,473 real-world CTs, AUC 0.95, 89.7% AI sensitivity vs. 74.7% radiologist for &#x0003C; 2 cm tumors) (<xref ref-type="bibr" rid="B82">82</xref>) represent the closest candidates for clinical deployment, distinguished by multi-center validation and head-to-head clinician comparisons. Beyond imaging, <bold>Nasief&#x00027;s delta-radiomics</bold> (treatment response prediction, external AUC 0.98) (<xref ref-type="bibr" rid="B26">26</xref>) and <bold>biomarker panels</bold> [PancRISK urine test, AUC 0.94 (<xref ref-type="bibr" rid="B32">32</xref>); plasma amino-acids, AUC 0.86 (<xref ref-type="bibr" rid="B33">33</xref>)] show strong translational potential through interpretability and integration with existing workflows. However, regulatory approval faces critical barriers: 83.3% of studies lack multi-center validation required by FDA/EMA for generalizability evidence (<xref ref-type="table" rid="T3">Table 3</xref>); only four studies documented prospective reader comparisons mandated for Software as a Medical Device (SaMD) submissions; and domain shift effects [11.8% performance drop in cross-ethnic validation (<xref ref-type="bibr" rid="B60">60</xref>)] highlight inadequate robustness testing. The EU AI Act (2024) further mandates algorithmic transparency, bias audits across patient subgroups, and uncertainty quantification&#x02014;requirements met by &#x0003C; 5% of reviewed studies.</p>
<sec>
<label>6.1</label>
<title>Data-sharing frameworks are critical yet underdeveloped accelerators of translation</title>
<p>Public benchmarks (Medical Segmentation Decathlon, NIH pancreas dataset) enable algorithmic comparison but lack patient metadata for robust generalizability assessment (<xref ref-type="bibr" rid="B68">68</xref>, <xref ref-type="bibr" rid="B69">69</xref>). Federated learning&#x02014;where models train locally while sharing only gradient updates&#x02014;offers privacy-preserving multi-center collaboration, yet zero reviewed studies employed this approach. International consortia (TCIA, PANDA, AI-PREDICT) demonstrate feasibility of de-identified data pooling with harmonized protocols, while standardized reporting (TRIPOD-AI, STARD-AI) remains non-mandatory despite regulatory necessity. Accelerating translation requires: (1) multi-center designs as the methodological standard (currently only 16.7%); (2) federated learning adoption to overcome privacy barriers; (3) expanded data-sharing consortia with incentivized participation; and (4) prospective trials with locked algorithms and independent test sets to satisfy FDA 510(k) evidentiary requirements. Without these infrastructure investments, even high-performing AI risks remaining research curiosities rather than deployed clinical tools.</p></sec></sec>
<sec sec-type="conclusions" id="s7">
<label>7</label>
<title>Conclusion</title>
<p>This review has traced the methodological progression from classical machine-learning and radiomics to deep representation learning and, most recently, attention- and transformer-enhanced architectures for pancreatic cancer detection, segmentation and prognosis. Attention-augmented models and multi-modal fusion have delivered consistent gains in diagnostic accuracy, segmentation Dice and AUC across a range of tasks and datasets, demonstrating clear potential to augment human readers and to enable earlier, non-invasive detection.</p>
<p>Despite these advances, several barriers remain before routine clinical deployment is possible. Many published studies are retrospective, single-center or underpowered; evaluation metrics and reporting are heterogeneous; and model robustness to domain shift, scanner variability and population differences is often insufficiently tested. Moreover, challenges around interpretability, fairness, data governance and workflow integration persist and must be addressed alongside pure algorithmic improvements.</p>
<p>To accelerate safe and equitable translation we recommend the following priorities:</p>
<list list-type="bullet">
<list-item><p>Move beyond retrospective benchmarks by testing models in prospective cohorts and pragmatic clinical settings to quantify real-world benefit and harms.</p></list-item>
<list-item><p>Adopt common reporting standards, share pre-processing pipelines and encourage public, well-annotated multi-center datasets to reduce evaluation heterogeneity.</p></list-item>
<list-item><p>Invest in methods for domain generalization, calibration, uncertainty estimation and causal regularization to reduce sensitivity to scanner/protocol and population shifts.</p></list-item>
<list-item><p>Prioritize architectures that fuse imaging with biomarkers and clinical data while remaining computationally efficient and interpretable for deployment.</p></list-item>
<list-item><p>Use federated learning and secure model-sharing to scale training data while respecting patient privacy and jurisdictional constraints.</p></list-item>
<list-item><p>Co-design algorithms with clinicians, evaluate decision-impact (not just performance metrics), and plan regulatory, ethical and cost-effectiveness assessments early in development.</p></list-item>
</list>
<p>In summary, the combination of anatomically guided attention, principled multi-modal fusion, and rigorous external validation offers the most promising path toward clinically useful AI for pancreatic cancer. If future work couples these technical advances with prospective trials, clear reporting standards and careful attention to fairness and deployment, AI could materially improve early detection, staging and personalized management for patients with pancreatic cancer.</p></sec>
</body>
<back>
<sec sec-type="data-availability" id="s8">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="s9">
<title>Author contributions</title>
<p>MA: Funding acquisition, Investigation, Writing &#x02013; original draft. ST: Formal analysis, Investigation, Methodology, Writing &#x02013; original draft. MH: Conceptualization, Project administration, Supervision, Writing &#x02013; review &#x00026; editing. SK: Formal analysis, Validation, Visualization, Writing &#x02013; review &#x00026; editing. AF: Data curation, Investigation, Software, Writing &#x02013; original draft. HA: Formal analysis, Investigation, Methodology, Writing &#x02013; review &#x00026; editing. AA: Data curation, Formal analysis, Methodology, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1.</label>
<mixed-citation publication-type="web"><person-group person-group-type="author"><collab>World Cancer Research Fund</collab></person-group>. <source>Pancreatic cancer statistics</source>. (<year>2022</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.wcrf.org/preventing-cancer/cancer-statistics/pancreatic-cancer-statistics/">https://www.wcrf.org/preventing-cancer/cancer-statistics/pancreatic-cancer-statistics/</ext-link> (Accessed September 8, 2025).</mixed-citation>
</ref>
<ref id="B2">
<label>2.</label>
<mixed-citation publication-type="web"><person-group person-group-type="author"><collab>Precedence Research</collab></person-group>. <source>Pancreatic Cancer Market Size, Share, and Trends 2024 to 2034</source>. (<year>2024</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.precedenceresearch.com/pancreatic-cancer-market">https://www.precedenceresearch.com/pancreatic-cancer-market</ext-link> (Accessed October 9, 2025).</mixed-citation>
</ref>
<ref id="B3">
<label>3.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Zhang</surname> <given-names>Y</given-names></name> <name><surname>Yan</surname> <given-names>Z</given-names></name> <name><surname>Jiang</surname> <given-names>W</given-names></name> <name><surname>Rui</surname> <given-names>S</given-names></name></person-group>. <article-title>Global, regional and national burden of pancreatic cancer and its attributable risk factors from 2019 to 2021, with projection to 2044</article-title>. <source>Front Oncol</source>. (<year>2025</year>) <volume>14</volume>:<fpage>1521788</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fonc.2024.1521788</pub-id><pub-id pub-id-type="pmid">39876895</pub-id></mixed-citation>
</ref>
<ref id="B4">
<label>4.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zottl</surname> <given-names>J</given-names></name> <name><surname>Sebesta</surname> <given-names>CG</given-names></name> <name><surname>Tomosel</surname> <given-names>E</given-names></name> <name><surname>Sebesta</surname> <given-names>M</given-names></name> <name><surname>Sebesta</surname> <given-names>C</given-names></name></person-group>. <article-title>Unraveling the burden of pancreatic cancer in the 21st century: trends in incidence, mortality, survival, and key contributing factors</article-title>. <source>Cancers</source>. (<year>2025</year>) <volume>17</volume>:<fpage>1607</fpage>. doi: <pub-id pub-id-type="doi">10.3390/cancers17101607</pub-id><pub-id pub-id-type="pmid">40427106</pub-id></mixed-citation>
</ref>
<ref id="B5">
<label>5.</label>
<mixed-citation publication-type="web"><person-group person-group-type="author"><collab>National Cancer Institute</collab></person-group>. <source>Cancer Stat Facts: Pancreatic Cancer</source>. (<year>2025</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://seer.cancer.gov/statfacts/html/pancreas.html">https://seer.cancer.gov/statfacts/html/pancreas.html</ext-link> (Accessed September 8, 2025).</mixed-citation>
</ref>
<ref id="B6">
<label>6.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Olakowski</surname> <given-names>M</given-names></name> <name><surname>Bu&#x00142;dak</surname> <given-names>&#x00141;</given-names></name></person-group>. <article-title>Modifiable and non-modifiable risk factors for the development of non-hereditary pancreatic cancer</article-title>. <source>Medicina</source>. (<year>2022</year>) <volume>58</volume>:<fpage>978</fpage>. doi: <pub-id pub-id-type="doi">10.3390/medicina58080978</pub-id><pub-id pub-id-type="pmid">35893093</pub-id></mixed-citation>
</ref>
<ref id="B7">
<label>7.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Qadir</surname> <given-names>MI</given-names></name> <name><surname>Baril</surname> <given-names>JA</given-names></name> <name><surname>Yip-Schneider</surname> <given-names>MT</given-names></name> <name><surname>Schonlau</surname> <given-names>D</given-names></name> <name><surname>Tran</surname> <given-names>TTT</given-names></name> <name><surname>Schmidt</surname> <given-names>CM</given-names></name> <etal/></person-group>. <article-title>Artificial intelligence in pancreatic intraductal papillary mucinous neoplasm imaging: a systematic review</article-title>. <source>PLoS Digital Health</source>. (<year>2025</year>) <volume>4</volume>:<fpage>e0000920</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pdig.0000920</pub-id><pub-id pub-id-type="pmid">39830259</pub-id></mixed-citation>
</ref>
<ref id="B8">
<label>8.</label>
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Winstead</surname> <given-names>E</given-names></name></person-group>. <source>Screening People at High Risk for Pancreatic Cancer May Help Them Live Longer</source>. (<year>2024</year>). National Cancer Institute Cancer Currents Blog. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.cancer.gov/news-events/cancer-currents-blog/2024/pancreatic-cancer-surveillance">https://www.cancer.gov/news-events/cancer-currents-blog/2024/pancreatic-cancer-surveillance</ext-link> (Accessed September 8, 2025).</mixed-citation>
</ref>
<ref id="B9">
<label>9.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tehsin</surname> <given-names>S</given-names></name> <name><surname>Zameer</surname> <given-names>S</given-names></name> <name><surname>Saif</surname> <given-names>S</given-names></name></person-group>. <article-title>Myeloma cell detection in bone marrow aspiration using microscopic images</article-title>. In: <source>2019 11th International Conference on Knowledge and Smart Technology (KST)</source> (<year>2019</year>). p. <fpage>57</fpage>&#x02013;<lpage>61</lpage>. doi: <pub-id pub-id-type="doi">10.1109/KST.2019.8687511</pub-id></mixed-citation>
</ref>
<ref id="B10">
<label>10.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Antony</surname> <given-names>A</given-names></name> <name><surname>Mukherjee</surname> <given-names>S</given-names></name> <name><surname>Bhinder</surname> <given-names>K</given-names></name> <name><surname>Murlidhar</surname> <given-names>M</given-names></name> <name><surname>Zarrintan</surname> <given-names>A</given-names></name> <name><surname>Goenka</surname> <given-names>AH</given-names></name></person-group>. <article-title>Artificial intelligence-augmented imaging for early pancreatic cancer detection</article-title>. <source>Vis Med</source>. (<year>2025</year>) <volume>41</volume>:<fpage>271</fpage>&#x02013;<lpage>279</lpage>. doi: <pub-id pub-id-type="doi">10.1159/000546603</pub-id><pub-id pub-id-type="pmid">40567392</pub-id></mixed-citation>
</ref>
<ref id="B11">
<label>11.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Podin&#x00103;</surname> <given-names>N</given-names></name> <name><surname>Gheorghe</surname> <given-names>EC</given-names></name> <name><surname>Constantin</surname> <given-names>A</given-names></name> <name><surname>Cazacu</surname> <given-names>I</given-names></name> <name><surname>Croitoru</surname> <given-names>V</given-names></name> <name><surname>Gheorghe</surname> <given-names>C</given-names></name> <etal/></person-group>. <article-title>Artificial intelligence in pancreatic imaging: a systematic review</article-title>. <source>United Eur Gastroenterol J</source>. (<year>2025</year>) <volume>13</volume>:<fpage>55</fpage>&#x02013;<lpage>77</lpage>. doi: <pub-id pub-id-type="doi">10.1002/ueg2.12723</pub-id><pub-id pub-id-type="pmid">39865461</pub-id></mixed-citation>
</ref>
<ref id="B12">
<label>12.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yao</surname> <given-names>L</given-names></name> <name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Keles</surname> <given-names>E</given-names></name> <name><surname>Yazici</surname> <given-names>C</given-names></name> <name><surname>Tirkes</surname> <given-names>T</given-names></name> <name><surname>Bagci</surname> <given-names>U</given-names></name> <etal/></person-group>. <article-title>A review of deep learning and radiomics approaches for pancreatic cancer diagnosis from medical imaging</article-title>. <source>Curr Opin Gastroenterol</source>. (<year>2023</year>) <volume>39</volume>:<fpage>436</fpage>&#x02013;<lpage>47</lpage>. doi: <pub-id pub-id-type="doi">10.1097/MOG.0000000000000966</pub-id></mixed-citation>
</ref>
<ref id="B13">
<label>13.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mishra</surname> <given-names>AK</given-names></name> <name><surname>Chong</surname> <given-names>B</given-names></name> <name><surname>Arunachalam</surname> <given-names>SP</given-names></name> <name><surname>Oberg</surname> <given-names>AL</given-names></name> <name><surname>Majumder</surname> <given-names>S</given-names></name></person-group>. <article-title>Machine learning models for pancreatic cancer risk prediction using electronic health record data-a systematic review and assessment</article-title>. <source>Am J Gastroenterol</source>. (<year>2024</year>) <volume>119</volume>:<fpage>1466</fpage>&#x02013;<lpage>82</lpage>. doi: <pub-id pub-id-type="doi">10.14309/ajg.0000000000002870</pub-id><pub-id pub-id-type="pmid">38752654</pub-id></mixed-citation>
</ref>
<ref id="B14">
<label>14.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>L</given-names></name> <name><surname>Li</surname> <given-names>D</given-names></name> <name><surname>Su</surname> <given-names>T</given-names></name> <name><surname>Xiao</surname> <given-names>T</given-names></name> <name><surname>Zhao</surname> <given-names>S</given-names></name></person-group>. <article-title>Effectiveness of radiomics-based machine learning models in differentiating pancreatitis and pancreatic ductal adenocarcinoma: systematic review and meta-analysis</article-title>. <source>J Med Internet Res</source>. (<year>2025</year>) <volume>27</volume>:<fpage>e72420</fpage>. doi: <pub-id pub-id-type="doi">10.2196/72420</pub-id><pub-id pub-id-type="pmid">40744488</pub-id></mixed-citation>
</ref>
<ref id="B15">
<label>15.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cao</surname> <given-names>K</given-names></name> <name><surname>Xia</surname> <given-names>Y</given-names></name> <name><surname>Yao</surname> <given-names>J</given-names></name> <name><surname>Han</surname> <given-names>X</given-names></name> <name><surname>Lambert</surname> <given-names>L</given-names></name> <name><surname>Zhang</surname> <given-names>T</given-names></name> <etal/></person-group>. <article-title>Large-scale pancreatic cancer detection via non-contrast CT and deep learning</article-title>. <source>Nat Med</source>. (<year>2023</year>) <volume>29</volume>:<fpage>3033</fpage>&#x02013;<lpage>43</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41591-023-02640-w</pub-id><pub-id pub-id-type="pmid">37985692</pub-id></mixed-citation>
</ref>
<ref id="B16">
<label>16.</label>
<mixed-citation publication-type="web"><person-group person-group-type="author"><collab>Google Trends, Google LLC</collab></person-group>. <source>Artificial Intelligence Search Trends (2015&#x02013;2025)</source>. (<year>2025</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://trends.withgoogle.com/trends/us/artificial-intelligence-search-trends/">https://trends.withgoogle.com/trends/us/artificial-intelligence-search-trends/</ext-link> (Accessed October 9, 2025).</mixed-citation>
</ref>
<ref id="B17">
<label>17.</label>
<mixed-citation publication-type="web"><person-group person-group-type="author"><collab>OECD.AI Policy Observatory, Organisation for Economic Co-operation and Development (OECD)</collab></person-group>. <source>AI Publications by Country (2000&#x02013;2025)</source>. (<year>2025</year>). Data from OpenAlex and Scopus. Available online at: <ext-link ext-link-type="uri" xlink:href="https://oecd.ai/en/data">https://oecd.ai/en/data</ext-link> (Accessed October 9, 2025).</mixed-citation>
</ref>
<ref id="B18">
<label>18.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ojha</surname> <given-names>A</given-names></name> <name><surname>Zhao</surname> <given-names>SJ</given-names></name> <name><surname>Akpunonu</surname> <given-names>B</given-names></name> <name><surname>Zhang</surname> <given-names>JT</given-names></name> <name><surname>Simo</surname> <given-names>KA</given-names></name> <name><surname>Liu</surname> <given-names>JY</given-names></name></person-group>. <article-title>Gap-App: a sex-distinct AI-based predictor for pancreatic ductal adenocarcinoma survival as a web application open to patients and physicians</article-title>. <source>Cancer Lett</source>. (<year>2025</year>) <volume>622</volume>:<fpage>217689</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.canlet.2025.217689</pub-id><pub-id pub-id-type="pmid">38895246</pub-id></mixed-citation>
</ref>
<ref id="B19">
<label>19.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Modi</surname> <given-names>N</given-names></name> <name><surname>Kumar</surname> <given-names>Y</given-names></name></person-group>. <article-title>Machine Learning Based Approaches to diagnosis and detection of cancerous and non-pancreatic cancerous conditions</article-title>. In: <source>2024 OPJU International Technology Conference (OTCON) on Smart Computing for Innovation and Advancement in Industry 4.0</source> (<year>2024</year>). p. <fpage>1</fpage>&#x02013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1109/OTCON60325.2024.10687862</pub-id></mixed-citation>
</ref>
<ref id="B20">
<label>20.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pandey</surname> <given-names>P</given-names></name> <name><surname>Mayank</surname> <given-names>K</given-names></name> <name><surname>Sharma</surname> <given-names>S</given-names></name></person-group>. <article-title>Bio-marker cancer prediction system using artificial intelligence</article-title>. In: <source>2023 International Conference on Integration of Computational Intelligent System (ICICIS)</source> (<year>2023</year>). p. <fpage>1</fpage>&#x02013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICICIS56802.2023.10430291</pub-id></mixed-citation>
</ref>
<ref id="B21">
<label>21.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hasan</surname> <given-names>MU</given-names></name> <name><surname>Nishat Khan</surname> <given-names>F</given-names></name> <name><surname>Jahan</surname> <given-names>H</given-names></name> <name><surname>Zaman</surname> <given-names>Z</given-names></name> <name><surname>Rahman</surname> <given-names>MM</given-names></name></person-group>. <article-title>Predicting pancreatic cancer survival and staging: a machine learning approach</article-title>. In: <source>2024 IEEE International Conference on Computing, Applications and Systems (COMPAS)</source> (<year>2024</year>). p. <fpage>1</fpage>&#x02013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1109/COMPAS60761.2024.10797176</pub-id></mixed-citation>
</ref>
<ref id="B22">
<label>22.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>Q</given-names></name> <name><surname>Cherry</surname> <given-names>DR</given-names></name> <name><surname>Nalawade</surname> <given-names>V</given-names></name> <name><surname>Qiao</surname> <given-names>EM</given-names></name> <name><surname>Kumar</surname> <given-names>A</given-names></name> <name><surname>Lowy</surname> <given-names>AM</given-names></name> <etal/></person-group>. <article-title>Clinical data prediction model to identify patients with early-stage pancreatic cancer</article-title>. <source>JCO Clin Cancer Inform</source>. (<year>2021</year>) <volume>5</volume>:<fpage>279</fpage>&#x02013;<lpage>87</lpage>. doi: <pub-id pub-id-type="doi">10.1200/CCI.20.00137</pub-id><pub-id pub-id-type="pmid">33739856</pub-id></mixed-citation>
</ref>
<ref id="B23">
<label>23.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Muhammad</surname> <given-names>W</given-names></name> <name><surname>Hart</surname> <given-names>GR</given-names></name> <name><surname>Nartowt</surname> <given-names>B</given-names></name> <name><surname>Farrell</surname> <given-names>JJ</given-names></name> <name><surname>Johung</surname> <given-names>K</given-names></name> <name><surname>Liang</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>Pancreatic cancer prediction through an artificial neural network</article-title>. <source>Front Artif Intell</source>. (<year>2019</year>) <volume>2</volume>:<fpage>2</fpage>. doi: <pub-id pub-id-type="doi">10.3389/frai.2019.00002</pub-id><pub-id pub-id-type="pmid">33733091</pub-id></mixed-citation>
</ref>
<ref id="B24">
<label>24.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>SM</given-names></name> <name><surname>Phuc</surname> <given-names>PT</given-names></name> <name><surname>Nguyen</surname> <given-names>PA</given-names></name> <name><surname>Burton</surname> <given-names>W</given-names></name> <name><surname>Lin</surname> <given-names>SJ</given-names></name> <name><surname>Lin</surname> <given-names>WC</given-names></name> <etal/></person-group>. <article-title>A novel prediction model of the risk of pancreatic cancer among diabetes patients using multiple clinical data and machine learning</article-title>. <source>Cancer Med</source>. (<year>2023</year>) <volume>12</volume>:<fpage>19987</fpage>&#x02013;<lpage>99</lpage>. doi: <pub-id pub-id-type="doi">10.1002/cam4.6547</pub-id><pub-id pub-id-type="pmid">37737056</pub-id></mixed-citation>
</ref>
<ref id="B25">
<label>25.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ravi</surname> <given-names>KC</given-names></name> <name><surname>Dixit</surname> <given-names>RR</given-names></name> <name><surname>T</surname> <given-names>I</given-names></name> <name><surname>Singh</surname> <given-names>S</given-names></name> <name><surname>Gopatoti</surname> <given-names>A</given-names></name> <name><surname>Yadav</surname> <given-names>AS</given-names></name></person-group>. <article-title>AI-powered pancreas navigator: delving into the depths of early pancreatic cancer diagnosis using advanced deep learning techniques</article-title>. In: <source>2023 9th International Conference on Smart Structures and Systems (ICSSS)</source> (<year>2023</year>). p. <fpage>1</fpage>&#x02013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICSSS58085.2023.10407836</pub-id></mixed-citation>
</ref>
<ref id="B26">
<label>26.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nasief</surname> <given-names>H</given-names></name> <name><surname>Zheng</surname> <given-names>C</given-names></name> <name><surname>Schott</surname> <given-names>D</given-names></name> <name><surname>Hall</surname> <given-names>W</given-names></name> <name><surname>Tsai</surname> <given-names>S</given-names></name> <name><surname>Erickson</surname> <given-names>B</given-names></name> <etal/></person-group>. <article-title>A machine learning based delta-radiomics process for early prediction of treatment response of pancreatic cancer</article-title>. <source>NPJ Precis Oncol</source>. (<year>2019</year>) <volume>3</volume>:<fpage>25</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41698-019-0096-z</pub-id><pub-id pub-id-type="pmid">31602401</pub-id></mixed-citation>
</ref>
<ref id="B27">
<label>27.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mukherjee</surname> <given-names>S</given-names></name> <name><surname>Patra</surname> <given-names>A</given-names></name> <name><surname>Khasawneh</surname> <given-names>H</given-names></name> <name><surname>Korfiatis</surname> <given-names>P</given-names></name> <name><surname>Rajamohan</surname> <given-names>N</given-names></name> <name><surname>Suman</surname> <given-names>G</given-names></name> <etal/></person-group>. <article-title>Radiomics-based machine-learning models can detect pancreatic cancer on prediagnostic computed tomography scans at a substantial lead time before clinical diagnosis</article-title>. <source>Gastroenterology</source>. (<year>2022</year>) <volume>163</volume>:<fpage>1435</fpage>&#x02013;<lpage>1446</lpage>.e3. doi: <pub-id pub-id-type="doi">10.1053/j.gastro.2022.06.066</pub-id><pub-id pub-id-type="pmid">35788343</pub-id></mixed-citation>
</ref>
<ref id="B28">
<label>28.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Toyama</surname> <given-names>Y</given-names></name> <name><surname>Hotta</surname> <given-names>M</given-names></name> <name><surname>Motoi</surname> <given-names>F</given-names></name> <name><surname>Takanami</surname> <given-names>K</given-names></name> <name><surname>Minamimoto</surname> <given-names>R</given-names></name> <name><surname>Takase</surname> <given-names>K</given-names></name></person-group>. <article-title>Prognostic value of FDG-PET radiomics with machine learning in pancreatic cancer</article-title>. <source>Sci Rep</source>. (<year>2020</year>) <volume>10</volume>:<fpage>17024</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-020-73237-3</pub-id><pub-id pub-id-type="pmid">33046736</pub-id></mixed-citation>
</ref>
<ref id="B29">
<label>29.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yokoyama</surname> <given-names>S</given-names></name> <name><surname>Hamada</surname> <given-names>T</given-names></name> <name><surname>Higashi</surname> <given-names>M</given-names></name> <name><surname>Matsuo</surname> <given-names>K</given-names></name> <name><surname>Maemura</surname> <given-names>K</given-names></name> <name><surname>Kurahara</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Predicted prognosis of patients with pancreatic cancer by machine learning</article-title>. <source>Clin Cancer Res</source>. (<year>2020</year>) <volume>26</volume>:<fpage>2411</fpage>&#x02013;<lpage>21</lpage>. doi: <pub-id pub-id-type="doi">10.1158/1078-0432.CCR-19-1247</pub-id><pub-id pub-id-type="pmid">31992588</pub-id></mixed-citation>
</ref>
<ref id="B30">
<label>30.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ozkan</surname> <given-names>M</given-names></name> <name><surname>Cakiroglu</surname> <given-names>M</given-names></name> <name><surname>Kocaman</surname> <given-names>O</given-names></name> <name><surname>Kurt</surname> <given-names>M</given-names></name> <name><surname>Yilmaz</surname> <given-names>B</given-names></name> <name><surname>Can</surname> <given-names>G</given-names></name> <etal/></person-group>. <article-title>Age-based computer-aided diagnosis approach for pancreatic cancer on endoscopic ultrasound images</article-title>. <source>Endosc Ultrasound</source>. (<year>2016</year>) <volume>5</volume>:<fpage>101</fpage>&#x02013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.4103/2303-9027.180473</pub-id><pub-id pub-id-type="pmid">27080608</pub-id></mixed-citation>
</ref>
<ref id="B31">
<label>31.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dodda</surname> <given-names>K</given-names></name> <name><surname>Muneeswari</surname> <given-names>G</given-names></name></person-group>. <article-title>IANFIS: a machine learning-based optimized technique for the classification and segmentation of pancreatic cancer</article-title>. <source>Res Biomed Eng</source>. (<year>2024</year>) <volume>40</volume>:<fpage>373</fpage>&#x02013;<lpage>85</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s42600-024-00352-9</pub-id></mixed-citation>
</ref>
<ref id="B32">
<label>32.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Blyuss</surname> <given-names>O</given-names></name> <name><surname>Zaikin</surname> <given-names>A</given-names></name> <name><surname>Cherepanova</surname> <given-names>V</given-names></name> <name><surname>Munblit</surname> <given-names>D</given-names></name> <name><surname>Kiseleva</surname> <given-names>EM</given-names></name> <name><surname>Prytomanova</surname> <given-names>OM</given-names></name> <etal/></person-group>. <article-title>Development of PancRISK, a urine biomarker-based risk score for stratified screening of pancreatic cancer patients</article-title>. <source>Br J Cancer</source>. (<year>2020</year>) <volume>122</volume>:<fpage>692</fpage>&#x02013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41416-019-0694-0</pub-id><pub-id pub-id-type="pmid">31857725</pub-id></mixed-citation>
</ref>
<ref id="B33">
<label>33.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zou</surname> <given-names>X</given-names></name> <name><surname>Wei</surname> <given-names>J</given-names></name> <name><surname>Huang</surname> <given-names>Z</given-names></name> <name><surname>Zhou</surname> <given-names>X</given-names></name> <name><surname>Lu</surname> <given-names>Z</given-names></name> <name><surname>Zhu</surname> <given-names>W</given-names></name> <etal/></person-group>. <article-title>Identification of a six-miRNA panel in serum benefiting pancreatic cancer diagnosis</article-title>. <source>Cancer Med</source>. (<year>2019</year>) <volume>8</volume>:<fpage>2810</fpage>&#x02013;<lpage>22</lpage>. doi: <pub-id pub-id-type="doi">10.1002/cam4.2145</pub-id><pub-id pub-id-type="pmid">31006985</pub-id></mixed-citation>
</ref>
<ref id="B34">
<label>34.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Karar</surname> <given-names>ME</given-names></name> <name><surname>El-Fishawy</surname> <given-names>N</given-names></name> <name><surname>Radad</surname> <given-names>M</given-names></name></person-group>. <article-title>Automated classification of urine biomarkers to diagnose pancreatic cancer using 1-D convolutional neural networks</article-title>. <source>J Biol Eng</source>. (<year>2023</year>) <volume>17</volume>:<fpage>28</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s13036-023-00340-0</pub-id><pub-id pub-id-type="pmid">37069681</pub-id></mixed-citation>
</ref>
<ref id="B35">
<label>35.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kinugasa</surname> <given-names>H</given-names></name> <name><surname>Nouso</surname> <given-names>K</given-names></name> <name><surname>Miyahara</surname> <given-names>K</given-names></name> <name><surname>Morimoto</surname> <given-names>Y</given-names></name> <name><surname>Dohi</surname> <given-names>C</given-names></name> <name><surname>Tsutsumi</surname> <given-names>K</given-names></name> <etal/></person-group>. <article-title>Detection of K-ras gene mutation by liquid biopsy in patients with pancreatic cancer</article-title>. <source>Cancer</source>. (<year>2015</year>) <volume>121</volume>:<fpage>2271</fpage>&#x02013;<lpage>80</lpage>. doi: <pub-id pub-id-type="doi">10.1002/cncr.29364</pub-id><pub-id pub-id-type="pmid">25823825</pub-id></mixed-citation>
</ref>
<ref id="B36">
<label>36.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cohen</surname> <given-names>JD</given-names></name> <name><surname>Javed</surname> <given-names>AA</given-names></name> <name><surname>Thoburn</surname> <given-names>C</given-names></name> <name><surname>Wong</surname> <given-names>F</given-names></name> <name><surname>Tie</surname> <given-names>J</given-names></name> <name><surname>Gibbs</surname> <given-names>P</given-names></name> <etal/></person-group>. <article-title>Combined circulating tumor DNA and protein biomarker-based liquid biopsy for the earlier detection of pancreatic cancers</article-title>. <source>Proc Nat Acad Sci</source>. (<year>2017</year>) <volume>114</volume>:<fpage>10202</fpage>&#x02013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.1704961114</pub-id><pub-id pub-id-type="pmid">28874546</pub-id></mixed-citation>
</ref>
<ref id="B37">
<label>37.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pietrasz</surname> <given-names>D</given-names></name> <name><surname>P&#x000E9;cuchet</surname> <given-names>N</given-names></name> <name><surname>Garlan</surname> <given-names>F</given-names></name> <name><surname>Didelot</surname> <given-names>A</given-names></name> <name><surname>Dubreuil</surname> <given-names>O</given-names></name> <name><surname>Doat</surname> <given-names>S</given-names></name> <etal/></person-group>. <article-title>Plasma circulating tumor DNA in pancreatic cancer patients is a prognostic marker</article-title>. <source>Clin Cancer Res</source>. (<year>2017</year>) <volume>23</volume>:<fpage>116</fpage>&#x02013;<lpage>23</lpage>. doi: <pub-id pub-id-type="doi">10.1158/1078-0432.CCR-16-0806</pub-id><pub-id pub-id-type="pmid">27993964</pub-id></mixed-citation>
</ref>
<ref id="B38">
<label>38.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hadano</surname> <given-names>N</given-names></name> <name><surname>Murakami</surname> <given-names>Y</given-names></name> <name><surname>Uemura</surname> <given-names>K</given-names></name> <name><surname>Hashimoto</surname> <given-names>Y</given-names></name> <name><surname>Kondo</surname> <given-names>N</given-names></name> <name><surname>Nakagawa</surname> <given-names>N</given-names></name> <etal/></person-group>. <article-title>Prognostic value of circulating tumour DNA in patients undergoing curative resection for pancreatic cancer</article-title>. <source>Br J Cancer</source>. (<year>2016</year>) <volume>115</volume>:<fpage>59</fpage>&#x02013;<lpage>65</lpage>. doi: <pub-id pub-id-type="doi">10.1038/bjc.2016.175</pub-id><pub-id pub-id-type="pmid">27280632</pub-id></mixed-citation>
</ref>
<ref id="B39">
<label>39.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Allenson</surname> <given-names>K</given-names></name> <name><surname>Castillo</surname> <given-names>J</given-names></name> <name><surname>San Lucas</surname> <given-names>F</given-names></name> <name><surname>Scelo</surname> <given-names>G</given-names></name> <name><surname>Kim</surname> <given-names>D</given-names></name> <name><surname>Bernard</surname> <given-names>V</given-names></name> <etal/></person-group>. <article-title>High prevalence of mutant KRAS in circulating exosome-derived DNA from early-stage pancreatic cancer patients</article-title>. <source>Ann Oncol</source>. (<year>2017</year>) <volume>28</volume>:<fpage>741</fpage>&#x02013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.1093/annonc/mdx004</pub-id></mixed-citation>
</ref>
<ref id="B40">
<label>40.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ankeny</surname> <given-names>J</given-names></name> <name><surname>Court</surname> <given-names>C</given-names></name> <name><surname>Hou</surname> <given-names>S</given-names></name> <name><surname>Li</surname> <given-names>Q</given-names></name> <name><surname>Song</surname> <given-names>M</given-names></name> <name><surname>Wu</surname> <given-names>D</given-names></name> <etal/></person-group>. <article-title>Circulating tumour cells as a biomarker for diagnosis and staging in pancreatic cancer</article-title>. <source>Br J Cancer</source>. (<year>2016</year>) <volume>114</volume>:<fpage>1367</fpage>&#x02013;<lpage>75</lpage>. doi: <pub-id pub-id-type="doi">10.1038/bjc.2016.121</pub-id><pub-id pub-id-type="pmid">27300108</pub-id></mixed-citation>
</ref>
<ref id="B41">
<label>41.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>O&#x00027;Brien</surname> <given-names>DP</given-names></name> <name><surname>Sandanayake</surname> <given-names>NS</given-names></name> <name><surname>Jenkinson</surname> <given-names>C</given-names></name> <name><surname>Gentry-Maharaj</surname> <given-names>A</given-names></name> <name><surname>Apostolidou</surname> <given-names>S</given-names></name> <name><surname>Fourkala</surname> <given-names>EO</given-names></name> <etal/></person-group>. <article-title>Serum CA19-9 is significantly upregulated up to 2 years before diagnosis with pancreatic cancer: implications for early disease detection</article-title>. <source>Clin Cancer Res</source>. (<year>2015</year>) <volume>21</volume>:<fpage>622</fpage>&#x02013;<lpage>31</lpage>. doi: <pub-id pub-id-type="doi">10.1158/1078-0432.CCR-14-0365</pub-id><pub-id pub-id-type="pmid">24938522</pub-id></mixed-citation>
</ref>
<ref id="B42">
<label>42.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Madhavan</surname> <given-names>B</given-names></name> <name><surname>Yue</surname> <given-names>S</given-names></name> <name><surname>Galli</surname> <given-names>U</given-names></name> <name><surname>Rana</surname> <given-names>S</given-names></name> <name><surname>Gross</surname> <given-names>W</given-names></name> <name><surname>M&#x000FC;ller</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>Combined evaluation of a panel of protein and miRNA serum-exosome biomarkers for pancreatic cancer diagnosis increases sensitivity and specificity</article-title>. <source>Int J Cancer</source>. (<year>2015</year>) <volume>136</volume>:<fpage>2616</fpage>&#x02013;<lpage>27</lpage>. doi: <pub-id pub-id-type="doi">10.1002/ijc.29324</pub-id><pub-id pub-id-type="pmid">25388097</pub-id></mixed-citation>
</ref>
<ref id="B43">
<label>43.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Luo</surname> <given-names>G</given-names></name> <name><surname>Liu</surname> <given-names>C</given-names></name> <name><surname>Guo</surname> <given-names>M</given-names></name> <name><surname>Cheng</surname> <given-names>H</given-names></name> <name><surname>Lu</surname> <given-names>Y</given-names></name> <name><surname>Jin</surname> <given-names>K</given-names></name> <etal/></person-group>. <article-title>Potential biomarkers in Lewis negative patients with pancreatic cancer</article-title>. <source>Ann Surg</source>. (<year>2017</year>) <volume>265</volume>:<fpage>800</fpage>&#x02013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1097/SLA.0000000000001741</pub-id><pub-id pub-id-type="pmid">28267695</pub-id></mixed-citation>
</ref>
<ref id="B44">
<label>44.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kanno</surname> <given-names>A</given-names></name> <name><surname>Masamune</surname> <given-names>A</given-names></name> <name><surname>Hanada</surname> <given-names>K</given-names></name> <name><surname>Maguchi</surname> <given-names>H</given-names></name> <name><surname>Shimizu</surname> <given-names>Y</given-names></name> <name><surname>Ueki</surname> <given-names>T</given-names></name> <etal/></person-group>. <article-title>Multicenter study of early pancreatic cancer in Japan</article-title>. <source>Pancreatology</source>. (<year>2018</year>) <volume>18</volume>:<fpage>61</fpage>&#x02013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.pan.2017.11.007</pub-id><pub-id pub-id-type="pmid">29170051</pub-id></mixed-citation>
</ref>
<ref id="B45">
<label>45.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hattori</surname> <given-names>N</given-names></name> <name><surname>Yamada</surname> <given-names>S</given-names></name> <name><surname>Torii</surname> <given-names>K</given-names></name> <name><surname>Takeda</surname> <given-names>S</given-names></name> <name><surname>Nakamura</surname> <given-names>K</given-names></name> <name><surname>Tanaka</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Effectiveness of plasma treatment on pancreatic cancer cells</article-title>. <source>Int J Oncol</source>. (<year>2015</year>) <volume>47</volume>:<fpage>1655</fpage>&#x02013;<lpage>62</lpage>. doi: <pub-id pub-id-type="doi">10.3892/ijo.2015.3149</pub-id><pub-id pub-id-type="pmid">26351772</pub-id></mixed-citation>
</ref>
<ref id="B46">
<label>46.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>KL</given-names></name> <name><surname>Wu</surname> <given-names>T</given-names></name> <name><surname>Chen</surname> <given-names>PT</given-names></name> <name><surname>Tsai</surname> <given-names>YM</given-names></name> <name><surname>Roth</surname> <given-names>H</given-names></name> <name><surname>Wu</surname> <given-names>MS</given-names></name> <etal/></person-group>. <article-title>Deep learning to distinguish pancreatic cancer tissue from non-cancerous pancreatic tissue: a retrospective study with cross-racial external validation</article-title>. <source>Lancet Digital Health</source>. (<year>2020</year>) <volume>2</volume>:<fpage>e303</fpage>&#x02013;<lpage>13</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S2589-7500(20)30078-9</pub-id><pub-id pub-id-type="pmid">33328124</pub-id></mixed-citation>
</ref>
<ref id="B47">
<label>47.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shnawa</surname> <given-names>AH</given-names></name> <name><surname>Mohammed</surname> <given-names>G</given-names></name> <name><surname>Hadi</surname> <given-names>MR</given-names></name> <name><surname>Ibrahim</surname> <given-names>K</given-names></name> <name><surname>Adnan</surname> <given-names>MM</given-names></name> <name><surname>Hameed</surname> <given-names>W</given-names></name></person-group>. <article-title>Optimal elman neural network for pancreatic cancer classification using computed tomography images</article-title>. In: <source>2023 6th International Conference on Engineering Technology and its Applications (IICETA)</source> (<year>2023</year>). p. <fpage>689</fpage>&#x02013;<lpage>695</lpage>. doi: <pub-id pub-id-type="doi">10.1109/IICETA57613.2023.10351360</pub-id></mixed-citation>
</ref>
<ref id="B48">
<label>48.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Bhargavi</surname> <given-names>K</given-names></name> <name><surname>Prasad</surname> <given-names>ML</given-names></name> <name><surname>G</surname> <given-names>A</given-names></name> <name><surname>Shaker Reddy</surname> <given-names>PC</given-names></name> <name><surname>Yuvalatha</surname> <given-names>S</given-names></name> <name><surname>Triveni</surname> <given-names>MN</given-names></name></person-group>. <article-title>An enhanced diagnostic system using deep learning for early prediction of pancreatic cancer</article-title>. In: <source>2024 10th International Conference on Electrical Energy Systems (ICEES)</source> (<year>2024</year>). p. <fpage>1</fpage>&#x02013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICEES61253.2024.10776830</pub-id></mixed-citation>
</ref>
<ref id="B49">
<label>49.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alaca</surname> <given-names>Y</given-names></name> <name><surname>Akme&#x0015F;e</surname> <given-names>&#x000D6;F</given-names></name></person-group>. <article-title>Pancreatic tumor detection from CT images converted to graphs using whale optimization and classification algorithms with transfer learning</article-title>. <source>Int J Imag Syst Technol</source>. (<year>2025</year>) <volume>35</volume>:<fpage>e70040</fpage>. doi: <pub-id pub-id-type="doi">10.1002/ima.70040</pub-id></mixed-citation>
</ref>
<ref id="B50">
<label>50.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>J</given-names></name> <name><surname>Akurati</surname> <given-names>M</given-names></name> <name><surname>Anandaram</surname> <given-names>H</given-names></name> <name><surname>Balakrishna</surname> <given-names>C</given-names></name> <name><surname>Al-Fatlawy</surname> <given-names>R</given-names></name> <name><surname>Ansari</surname> <given-names>SA</given-names></name></person-group>. <article-title>Stage-specific prognostic modeling for pancreatic cancer using convolutional neural networks</article-title>. In: <source>2024 International Conference on Artificial Intelligence and Emerging Technology (Global AI Summit)</source> (<year>2024</year>). p. <fpage>90</fpage>&#x02013;<lpage>94</lpage>. doi: <pub-id pub-id-type="doi">10.1109/GlobalAISummit62156.2024.10947991</pub-id></mixed-citation>
</ref>
<ref id="B51">
<label>51.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kavak</surname> <given-names>F</given-names></name> <name><surname>Bora</surname> <given-names>S</given-names></name> <name><surname>Kantarci</surname> <given-names>A</given-names></name> <name><surname>U&#x0011F;ur</surname> <given-names>A</given-names></name> <name><surname>Cagaptay</surname> <given-names>S</given-names></name> <name><surname>Gokcay</surname> <given-names>D</given-names></name> <etal/></person-group>. <article-title>Diagnosis of pancreatic ductal adenocarcinoma using deep learning</article-title>. <source>Sensors</source>. (<year>2024</year>) <volume>24</volume>:<fpage>7005</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s24217005</pub-id><pub-id pub-id-type="pmid">39517902</pub-id></mixed-citation>
</ref>
<ref id="B52">
<label>52.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Guo</surname> <given-names>R</given-names></name> <name><surname>Zhu</surname> <given-names>H</given-names></name> <name><surname>Chen</surname> <given-names>T</given-names></name> <name><surname>Qian</surname> <given-names>X</given-names></name></person-group>. <article-title>A causality-informed graph intervention model for pancreatic cancer early diagnosis</article-title>. <source>IEEE Trans Artif Intell</source>. (<year>2024</year>) <volume>5</volume>:<fpage>4675</fpage>&#x02013;<lpage>85</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TAI.2024.3395586</pub-id></mixed-citation>
</ref>
<ref id="B53">
<label>53.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Guo</surname> <given-names>R</given-names></name> <name><surname>Lu</surname> <given-names>J</given-names></name> <name><surname>Chen</surname> <given-names>T</given-names></name> <name><surname>Qian</surname> <given-names>X</given-names></name></person-group>. <article-title>Causality-driven graph neural network for early diagnosis of pancreatic cancer in non-contrast computerized tomography</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2023</year>) <volume>42</volume>:<fpage>1656</fpage>&#x02013;<lpage>67</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2023.3236162</pub-id><pub-id pub-id-type="pmid">37018703</pub-id></mixed-citation>
</ref>
<ref id="B54">
<label>54.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Abirami</surname> <given-names>S</given-names></name> <name><surname>Chelliah</surname> <given-names>BJ</given-names></name></person-group>. <article-title>Early prediction of pancreatic malignancy by using deep learning techniques</article-title>. In: <source>2024 Third International Conference on Electrical, Electronics, Information and Communication Technologies (ICEEICT)</source>. <publisher-loc>IEEE</publisher-loc> (<year>2024</year>). p. <fpage>1</fpage>&#x02013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICEEICT61591.2024.10718637</pub-id></mixed-citation>
</ref>
<ref id="B55">
<label>55.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dinesh</surname> <given-names>M</given-names></name> <name><surname>Bacanin</surname> <given-names>N</given-names></name> <name><surname>Askar</surname> <given-names>S</given-names></name> <name><surname>Abouhawwash</surname> <given-names>M</given-names></name></person-group>. <article-title>Diagnostic ability of deep learning in detection of pancreatic tumour</article-title>. <source>Sci Rep</source>. (<year>2023</year>) <volume>13</volume>:<fpage>9725</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-023-36886-8</pub-id><pub-id pub-id-type="pmid">37322046</pub-id></mixed-citation>
</ref>
<ref id="B56">
<label>56.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ramaekers</surname> <given-names>M</given-names></name> <name><surname>Viviers</surname> <given-names>CG</given-names></name> <name><surname>Hellstr&#x000F6;m</surname> <given-names>TA</given-names></name> <name><surname>Ewals</surname> <given-names>LJ</given-names></name> <name><surname>Tasios</surname> <given-names>N</given-names></name> <name><surname>Jacobs</surname> <given-names>I</given-names></name> <etal/></person-group>. <article-title>Improved pancreatic cancer detection and localization on CT scans: a computer-aided detection model utilizing secondary features</article-title>. <source>Cancers</source>. (<year>2024</year>) <volume>16</volume>:<fpage>2403</fpage>. doi: <pub-id pub-id-type="doi">10.3390/cancers16132403</pub-id><pub-id pub-id-type="pmid">39001465</pub-id></mixed-citation>
</ref>
<ref id="B57">
<label>57.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Abel</surname> <given-names>L</given-names></name> <name><surname>Wasserthal</surname> <given-names>J</given-names></name> <name><surname>Weikert</surname> <given-names>T</given-names></name> <name><surname>Sauter</surname> <given-names>AW</given-names></name> <name><surname>Nesic</surname> <given-names>I</given-names></name> <name><surname>Obradovic</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>Automated detection of pancreatic cystic lesions on CT using deep learning</article-title>. <source>Diagnostics</source>. (<year>2021</year>) <volume>11</volume>:<fpage>901</fpage>. doi: <pub-id pub-id-type="doi">10.3390/diagnostics11050901</pub-id><pub-id pub-id-type="pmid">34069328</pub-id></mixed-citation>
</ref>
<ref id="B58">
<label>58.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gao</surname> <given-names>E</given-names></name> <name><surname>Jiang</surname> <given-names>H</given-names></name> <name><surname>Zhou</surname> <given-names>Z</given-names></name> <name><surname>Yang</surname> <given-names>C</given-names></name> <name><surname>Chen</surname> <given-names>M</given-names></name> <name><surname>Zhu</surname> <given-names>W</given-names></name> <etal/></person-group>. <article-title>Automatic multi-tissue segmentation in pancreatic pathological images with selected multi-scale attention network</article-title>. <source>Comput Biol Med</source>. (<year>2022</year>) <volume>151</volume>:<fpage>106228</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106228</pub-id><pub-id pub-id-type="pmid">36306579</pub-id></mixed-citation>
</ref>
<ref id="B59">
<label>59.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Du</surname> <given-names>Y</given-names></name> <name><surname>Zuo</surname> <given-names>X</given-names></name> <name><surname>Liu</surname> <given-names>S</given-names></name> <name><surname>Cheng</surname> <given-names>D</given-names></name> <name><surname>Li</surname> <given-names>J</given-names></name> <name><surname>Sun</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>Segmentation of pancreatic tumors based on multi-scale convolution and channel attention mechanism in the encoder-decoder scheme</article-title>. <source>Med Phys</source>. (<year>2023</year>) <volume>50</volume>:<fpage>7764</fpage>&#x02013;<lpage>78</lpage>. doi: <pub-id pub-id-type="doi">10.1002/mp.16561</pub-id><pub-id pub-id-type="pmid">37365850</pub-id></mixed-citation>
</ref>
<ref id="B60">
<label>60.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Viriyasaranon</surname> <given-names>T</given-names></name> <name><surname>Chun</surname> <given-names>JW</given-names></name> <name><surname>Koh</surname> <given-names>YH</given-names></name> <name><surname>Cho</surname> <given-names>JH</given-names></name> <name><surname>Jung</surname> <given-names>MK</given-names></name> <name><surname>Kim</surname> <given-names>SH</given-names></name> <etal/></person-group>. <article-title>Annotation-efficient deep learning model for pancreatic cancer diagnosis and classification using CT images: a retrospective diagnostic study</article-title>. <source>Cancers</source>. (<year>2023</year>) <volume>15</volume>:<fpage>3392</fpage>. doi: <pub-id pub-id-type="doi">10.3390/cancers15133392</pub-id><pub-id pub-id-type="pmid">37444502</pub-id></mixed-citation>
</ref>
<ref id="B61">
<label>61.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lyu</surname> <given-names>P</given-names></name> <name><surname>Neely</surname> <given-names>B</given-names></name> <name><surname>Solomon</surname> <given-names>J</given-names></name> <name><surname>Rigiroli</surname> <given-names>F</given-names></name> <name><surname>Ding</surname> <given-names>Y</given-names></name> <name><surname>Schwartz</surname> <given-names>FR</given-names></name> <etal/></person-group>. <article-title>Effect of deep learning image reconstruction in the prediction of resectability of pancreatic cancer: diagnostic performance and reader confidence</article-title>. <source>Eur J Radiol</source>. (<year>2021</year>) <volume>141</volume>:<fpage>109825</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ejrad.2021.109825</pub-id><pub-id pub-id-type="pmid">34144309</pub-id></mixed-citation>
</ref>
<ref id="B62">
<label>62.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Suneetha</surname> <given-names>M</given-names></name> <name><surname>Kalhara</surname> <given-names>G</given-names></name> <name><surname>Krishna</surname> <given-names>MV</given-names></name> <name><surname>Chandana</surname> <given-names>KNS</given-names></name> <name><surname>Snigdha</surname> <given-names>VLS</given-names></name></person-group>. <article-title>Pancreatic cancer prediction through convolutional neural networks</article-title>. In: <source>2023 7th International Conference on Intelligent Computing and Control Systems (ICICCS)</source>. <publisher-loc>IEEE</publisher-loc> (<year>2023</year>). p. <fpage>602</fpage>&#x02013;<lpage>607</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICICCS56967.2023.10142618</pub-id></mixed-citation>
</ref>
<ref id="B63">
<label>63.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Zavals&#x00131;z</surname> <given-names>MT</given-names></name> <name><surname>Alhajj</surname> <given-names>S</given-names></name> <name><surname>Sailunaz</surname> <given-names>K</given-names></name> <name><surname>&#x000D6;zyer</surname> <given-names>T</given-names></name> <name><surname>Alhajj</surname> <given-names>R</given-names></name></person-group>. <article-title>Pancreatic tumor detection by convolutional neural networks</article-title>. In: <source>2022 International Arab Conference on Information Technology (ACIT)</source>. <publisher-loc>IEEE</publisher-loc> (<year>2022</year>). p. <fpage>1</fpage>&#x02013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACIT57182.2022.9994181</pub-id></mixed-citation>
</ref>
<ref id="B64">
<label>64.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Juwita</surname> <given-names>J</given-names></name> <name><surname>Hassan</surname> <given-names>GM</given-names></name> <name><surname>Akhtar</surname> <given-names>N</given-names></name> <name><surname>Datta</surname> <given-names>A</given-names></name></person-group>. <article-title>MMPU-Net: A parameter-efficient network for fine-stage of pancreas and pancreas-tumor segmentation on CT scans</article-title>. <source>Biomed Signal Process Control</source>. (<year>2025</year>) <volume>110</volume>:<fpage>108224</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2025.108224</pub-id></mixed-citation>
</ref>
<ref id="B65">
<label>65.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Vinod</surname> <given-names>RG</given-names></name> <name><surname>Kumar</surname> <given-names>MA</given-names></name> <name><surname>Chowdary</surname> <given-names>CV</given-names></name> <name><surname>Suvarchala</surname> <given-names>K</given-names></name></person-group>. <article-title>Classification of pancreatic cancer using kolmogorov-arnold networks: an upgrade on traditional machine learning for medical classification</article-title>. In: <source>2025 6th International Conference on Mobile Computing and Sustainable Informatics (ICMCSI)</source>. <publisher-loc>IEEE</publisher-loc> (<year>2025</year>). p. <fpage>961</fpage>&#x02013;<lpage>969</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICMCSI64620.2025.10883622</pub-id></mixed-citation>
</ref>
<ref id="B66">
<label>66.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Pravamanjari</surname> <given-names>A</given-names></name> <name><surname>Swain</surname> <given-names>S</given-names></name> <name><surname>Mallick</surname> <given-names>PK</given-names></name></person-group>. <article-title>Advanced detection and classification of pancreatic cancer in CT images using swin transformer architecture</article-title>. In: <source>2025 International Conference on Emerging Systems and Intelligent Computing (ESIC)</source>. <publisher-loc>IEEE</publisher-loc> (<year>2025</year>). p. <fpage>288</fpage>&#x02013;<lpage>293</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ESIC64052.2025.10962786</pub-id></mixed-citation>
</ref>
<ref id="B67">
<label>67.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>G</surname> <given-names>V</given-names></name> <name><surname>Prathibha</surname> <given-names>S</given-names></name></person-group>. <article-title>Transformers with temporal enhanced attention over graphs for accurate identification of pancreatic cancer</article-title>. In: <source>2025 3rd IEEE International Conference on Industrial Electronics: Developments &#x00026; Applications (ICIDeA)</source> (<year>2025</year>). p. <fpage>1</fpage>&#x02013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICIDeA64800.2025.10962937</pub-id></mixed-citation>
</ref>
<ref id="B68">
<label>68.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cao</surname> <given-names>L</given-names></name> <name><surname>Li</surname> <given-names>J</given-names></name> <name><surname>Chen</surname> <given-names>S</given-names></name></person-group>. <article-title>Multi-target segmentation of pancreas and pancreatic tumor based on fusion of attention mechanism</article-title>. <source>Biomed Signal Process Control</source>. (<year>2023</year>) <volume>79</volume>:<fpage>104170</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2022.104170</pub-id></mixed-citation>
</ref>
<ref id="B69">
<label>69.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dong</surname> <given-names>K</given-names></name> <name><surname>Hu</surname> <given-names>P</given-names></name> <name><surname>Zhu</surname> <given-names>Y</given-names></name> <name><surname>Tian</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Zhou</surname> <given-names>T</given-names></name> <etal/></person-group>. <article-title>Attention-enhanced multiscale feature fusion network for pancreas and tumor segmentation</article-title>. <source>Med Phys</source>. (<year>2024</year>) <volume>51</volume>:<fpage>8999</fpage>&#x02013;<lpage>9016</lpage>. doi: <pub-id pub-id-type="doi">10.1002/mp.17385</pub-id><pub-id pub-id-type="pmid">39306864</pub-id></mixed-citation>
</ref>
<ref id="B70">
<label>70.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>He</surname> <given-names>P</given-names></name> <name><surname>Kong</surname> <given-names>QQ</given-names></name> <name><surname>Chen</surname> <given-names>Y</given-names></name> <name><surname>Shao</surname> <given-names>CB</given-names></name> <name><surname>Su</surname> <given-names>Z</given-names></name></person-group>. <article-title>Pancreas segmentation based on multi-stage attention enhanced U-Net</article-title>. <source>Int J Imag Syst Technol</source>. (<year>2025</year>) <volume>35</volume>:<fpage>e70025</fpage>. doi: <pub-id pub-id-type="doi">10.1002/ima.70025</pub-id></mixed-citation>
</ref>
<ref id="B71">
<label>71.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zeng</surname> <given-names>L</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Yang</surname> <given-names>X</given-names></name> <name><surname>Chen</surname> <given-names>W</given-names></name> <name><surname>Liu</surname> <given-names>J</given-names></name> <name><surname>Shen</surname> <given-names>L</given-names></name> <etal/></person-group>. <article-title>SCPMan: shape context and prior constrained multi-scale attention network for pancreatic segmentation</article-title>. <source>Expert Syst Appl</source>. (<year>2024</year>) <volume>252</volume>:<fpage>124070</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2024.124070</pub-id></mixed-citation>
</ref>
<ref id="B72">
<label>72.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sun</surname> <given-names>G</given-names></name> <name><surname>Pan</surname> <given-names>Y</given-names></name> <name><surname>Kong</surname> <given-names>W</given-names></name> <name><surname>Xu</surname> <given-names>Z</given-names></name> <name><surname>Ma</surname> <given-names>J</given-names></name> <name><surname>Racharak</surname> <given-names>T</given-names></name> <etal/></person-group>. <article-title>DA-TransUNet: integrating spatial and channel dual attention with transformer U-net for medical image segmentation</article-title>. <source>Front Bioeng Biotechnol</source>. (<year>2024</year>) <volume>12</volume>:<fpage>1398237</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fbioe.2024.1398237</pub-id><pub-id pub-id-type="pmid">38827037</pub-id></mixed-citation>
</ref>
<ref id="B73">
<label>73.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shen</surname> <given-names>C</given-names></name> <name><surname>Roth</surname> <given-names>HR</given-names></name> <name><surname>Hayashi</surname> <given-names>Y</given-names></name> <name><surname>Oda</surname> <given-names>M</given-names></name> <name><surname>Sato</surname> <given-names>G</given-names></name> <name><surname>Miyamoto</surname> <given-names>T</given-names></name> <etal/></person-group>. <article-title>Anatomical attention can help to segment the dilated pancreatic duct in abdominal CT</article-title>. <source>Int J Comput Assist Radiol Surg</source>. (<year>2024</year>) <volume>19</volume>:<fpage>655</fpage>&#x02013;<lpage>64</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11548-023-03049-z</pub-id><pub-id pub-id-type="pmid">38498132</pub-id></mixed-citation>
</ref>
<ref id="B74">
<label>74.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Mahendran</surname> <given-names>RK</given-names></name> <name><surname>Aniruddhan</surname> <given-names>P</given-names></name> <name><surname>Kumar</surname> <given-names>P</given-names></name></person-group>. <article-title>PancreasNet: a transformer-based progressive residual network for comprehensive pancreatic cancer detection using CT images</article-title>. In: <source>2025 International Conference on Wireless Communications Signal Processing and Networking (WiSPNET)</source>. <publisher-loc>IEEE</publisher-loc> (<year>2025</year>). p. <fpage>1</fpage>&#x02013;<lpage>12</lpage>. doi: <pub-id pub-id-type="doi">10.1109/WiSPNET64060.2025.11004859</pub-id></mixed-citation>
</ref>
<ref id="B75">
<label>75.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tian</surname> <given-names>H</given-names></name> <name><surname>Zhang</surname> <given-names>B</given-names></name> <name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Xu</surname> <given-names>Z</given-names></name> <name><surname>Jin</surname> <given-names>L</given-names></name> <name><surname>Bian</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>DenseNet model incorporating hybrid attention mechanisms and clinical features for pancreatic cystic tumor classification</article-title>. <source>J Appl Clin Med Phys</source>. (<year>2024</year>) <volume>25</volume>:<fpage>e14380</fpage>. doi: <pub-id pub-id-type="doi">10.1002/acm2.14380</pub-id><pub-id pub-id-type="pmid">38715381</pub-id></mixed-citation>
</ref>
<ref id="B76">
<label>76.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Heo</surname> <given-names>J</given-names></name> <name><surname>Lee</surname> <given-names>HB</given-names></name> <name><surname>Kim</surname> <given-names>S</given-names></name> <name><surname>Lee</surname> <given-names>J</given-names></name> <name><surname>Kim</surname> <given-names>KJ</given-names></name> <name><surname>Yang</surname> <given-names>E</given-names></name> <etal/></person-group>. <article-title>Uncertainty-aware attention for reliable interpretation and prediction</article-title>. In: <source>Advances in Neural Information Processing Systems</source>, Vol. <volume>31</volume>. (<year>2018</year>).</mixed-citation>
</ref>
<ref id="B77">
<label>77.</label>
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>J</given-names></name> <name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Shen</surname> <given-names>N</given-names></name> <name><surname>Gao</surname> <given-names>H</given-names></name></person-group>. <article-title>Diagnosis of lymph node metastasis of pancreatic cancer based on feature extraction and information fusion</article-title>. In: <source>2025 6th International Conference on Computer Vision, Image and Deep Learning (CVIDL)</source>. <publisher-loc>IEEE</publisher-loc> (<year>2025</year>). p. <fpage>180</fpage>&#x02013;<lpage>183</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CVIDL65390.2025.11085864</pub-id></mixed-citation>
</ref>
<ref id="B78">
<label>78.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alaca</surname> <given-names>Y</given-names></name></person-group>. <article-title>Machine learning via DARTS-Optimized MobileViT models for pancreatic cancer diagnosis with graph-based deep learning</article-title>. <source>BMC Med Inform Decis Mak</source>. (<year>2025</year>) <volume>25</volume>:<fpage>81</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12911-025-02923-x</pub-id><pub-id pub-id-type="pmid">39955532</pub-id></mixed-citation>
</ref>
<ref id="B79">
<label>79.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>Y</given-names></name> <name><surname>Fang</surname> <given-names>P</given-names></name> <name><surname>Wang</surname> <given-names>X</given-names></name> <name><surname>Shen</surname> <given-names>J</given-names></name></person-group>. <article-title>Predicting pancreatic diseases from fundus images using deep learning</article-title>. <source>Vis Comput</source>. (<year>2025</year>) <volume>41</volume>:<fpage>3553</fpage>&#x02013;<lpage>64</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00371-024-03619-5</pub-id></mixed-citation>
</ref>
<ref id="B80">
<label>80.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>ZM</given-names></name> <name><surname>Liao</surname> <given-names>Y</given-names></name> <name><surname>Zhou</surname> <given-names>X</given-names></name> <name><surname>Yu</surname> <given-names>W</given-names></name> <name><surname>Zhang</surname> <given-names>G</given-names></name> <name><surname>Ge</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>Pancreatic cancer pathology image segmentation with channel and spatial long-range dependencies</article-title>. <source>Comput Biol Med</source>. (<year>2024</year>) <volume>169</volume>:<fpage>107844</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.107844</pub-id><pub-id pub-id-type="pmid">38103482</pub-id></mixed-citation>
</ref>
<ref id="B81">
<label>81.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oktay</surname> <given-names>O</given-names></name> <name><surname>Schlemper</surname> <given-names>J</given-names></name> <name><surname>Folgoc</surname> <given-names>LL</given-names></name> <name><surname>Lee</surname> <given-names>M</given-names></name> <name><surname>Heinrich</surname> <given-names>M</given-names></name> <name><surname>Misawa</surname> <given-names>K</given-names></name> <etal/></person-group>. <article-title>Attention u-net: learning where to look for the pancreas</article-title>. <source>arXiv preprint arXiv:180403999</source>. (<year>2018</year>).</mixed-citation>
</ref>
<ref id="B82">
<label>82.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>PT</given-names></name> <name><surname>Wu</surname> <given-names>T</given-names></name> <name><surname>Wang</surname> <given-names>P</given-names></name> <name><surname>Chang</surname> <given-names>D</given-names></name> <name><surname>Liu</surname> <given-names>KL</given-names></name> <name><surname>Wu</surname> <given-names>MS</given-names></name> <etal/></person-group>. <article-title>Pancreatic cancer detection on CT scans with deep learning: a nationwide population-based study</article-title>. <source>Radiology</source>. (<year>2023</year>) <volume>306</volume>:<fpage>172</fpage>&#x02013;<lpage>82</lpage>. doi: <pub-id pub-id-type="doi">10.1148/radiol.220152</pub-id><pub-id pub-id-type="pmid">36098642</pub-id></mixed-citation>
</ref>
<ref id="B83">
<label>83.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Thanya</surname> <given-names>T</given-names></name> <name><surname>Jeslin</surname> <given-names>T</given-names></name></person-group>. <article-title>DeepOptimalNet: optimized deep learning model for early diagnosis of pancreatic tumor classification in CT imaging</article-title>. <source>Abdom Radiol</source>. (<year>2025</year>) <volume>50</volume>:<fpage>4181</fpage>&#x02013;<lpage>4211</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00261-025-04860-9</pub-id><pub-id pub-id-type="pmid">40047871</pub-id></mixed-citation>
</ref>
<ref id="B84">
<label>84.</label>
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yu</surname> <given-names>X</given-names></name> <name><surname>Wang</surname> <given-names>Y</given-names></name> <name><surname>Wang</surname> <given-names>Y</given-names></name> <name><surname>Feng</surname> <given-names>J</given-names></name> <name><surname>Fan</surname> <given-names>S</given-names></name> <name><surname>Li</surname> <given-names>C</given-names></name></person-group>. <article-title>Combining multimodal medical imaging and artificial intelligence for the early diagnosis of pancreatic cancer</article-title>. <source>Front Med</source>. (<year>2025</year>) <volume>12</volume>:<fpage>1631671</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fmed.2025.1631671</pub-id><pub-id pub-id-type="pmid">40861235</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2641396/overview">Surbhi Bhatia Khan</ext-link>, University of Salford, United Kingdom</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3266298/overview">Meenakshi Mittal</ext-link>, Central University of Punjab, India</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3268087/overview">Vikas Verma</ext-link>, Lovely Professional University, India</p>
</fn>
</fn-group>
<fn-group>
<fn fn-type="abbr" id="abbr1"><label>Abbreviations:</label><p>PDAC, Pancreatic ductal adenocarcinoma; PC, Pancreatic cancer; MRI, Magnetic resonance imaging; EUS, Endoscopic ultrasound; AI, Artificial intelligence; ML, Machine learning; DL, Deep learning; MSD, Medical Segmentation Decathlon.</p></fn></fn-group>
</back>
</article>
