<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="editorial" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Radiol.</journal-id><journal-title-group>
<journal-title>Frontiers in Radiology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Radiol.</abbrev-journal-title></journal-title-group>
<issn pub-type="epub">2673-8740</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fradi.2026.1796451</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Editorial</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Editorial: Innovative AI approaches in quantitative MRI: from image enhancement to biomarker estimation</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes"><name><surname>Mastropietro</surname><given-names>Alfonso</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/690671/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Scalco</surname><given-names>Elisa</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/798734/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Figini</surname><given-names>Matteo</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/1564839/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Istituto di Sistemi e Tecnologie Industriali Intelligenti per il Manifatturiero Avanzato, Consiglio Nazionale delle Ricerche</institution>, <city>Milan</city>, <country country="IT">Italy</country></aff>
<aff id="aff2"><label>2</label><institution>Istituto di Tecnologie Biomediche, Consiglio Nazionale delle Ricerche</institution>, <city>Segrate</city>, <country country="IT">Italy</country></aff>
<aff id="aff3"><label>3</label><institution>Hawkes Institute and Department of Computer Science, University College London</institution>, <city>London</city>, <country country="GB">United Kingdom</country></aff>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Alfonso Mastropietro <email xlink:href="mailto:alfonso.mastropietro@cnr.it">alfonso.mastropietro@cnr.it</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-16"><day>16</day><month>02</month><year>2026</year></pub-date>
<pub-date publication-format="electronic" date-type="collection"><year>2026</year></pub-date>
<volume>6</volume><elocation-id>1796451</elocation-id>
<history>
<date date-type="received"><day>26</day><month>01</month><year>2026</year></date>
<date date-type="accepted"><day>30</day><month>01</month><year>2026</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2026 Mastropietro, Scalco and Figini.</copyright-statement>
<copyright-year>2026</copyright-year><copyright-holder>Mastropietro, Scalco and Figini</copyright-holder><license><ali:license_ref start_date="2026-02-16">https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p></license>
</permissions>
<kwd-group>
<kwd>artificial intelligence&#x2014;AI</kwd>
<kwd>biomarker estimation</kwd>
<kwd>image processing</kwd>
<kwd>magnetic resonance imaging</kwd>
<kwd>quantitative MRI (qMRI)</kwd>
</kwd-group><counts>
<fig-count count="0"/>
<table-count count="0"/><equation-count count="0"/><ref-count count="6"/><page-count count="3"/><word-count count="0"/></counts><custom-meta-group><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Artificial Intelligence in Radiology</meta-value></custom-meta></custom-meta-group>
</article-meta>
<notes notes-type="frontiers-research-topic">
<p>Editorial on the Research Topic <ext-link xlink:href="https://www.frontiersin.org/research-topics/59167/innovative-ai-approaches-in-quantitative-mri-from-image-enhancement-to-biomarker-estimation/articles" ext-link-type="uri">Innovative AI approaches in quantitative MRI: from image enhancement to biomarker estimation</ext-link></p>
</notes>
</front>
<body>
<sec id="s1" sec-type="intro"><title>Introduction</title>
<p>Quantitative MRI (qMRI) enables the extraction of quantitative parameters from MR images that can serve as potential biomarkers. However, its clinical adoption remains limited by practical constraints such as sequence availability, scan time, artifact sensitivity, inter-site variability, and post-processing complexity (<xref ref-type="bibr" rid="B1">1</xref>&#x2013;<xref ref-type="bibr" rid="B3">3</xref>). Artificial intelligence (AI) is increasingly used to overcome these barriers throughout the workflow, from image formation to robust biomarker estimation (<xref ref-type="bibr" rid="B4">4</xref>&#x2013;<xref ref-type="bibr" rid="B6">6</xref>). Nevertheless, compared to other imaging applications, qMRI presents unique challenges for AI, as it is fundamental to ensure that algorithm outputs are quantitatively accurate and respect their physical meaning. This Research Topic sought contributions that address these challenges and target clinically relevant scenarios in qMRI.</p>
<p>A unifying theme across the Research Topic is the use of AI to make qMRI more accessible, either by extracting quantitative information from routinely acquired conventional images or by automating steps that currently require specialized expertise or are time-consuming. Specifically, two contributions focus on retrospective quantification, i.e., learning mappings from conventional acquisitions to quantitative parameters that would otherwise require specialized sequences. A third contribution targets reproducible, automated segmentation to enable fast and standardized flow quantification. Finally, one paper proposes MR fingerprinting (MRF) data synthesis from magnitude-only conventional imaging, suggesting a route to relaxometry without a dedicated MRF pulse sequence. Together, these studies illustrate complementary approaches to applying AI at various points along qMRI pipelines, such as deriving quantitative information from constrained inputs, ensuring scalability, and validating outputs against quantitative references.</p>
</sec>
<sec id="s2"><title>Overview of the contributions</title>
<p>In their paper, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fradi.2023.1223377">Sun et al.</ext-link> focus on retrospective T<sub>2</sub> mapping of the prostate from conventional T<sub>1</sub>- and T<sub>2</sub>-weighted images, motivated by the clinical value of quantitative T<sub>2</sub> for lesion characterization while acknowledging the limited availability of dedicated mapping sequences in routine mpMRI protocols. A U-Net trained against reference multi-echo spin-echo T<sub>2</sub> maps produces estimates that preserve anatomical structure and contrast. The predicted T<sub>2</sub> maps in 25 subjects show strong quantitative agreement with references and demonstrate clinical utility by differentiating tumor from non-tumor tissue and reflecting longitudinal changes in active surveillance cohorts. The work highlights the potential of AI to extract quantitative biomarkers from existing standard clinical imaging when rigorously validated and clinically contextualized.</p>
<p><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fradi.2023.1168901">Wu et al.</ext-link> address the limitations of standard multi-phasic abdominal DCE-MRI by proposing a pharmacokinetics-informed deep learning framework to retrospectively recover temporal resolution and enable quantitative analysis. Considering 45 subjects, including healthy controls and patients with pancreatic ductal adenocarcinoma or chronic pancreatitis, a model was trained on high-temporal-resolution DCE reference data, yielding pharmacokinetic parameters that closely match quantitative DCE estimates and discriminate healthy pancreas from disease cohorts. By constraining learning through a downstream physical model, the study exemplifies a hybrid, model-aware AI strategy well suited to quantitative MRI, enabling biomarker extraction without changes to routine imaging protocols.</p>
<p><ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fradi.2024.1385424">Winter et al.</ext-link> address vessel segmentation, a key bottleneck in intracranial 4D flow MRI, by proposing a fully automated framework that reduces time burden and user variability, particularly in stenotic vessels. Using a 3D U-Net trained on dual-VENC data from 68 patients with intracranial atherosclerotic disease and stenosis and 86 healthy controls, the method achieves fast inference with accuracy comparable to expert observers. Beyond geometric performance, the study validates segmentation using downstream hemodynamic metrics, including flow parameters and flow conservation error, and observes strong agreement in lumen area with black-blood vessel wall imaging. This work demonstrates a practical and clinically relevant pathway for robust, automated extraction of flow biomarkers.</p>
<p>Finally, in their paper, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fradi.2024.1498411">McGee et al.</ext-link> propose a deep learning strategy to synthesize MRF signals from conventional magnitude-only 3D T<sub>1</sub>-weighted brain MRI, thereby reducing dependence on customized MRF acquisitions and dedicated processing. Using data from 37 volunteers, the authors found a high correlation between a U-Net&#x2013;based model and relaxometry (T<sub>1</sub>, T<sub>2</sub>) derived from dictionary matching on synthesized vs. acquired MRF signals across 47 anatomical regions. This work is notable for framing the learning problem around quantitative endpoints (regional relaxometry agreement) rather than purely image similarity, which is essential when the intended output is a quantitative map rather than a visually plausible image.</p>
</sec>
<sec id="s3"><title>Outlook</title>
<p>Across the four contributions, several methodological priorities emerge. First, retrospective quantification from non-specialized acquisitions is an increasingly practical strategy, but it raises additional questions, since models trained on specific protocols or vendors must be extensively validated in different conditions to assess generalizability. Additionally, training and validation on retrospective data can raise concerns about how reliably the findings will translate to prospective studies, where recruitment and acquisition protocols are controlled and kept up to date. Second, AI can be used as an alternative to labor-intensive manual processing (e.g., segmentation) or computationally intensive processing (e.g., iterative algorithms for model fitting) to accelerate procedures and reduce the need for specialized expertise. Together, these two points show the potential of AI to make qMRI more accessible by reducing the need for human expertise, computational time, and specialized acquisitions or hardware, as well as improving reproducibility. However, performance assessment should be anchored in quantitative endpoints (agreement with reference maps, robustness of derived parameters, impact on downstream measurements), not solely on qualitative analysis based on visual similarity. AI applications in qMRI are most impactful when automated processing translates into clinically meaningful endpoints and when performance is explicitly validated in patient cohorts rather than extrapolated from results in healthy controls or synthetic data.</p>
<p>Looking forward, the direction suggested by this Research Topic is that AI will function as an enabling factor for qMRI&#x2014;expanding access to quantitative biomarkers by reducing dependency on specialized sequences, accelerating analysis, and supporting more reproducible workflows. Progress toward routine use will depend on continued emphasis on external validation, transparent reporting of acquisition/preprocessing dependencies, and (where feasible) uncertainty or quality-control mechanisms that help clinicians and researchers judge when a quantitative estimate is reliable. We hope this collection will help inform both method developers and end users on practical, validation-focused pathways to translate AI-based qMRI into robust tools for research and patient care.</p>
</sec>
</body>
<back>
<sec id="s4" sec-type="author-contributions"><title>Author contributions</title>
<p>AM: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. ES: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. MF: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec id="s6" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s7" sec-type="ai-statement"><title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript. The authors declare that generative artificial intelligence tools were used to support language editing and stylistic refinement of the manuscript. All scientific content, interpretations, and conclusions were developed by the authors, who take full responsibility for the integrity and accuracy of the work.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="s8" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mills</surname> <given-names>AF</given-names></name> <name><surname>Sakai</surname> <given-names>O</given-names></name> <name><surname>Anderson</surname> <given-names>SW</given-names></name> <name><surname>Jara</surname> <given-names>H</given-names></name></person-group>. <article-title>Principles of quantitative MR imaging with illustrated review of applicable modular pulse diagrams</article-title>. <source>Radiographics</source>. (<year>2017</year>) <volume>37</volume>(<issue>7</issue>):<fpage>2083</fpage>&#x2013;<lpage>105</lpage>. <pub-id pub-id-type="doi">10.1148/rg.2017160099</pub-id><pub-id pub-id-type="pmid">28985137</pub-id></mixed-citation></ref>
<ref id="B2"><label>2.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Gulani</surname> <given-names>V</given-names></name> <name><surname>Seiberlich</surname> <given-names>N</given-names></name></person-group>. <article-title>Quantitative MRI: rationale and challenges</article-title>. In: <person-group person-group-type="editor"><name><surname>Seiberlich</surname><given-names>N</given-names></name> <name><surname>Gulani</surname><given-names>V</given-names></name> <name><surname>Calamante</surname><given-names>F</given-names></name> <name><surname>Campbell-Washburn</surname><given-names>A</given-names></name> <name><surname>Doneva</surname><given-names>M</given-names></name> <name><surname>Hu</surname><given-names>HH</given-names></name><etal/></person-group> <source>Advances in Magnetic Resonance Technology and Applications</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>Academic Press</publisher-name> (<year>2020</year>). <volume>vol. 1</volume>, p. <fpage>xxxvii-li</fpage></mixed-citation></ref>
<ref id="B3"><label>3.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Saltarelli</surname> <given-names>G</given-names></name> <name><surname>Di Cerbo</surname> <given-names>G</given-names></name> <name><surname>Innocenzi</surname> <given-names>A</given-names></name> <name><surname>De Felici</surname> <given-names>C</given-names></name> <name><surname>Splendiani</surname> <given-names>A</given-names></name> <name><surname>Di Cesare</surname> <given-names>E</given-names></name></person-group>. <article-title>Quantitative MRI in neuroimaging: a review of techniques, biomarkers, and emerging clinical applications</article-title>. <source>Brain Sci</source>. (<year>2025</year>) <volume>15</volume>(<issue>10</issue>):<fpage>1088</fpage>. <pub-id pub-id-type="doi">10.3390/brainsci15101088</pub-id><pub-id pub-id-type="pmid">41154182</pub-id></mixed-citation></ref>
<ref id="B4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Foti</surname> <given-names>G</given-names></name> <name><surname>Longo</surname> <given-names>C</given-names></name></person-group>. <article-title>Deep learning and AI in reducing magnetic resonance imaging scanning time: advantages and pitfalls in clinical practice</article-title>. <source>Pol J Radiol</source>. (<year>2024</year>) <volume>89</volume>:<fpage>e443</fpage>&#x2013;<lpage>551</lpage>. <pub-id pub-id-type="doi">10.5114/pjr/192822</pub-id><pub-id pub-id-type="pmid">39444654</pub-id></mixed-citation></ref>
<ref id="B5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Scalco</surname> <given-names>E</given-names></name> <name><surname>Rizzo</surname> <given-names>G</given-names></name> <name><surname>Bertolino</surname> <given-names>N</given-names></name> <name><surname>Mastropietro</surname> <given-names>A</given-names></name></person-group>. <article-title>Leveraging deep learning for improving parameter extraction from perfusion MR images: a narrative review</article-title>. <source>Phys Med</source>. (<year>2025</year>) <volume>133</volume>:<fpage>104978</fpage>. <pub-id pub-id-type="doi">10.1016/j.ejmp.2025.104978</pub-id><pub-id pub-id-type="pmid">40215839</pub-id></mixed-citation></ref>
<ref id="B6"><label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>C</given-names></name> <name><surname>Andaloussi</surname> <given-names>MA</given-names></name> <name><surname>Hormuth</surname> <given-names>DA</given-names></name> <name><surname>Lima</surname> <given-names>EA</given-names></name> <name><surname>Lorenzo</surname> <given-names>G</given-names></name> <name><surname>Stowers</surname> <given-names>CE</given-names></name><etal/></person-group> <article-title>A critical assessment of artificial intelligence in magnetic resonance imaging of cancer</article-title>. <source>Npj Imaging</source>. (<year>2025</year>) <volume>3</volume>(<issue>1</issue>):<fpage>15</fpage>. <pub-id pub-id-type="doi">10.1038/s44303-025-00076-0</pub-id><pub-id pub-id-type="pmid">40226507</pub-id></mixed-citation></ref></ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited and Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/976340/overview">Dajiang Zhu</ext-link>, University of Texas at Arlington, United States</p></fn>
</fn-group>
</back>
</article>