<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="brief-report" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Med.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Med.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-858X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmed.2026.1758708</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Perspective</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Diverging regulatory DNA in adaptive medical AI: US agility and EU accountability in lifecycle governance</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author"><name><surname>Lee</surname> <given-names>Jae Hyun</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3326962"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author"><name><surname>Choi</surname> <given-names>Boram</given-names></name><xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author"><name><surname>Jeong</surname> <given-names>Kwunho</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author"><name><surname>Suh</surname> <given-names>Sang Won</given-names></name><xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/473959"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author"><name><surname>Rhee</surname> <given-names>Hwanseok</given-names></name><xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Kim</surname> <given-names>Ju Han</given-names></name><xref ref-type="aff" rid="aff5"><sup>5</sup></xref><xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/641562"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
</contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Son</surname> <given-names>Dae-Soon</given-names></name><xref ref-type="aff" rid="aff6"><sup>6</sup></xref><xref ref-type="aff" rid="aff7"><sup>7</sup></xref><xref ref-type="aff" rid="aff8"><sup>8</sup></xref><xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1710779"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Global Research Center, JNPMEDI</institution>, <city>Seoul</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Clinical Research, Onu Institute</institution>, <city>Seoul</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Physiology, Hallym University College of Medicine</institution>, <city>Chuncheon</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff4"><label>4</label><institution>Research Institute of Medical-Bio Convergence, Hallym University</institution>, <city>Chuncheon</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff5"><label>5</label><institution>Seoul National University Biomedical Informatics (SNUBI), Division of Biomedical Informatics, Seoul National University College of Medicine</institution>, <city>Seoul</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff6"><label>6</label><institution>Major in Bio-Healthcare Convergence, Hallym University College of Natural Sciences</institution>, <city>Chuncheon</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff7"><label>7</label><institution>Division of Big Data and Artificial Intelligence, Institute of New Frontier Research, Hallym University College of Medicine</institution>, <city>Chuncheon</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff8"><label>8</label><institution>Hallym AI-BioHealth R&#x0026;BD Center, Research Institute of Medical-Bio Convergence, Hallym University</institution>, <city>Chuncheon</city>, <country country="KR">Republic of Korea</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Ju Han Kim, <email xlink:href="mailto:juhan@snu.ac.kr">juhan@snu.ac.kr</email>; Dae-Soon Son, <email xlink:href="mailto:biostat@hallym.ac.kr">biostat@hallym.ac.kr</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-23">
<day>23</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>13</volume>
<elocation-id>1758708</elocation-id>
<history>
<date date-type="received">
<day>02</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>04</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>10</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Lee, Choi, Jeong, Suh, Rhee, Kim and Son.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Lee, Choi, Jeong, Suh, Rhee, Kim and Son</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-23">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Medical artificial intelligence (AI) is transitioning from static, rule-based systems into adaptive models capable of continuous learning and iterative refinement. Such adaptivity expands the utility and performance of clinical AI systems across diverse patient populations and real-world conditions. However, these properties challenge regulatory paradigms originally designed for fixed-function medical devices. Although the United States and the European Union share goals of ensuring safety, accountability, and trustworthy performance, their regulatory architectures diverge due to underlying legal-philosophical traditions. The United States employs a common-law, evidence-driven approach centered on the Total Product Life Cycle, using predetermined change-control mechanisms and real-world observational data to support iterative improvement under controlled risk. In contrast, the European Union adopts a civil-law, precautionary model operationalized through the Artificial Intelligence Act, the Medical Device Regulation, and the revised Product Liability Directive, emphasizing ex-ante duties, transparency, traceability, and accountability. Understanding these distinct regulatory DNAs is critical for aligning lifecycle governance of adaptive AI across jurisdictions and ensuring safe, context-responsive innovation.</p>
</abstract>
<kwd-group>
<kwd>accountability</kwd>
<kwd>adaptive AI</kwd>
<kwd>AI Act</kwd>
<kwd>lifecycle governance</kwd>
<kwd>MDR</kwd>
<kwd>PCCP</kwd>
<kwd>regulatory DNA</kwd>
<kwd>RWE</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research was supported by the Regional Innovation System &#x0026; Education (RISE) Glocal University 30 Project program through the Gangwon RISE Center, funded by the Ministry of Education (MOE) and the Gangwon State (G.S.), Republic of Korea (2025-RISE-10-009).</funding-statement>
</funding-group>
<counts>
<fig-count count="0"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="23"/>
<page-count count="5"/>
<word-count count="3895"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Regulatory Science</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Medical artificial intelligence is undergoing a shift from locked, rule-based architectures to adaptive systems capable of continuous refinement and contextual responsiveness (<xref ref-type="bibr" rid="ref1 ref2 ref3 ref4">1&#x2013;4</xref>). These capabilities are increasingly important as AI models encounter heterogeneous patient populations, evolving clinical knowledge, and variable real-world environments (<xref ref-type="bibr" rid="ref5">5</xref>, <xref ref-type="bibr" rid="ref6">6</xref>).</p>
<p>In this Perspective, we use &#x201C;adaptive medical AI&#x201D; to encompass systems whose performance or behavior may change after deployment through ongoing learning, recalibration, or scheduled updates. Importantly, adaptation is not a single technical mode: it may occur through continuous learning mechanisms that update model parameters in near real time, or through periodic updates (e.g., batch retraining or recalibration) released under controlled governance. These distinctions are significant from a regulatory perspective because the frequency and scope of change shape risk, oversight feasibility, and lifecycle evidence needs.</p>
<p>Yet legacy medical-device regulation&#x2014;built around linear sequences of manufacture, approval, and tightly constrained post-market modification&#x2014;was never designed for systems that evolve after deployment (<xref ref-type="bibr" rid="ref7">7</xref>, <xref ref-type="bibr" rid="ref8">8</xref>). These frameworks assume that safety and effectiveness can be validated at a fixed moment and that system performance remains stable during commercial use. Adaptive AI challenges each of these premises.</p>
<p>Concrete tensions have already emerged in practice. Developers may struggle to determine when routine recalibration constitutes &#x201C;maintenance&#x201D; versus a regulated modification that could require additional review, while regulators face uncertainty in assessing safety and effectiveness when real-world performance diverges from its originally validated state. Such frictions are amplified by well-described phenomena such as calibration drift in clinical prediction models, which can occur even without structural retraining and may necessitate periodic recalibration to maintain clinical reliability (<xref ref-type="bibr" rid="ref4">4</xref>).</p>
<p>Accordingly, governing adaptive medical AI requires more than extending legacy device paradigms: it demands explicit definitions of permissible update types, pre-specified validation and documentation expectations, and post-market monitoring triggers that determine when performance changes warrant escalation, notification, or re-assessment.</p>
<p>This Perspective examines how the United States (US) and the European Union (EU)&#x2014;despite shared commitments to safety, accountability, and trustworthy innovation&#x2014;have developed fundamentally different regulatory identities rooted in US common-law incrementalism and EU civil-law codification (<xref ref-type="bibr" rid="ref9">9</xref>, <xref ref-type="bibr" rid="ref10">10</xref>). Understanding these regulatory DNAs is essential for shaping future governance of adaptive AI systems (<xref ref-type="table" rid="tab1">Table 1</xref>).</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Key regulatory contrasts between US and EU approaches to lifecycle governance of adaptive medical AI.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Dimension</th>
<th align="left" valign="top">United States (FDA)</th>
<th align="left" valign="top">European Union (AI Act + MDR + PLD)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Regulatory posture</td>
<td align="left" valign="top">Evidence-driven, iterative oversight across the TPLC</td>
<td align="left" valign="top">Precautionary, ex-ante duties emphasizing predictability and rights</td>
</tr>
<tr>
<td align="left" valign="top">Primary lifecycle tool for updates</td>
<td align="left" valign="top">PCCP enabling pre-authorized, bounded changes</td>
<td align="left" valign="top">Conformity assessment-centered control of change; limited pre-authorization for adaptation</td>
</tr>
<tr>
<td align="left" valign="top">Role of real-world data</td>
<td align="left" valign="top">RWE as a feedback loop to monitor drift and validate updates</td>
<td align="left" valign="top">Data as compliance evidence and documentation; post-market signals via MDR surveillance/vigilance</td>
</tr>
<tr>
<td align="left" valign="top">Transparency and documentation</td>
<td align="left" valign="top">Strong emphasis for PCCP submissions; may vary by implementation capacity</td>
<td align="left" valign="top">Broad statutory duties for documentation, traceability, and governance</td>
</tr>
<tr>
<td align="left" valign="top">Accountability and liability logic</td>
<td align="left" valign="top">Case-by-case liability through tort-based adjudication</td>
<td align="left" valign="top">Structural accountability with statutory duties and liability presumptions (PLD)</td>
</tr>
<tr>
<td align="left" valign="top">Innovation friction</td>
<td align="left" valign="top">Lower pre-market friction; higher reliance on post-market monitoring capacity</td>
<td align="left" valign="top">Higher pre-market friction; sandboxes provide guided pathways without waiving duties</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In the sections that follow, we unpack how these distinct regulatory DNAs manifest in the US and the EU, tracing the legal traditions, oversight tools, and lifecycle mechanisms that structure their approaches. We then analyze the divergent implications these choices create for innovation, safety, accountability, and the prospects for global harmonization.</p>
</sec>
<sec id="sec2">
<label>2</label>
<title>Diverging approaches to adaptive medical AI</title>
<sec id="sec3">
<label>2.1</label>
<title>The US approach: evidence-driven agility (common-law logic)</title>
<p>The US approach to adaptive medical AI tends to treat model evolution not as an exception but as an anticipated feature of real-world deployment. This stance aligns with the pragmatism of the US common-law tradition, which often addresses uncertainty through incremental adjustment rather than comprehensive ex-ante control. Within this context, the Total Product Life Cycle (TPLC) framework provides a conceptual foundation for US oversight. The FDA has further advanced this lifecycle view through initiatives such as the Total Product Life Cycle Advisory Program (TAP) Pilot, which seeks to support earlier and more continuous engagement across the lifecycle for eligible technologies (<xref ref-type="bibr" rid="ref11">11</xref>). Rather than viewing medical AI as a fixed-function device validated at a single moment in time, a TPLC orientation assumes that performance can shift as models encounter heterogeneous patient populations, new clinical knowledge, and changing real-world environments (<xref ref-type="bibr" rid="ref11">11</xref>). In principle, this lifecycle framing supports continued regulatory engagement across development, deployment, and post-market monitoring, reflecting the premise that safety for adaptive systems may depend on ongoing oversight rather than a single front-loaded assurance event.</p>
<p>This lifecycle foundation is operationalized through Predetermined Change Control Plans (PCCPs), which translate a dynamic view of AI into a structured administrative tool. In the FDA&#x2019;s current guidance, a PCCP is expected to describe (i) the planned device modifications, (ii) the associated methodology to develop, validate, and implement those modifications, and (iii) an assessment of the impact of those modifications (<xref ref-type="bibr" rid="ref12">12</xref>). Reviewed as part of a marketing submission, a PCCP functions as a regulatory pre-commitment: developers can implement pre-specified, bounded updates without submitting a new marketing application for each change, while regulators maintain predictability through upfront specification of update methods, evidence plans, and control limits (<xref ref-type="bibr" rid="ref12">12</xref>).</p>
<p>Yet adaptability alone does not guarantee safety. To close this loop, the US system relies heavily on Real-World Evidence (RWE) as an empirical feedback mechanism. RWE enables regulators and developers to evaluate whether updates implemented under a PCCP maintain acceptable performance when exposed to real clinical variation, but its value depends on whether the underlying real-world data are fit-for-purpose (e.g., relevant, reliable, and sufficiently traceable for the regulatory question) (<xref ref-type="bibr" rid="ref13">13</xref>). Rather than relying exclusively on controlled trials&#x2014;which are often infeasible for frequently updated systems&#x2014;RWE programs can be used to monitor drift, heterogeneity effects, and calibration changes <italic>in situ</italic>, ideally under a monitoring plan that pre-defines the target metrics, drift thresholds, reporting cadence, and corrective actions. For instance, calibration drift has been documented in clinical prediction models such as those used for acute kidney injury risk estimation, highlighting the need for ongoing monitoring and recalibration in practice (<xref ref-type="bibr" rid="ref4">4</xref>). Taken together, TPLC provides the conceptual structure, PCCPs institutionalize planned adaptation, and RWE supplies the evidence needed to validate ongoing change.</p>
<p>However, evidence-driven agility is not operationally frictionless. PCCP- and RWE-enabled iteration presupposes sustained access to high-quality and representative real-world data&#x2014;an assumption that may not hold uniformly across institutions, regions, or patient subgroups due to interoperability barriers, privacy constraints, and fragmented data stewardship. Continuous performance monitoring also imposes nontrivial technical and organizational burdens, including data governance, drift-detection infrastructure, subgroup performance auditing, and clear escalation pathways when degradation is detected. Critically, these burdens are often shared across developers and healthcare institutions, meaning that the practical effectiveness of the US approach depends on unevenly distributed capacities and may perform best in well-resourced settings.</p>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>The EU approach: precautionary accountability (civil-law logic)</title>
<p>In contrast, the EU adopts an orientation rooted in civil-law codification, fundamental rights protection, and the precautionary principle. Rather than assuming that post-deployment adaptation can be managed primarily through iterative oversight, the EU regulatory DNA emphasizes ex-ante duties designed to minimize uncertainty before market entry. These duties are distributed across overlapping instruments. Under the AI Act, many medical AI systems are categorized as high-risk, triggering requirements for risk management, data governance, technical documentation, logging and traceability, human oversight, and demonstrated accuracy and robustness (<xref ref-type="bibr" rid="ref14">14</xref>). In parallel, when medical AI is placed on the market as a medical device software function, the MDR imposes conformity assessment, clinical evaluation, and quality-management obligations that structure evidence generation and control of change (<xref ref-type="bibr" rid="ref15">15</xref>). Recent EU guidance (MDCG 2025&#x2013;6) explicitly frames these regimes as simultaneously and complementarily applicable for medical-device AI and encourages integrated compliance approaches rather than parallel, duplicative systems (<xref ref-type="bibr" rid="ref16">16</xref>). The interplay between these instruments can create both redundancy (e.g., documentation and governance) and complementarity (e.g., medical-device lifecycle controls supporting AI-specific duties), reinforcing the EU&#x2019;s precautionary posture (<xref ref-type="bibr" rid="ref16">16</xref>).</p>
<p>Importantly, the EU framework is not purely static. The MDR embeds lifecycle governance through post-market surveillance and vigilance obligations, requiring manufacturers to collect and evaluate experience gained from devices in use and to respond to incidents and performance concerns (<xref ref-type="bibr" rid="ref15">15</xref>). For adaptive or frequently updated software, these post-market duties can become a key channel through which real-world performance signals are detected and acted upon, even when the regulatory pathway for pre-authorized adaptation remains comparatively constrained.</p>
<p>This ex-ante posture is reinforced by the Revised Product Liability Directive (PLD), which modernizes liability law to address the unique risks of adaptive and opaque AI systems alongside the AI Act and MDR. The PLD introduces expanded duties to update, clearer obligations around transparency, and presumptions of defect when manufacturers cannot demonstrate adequate documentation or post-market diligence (<xref ref-type="bibr" rid="ref17">17</xref>). These provisions shift evidentiary burdens toward developers, embedding accountability structurally within statutory duties rather than relying primarily on case-by-case adjudication.</p>
<p>Recognizing, however, that strict ex-ante requirements can impede innovation, the EU has introduced Regulatory Sandboxes as a controlled mechanism to support experimentation without undermining its precautionary architecture (<xref ref-type="bibr" rid="ref14">14</xref>). Participation in a sandbox primarily helps developers interpret and operationalize compliance expectations (e.g., documentation, data governance, and testing plans) through close regulatory accompaniment. Sandboxes therefore aim to reduce regulatory uncertainty and improve readiness for conformity assessment, rather than to waive substantive obligations or substitute for post-market lifecycle duties.</p>
</sec>
<sec id="sec5">
<label>2.3</label>
<title>Divergent implications for innovation, safety, and trust</title>
<p>These regulatory DNAs&#x2014;evidence-driven agility in the US and precautionary accountability in the EU&#x2014;do more than create procedural differences. They generate distinct trajectories for innovation, risk distribution, and public trust. Understanding these implications is essential for navigating global deployment of adaptive medical AI.</p>
<p>A key divergence concerns the trade-off between timeliness and predictability. The US model accelerates innovation by allowing models to evolve under PCCPs and validating performance through RWE. This responsiveness can yield rapid benefits but also concentrates risk in the post-market phase if drift emerges undetected. Conversely, the EU&#x2019;s ex-ante gatekeeping fosters predictability and public trust through rigorous documentation and conformity assessment but slows adaptation, increasing the risk of outdated models persisting in clinical use.</p>
<p>A second divergence appears in the logic of data. In the US, data primarily functions as fuel for iteration&#x2014;a resource used to fine-tune, recalibrate, and enhance models throughout their lifecycle. In the EU, data functions as evidence for compliance, shaping developer incentives toward robust governance, traceability, and documentation infrastructures.</p>
<p>A third divergence involves allocation of responsibility. The US tort system evaluates liability case by case, enabling contextual flexibility but generating uncertainty about ultimate accountability. In contrast, the EU employs structural accountability, where statutory duties and presumptions of defect place predictable responsibility on developers, enhancing trust but increasing regulatory burden.</p>
<p>Taken together, these differences produce distinct innovation ecosystems. The US model rewards rapid experimentation under managed uncertainty, while the EU model prioritizes reliability and fundamental-rights safeguards through stronger ex-ante predictability. Neither approach is inherently superior; each reflects a coherent value hierarchy encoded in its regulatory DNA.</p>
</sec>
</sec>
<sec sec-type="discussion" id="sec6">
<label>3</label>
<title>Discussion</title>
<p>The divergent regulatory DNAs of the US and the EU shape not only how adaptive AI systems evolve after deployment but also how developers, clinicians, and regulators interpret their ongoing responsibilities within the broader innovation ecosystem. The US approach enables iterative refinement through PCCPs and RWE (<xref ref-type="bibr" rid="ref11 ref12 ref13">11&#x2013;13</xref>, <xref ref-type="bibr" rid="ref18">18</xref>), supporting context-responsive improvements that help maintain clinical relevance in dynamic environments. This agility is particularly advantageous when rapid recalibration is required to prevent model drift or performance degradation, especially in clinical contexts where population characteristics shift or emerging evidence demands timely updates. In contrast, the EU&#x2019;s documentation-rich and rights-centered framework strengthens transparency, explainability, and legal accountability&#x2014;features essential for public trust and system legitimacy (<xref ref-type="bibr" rid="ref14">14</xref>, <xref ref-type="bibr" rid="ref15">15</xref>, <xref ref-type="bibr" rid="ref17">17</xref>, <xref ref-type="bibr" rid="ref19 ref20 ref21">19&#x2013;21</xref>). These attributes are especially valuable for population-level deployments that require predictability, traceability, and clearly allocated responsibility throughout the lifecycle, ensuring that changes do not undermine established safeguards (<xref ref-type="bibr" rid="ref22">22</xref>).</p>
<p>These approaches reflect partially competing priorities: rapid clinical responsiveness through iterative updating versus predictable accountability anchored in ex-ante assurance. A workable &#x201C;happy medium&#x201D; therefore requires risk-based co-lifecycle governance that permits bounded adaptation where benefits are clear and risks are manageable, while reserving stricter reassessment for changes that could materially alter intended use, clinical impact, or safety profiles. Achieving this balance also depends on coordinated roles among key stakeholders&#x2014;including regulators, notified bodies (in the EU), developers, healthcare institutions, clinicians, and patients&#x2014;because monitoring, documentation, and escalation cannot be executed by a single actor alone. Operationally, developers must maintain update documentation and performance monitoring plans, healthcare institutions must enable data capture and incident reporting in routine workflows, and regulators/notified bodies must define acceptance criteria and escalation pathways that translate post-market signals into governance actions.</p>
<p>The complementary strengths of the two systems create opportunities for meaningful cross-jurisdictional learning. PCCP-based update structures in the US may inform future EU mechanisms for managing permissible model evolution without compromising ex-ante assurance, offering a pathway for integrating controlled adaptivity into the EU&#x2019;s traditionally static conformity assessment processes. Conversely, EU data-governance duties and liability presumptions can enrich US expectations for transparency, auditability, and post-deployment obligation, highlighting areas where US oversight may benefit from more formalized assurances. As adaptive AI increasingly intersects with cross-border clinical workflows, distributed datasets, and multinational deployment pipelines, alignment between these regulatory DNAs becomes essential for ensuring practical interoperability, reducing regulatory fragmentation, and sustaining safety across jurisdictions.</p>
<p>In our view, future global harmonization efforts must explicitly prioritize co-lifecycle governance that couples US-style PCCP-driven adaptability with EU-derived accountability and documentation safeguards. Neither agility nor precaution alone is sufficient for governing adaptive medical AI; instead, durable trust will require a hybrid model that institutionalizes both empirical feedback loops and rights-based protections. Such an approach acknowledges the inevitability of model evolution while ensuring that changes remain transparent, auditable, and aligned with established ethical and legal expectations (<xref ref-type="bibr" rid="ref23">23</xref>).</p>
<p>Global Harmonization Implications. Meaningful convergence between the US and EU will require operational mechanisms rather than conceptual alignment alone. We propose three actionable components:</p>
<list list-type="simple">
<list-item>
<p>(1) Shared taxonomy of permissible software changes (e.g., calibration-only updates, retraining within a fixed architecture, and architecture-level changes), linked to clear regulatory triggers, validation expectations, and notification thresholds&#x2014;thereby enabling pre-authorized, bounded updates without defaulting to full return-to-gate assessment.</p>
</list-item>
<list-item>
<p>(2) Standardized documentation and traceability packages for each update (e.g., change logs, data provenance, subgroup performance reports, and human-oversight provisions), improving auditability and clarifying accountability when performance shifts in real-world use.</p>
</list-item>
<list-item>
<p>(3) Interoperable expectations for post-market performance monitoring, including minimum drift-detection metrics, reporting cadence, and escalation pathways when degradation is detected, to reduce fragmentation for multinational deployment and to avoid divergent model behavior across markets.</p>
</list-item>
</list>
<p>In parallel, structured early dialogue mechanisms&#x2014;such as coordinated scientific advice or sandbox-based accompaniment&#x2014;could help developers design evidence plans that satisfy both PCCP-style adaptability and EU-grade accountability from the outset.</p>
<p>Together, these steps would operationalize co-lifecycle governance by combining planned iteration, empirical feedback, documentation, and clearly allocated responsibility. Such integration supports safe, transparent, and context-responsive innovation across global health systems and provides a foundation for multinational trust in adaptive AI. As the clinical, legal, and technical stakes of adaptive models continue to rise, aligning the agility of the US system with the accountability of the EU will be critical for shaping a globally coherent and ethically robust regulatory future. Societally, such co-lifecycle governance can improve timely access to safer and more current models while sustaining public trust through transparency and clear responsibility; however, it may also amplify inequities if monitoring and documentation costs fall disproportionately on resource-limited settings, underscoring the need for shared infrastructure and proportional requirements.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec7">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material; further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="sec8">
<title>Author contributions</title>
<p>JL: Investigation, Writing &#x2013; original draft, Conceptualization, Writing &#x2013; review &#x0026; editing. BC: Investigation, Writing &#x2013; review &#x0026; editing. KJ: Writing &#x2013; review &#x0026; editing. SS: Investigation, Writing &#x2013; review &#x0026; editing. HR: Writing &#x2013; review &#x0026; editing. JK: Conceptualization, Writing &#x2013; review &#x0026; editing, Supervision. D-SS: Investigation, Writing &#x2013; review &#x0026; editing, Conceptualization, Writing &#x2013; original draft, Project administration.</p>
</sec>
<sec sec-type="COI-statement" id="sec9">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec10">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec11">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ribeiro</surname> <given-names>MT</given-names></name> <name><surname>Singh</surname> <given-names>S</given-names></name> <name><surname>Guestrin</surname> <given-names>C</given-names></name></person-group>. <article-title>&#x201C;Why should I trust you?&#x201D;: explaining the predictions of any classifier</article-title>. <source>KDD</source>. (<year>2016</year>):<fpage>1135</fpage>&#x2013;<lpage>44</lpage>. doi: <pub-id pub-id-type="doi">10.1145/2939672.2939778</pub-id></mixed-citation></ref>
<ref id="ref2"><label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kelly</surname> <given-names>CJ</given-names></name> <name><surname>Karthikesalingam</surname> <given-names>A</given-names></name> <name><surname>Suleyman</surname> <given-names>M</given-names></name> <name><surname>Corrado</surname> <given-names>G</given-names></name> <name><surname>King</surname> <given-names>D</given-names></name></person-group>. <article-title>Key challenges for delivering clinical impact with artificial intelligence</article-title>. <source>BMC Med</source>. (<year>2019</year>) <volume>17</volume>:<fpage>195</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12916-019-1426-2</pub-id>, <pub-id pub-id-type="pmid">31665002</pub-id></mixed-citation></ref>
<ref id="ref3"><label>3.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cabitza</surname> <given-names>F</given-names></name> <name><surname>Rasoini</surname> <given-names>R</given-names></name> <name><surname>Gensini</surname> <given-names>GF</given-names></name></person-group>. <article-title>Unintended consequences of machine learning in medicine</article-title>. <source>JAMA</source>. (<year>2017</year>) <volume>318</volume>:<fpage>517</fpage>&#x2013;<lpage>8</lpage>. doi: <pub-id pub-id-type="doi">10.1001/jama.2017.7797</pub-id>, <pub-id pub-id-type="pmid">28727867</pub-id></mixed-citation></ref>
<ref id="ref4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Davis</surname> <given-names>SE</given-names></name> <name><surname>Lasko</surname> <given-names>TA</given-names></name> <name><surname>Chen</surname> <given-names>G</given-names></name> <name><surname>Siew</surname> <given-names>ED</given-names></name> <name><surname>Matheny</surname> <given-names>ME</given-names></name></person-group>. <article-title>Calibration drift in models for acute kidney injury</article-title>. <source>J Am Med Inform Assoc</source>. (<year>2017</year>) <volume>24</volume>:<fpage>1052</fpage>&#x2013;<lpage>61</lpage>. doi: <pub-id pub-id-type="doi">10.1093/jamia/ocx030</pub-id></mixed-citation></ref>
<ref id="ref5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hood</surname> <given-names>L</given-names></name> <name><surname>Flores</surname> <given-names>M</given-names></name></person-group>. <article-title>Systems medicine and P4 medicine</article-title>. <source>New Biotechnol</source>. (<year>2012</year>) <volume>29</volume>:<fpage>613</fpage>&#x2013;<lpage>24</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.nbt.2012.03.004</pub-id></mixed-citation></ref>
<ref id="ref6"><label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Topol</surname> <given-names>EJ</given-names></name></person-group>. <article-title>High-performance medicine</article-title>. <source>Nat Med</source>. (<year>2019</year>) <volume>25</volume>:<fpage>44</fpage>&#x2013;<lpage>56</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41591-018-0300-7</pub-id></mixed-citation></ref>
<ref id="ref7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Char</surname> <given-names>DS</given-names></name> <name><surname>Shah</surname> <given-names>NH</given-names></name> <name><surname>Magnus</surname> <given-names>D</given-names></name></person-group>. <article-title>Implementing machine learning in health care&#x2014;addressing ethical challenges</article-title>. <source>N Engl J Med</source>. (<year>2018</year>) <volume>378</volume>:<fpage>981</fpage>&#x2013;<lpage>3</lpage>. doi: <pub-id pub-id-type="doi">10.1056/NEJMp1714229</pub-id>, <pub-id pub-id-type="pmid">29539284</pub-id></mixed-citation></ref>
<ref id="ref8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vayena</surname> <given-names>E</given-names></name> <name><surname>Blasimme</surname> <given-names>A</given-names></name> <name><surname>Cohen</surname> <given-names>IG</given-names></name></person-group>. <article-title>Machine learning in medicine: addressing ethical challenges</article-title>. <source>PLoS Med</source>. (<year>2018</year>) <volume>15</volume>:<fpage>e1002689</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pmed.1002689</pub-id>, <pub-id pub-id-type="pmid">30399149</pub-id></mixed-citation></ref>
<ref id="ref9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Selbst</surname> <given-names>AD</given-names></name> <name><surname>Powles</surname> <given-names>J</given-names></name></person-group>. <article-title>Meaningful information and the right to explanation</article-title>. <source>Int Data Privacy Law</source>. (<year>2017</year>) <volume>7</volume>:<fpage>233</fpage>&#x2013;<lpage>42</lpage>. doi: <pub-id pub-id-type="doi">10.1093/idpl/ipx022</pub-id></mixed-citation></ref>
<ref id="ref10"><label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mittelstadt</surname> <given-names>BD</given-names></name> <name><surname>Allo</surname> <given-names>P</given-names></name> <name><surname>Taddeo</surname> <given-names>M</given-names></name> <name><surname>Wachter</surname> <given-names>S</given-names></name> <name><surname>Floridi</surname> <given-names>L</given-names></name></person-group>. <article-title>The ethics of algorithms</article-title>. <source>Big Data Soc</source>. (<year>2016</year>) <volume>3</volume>:<fpage>2053951716679679</fpage>. doi: <pub-id pub-id-type="doi">10.1177/2053951716679679</pub-id></mixed-citation></ref>
<ref id="ref11"><label>11.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll1">U.S. Food and Drug Administration</collab></person-group>. <source>Total product life cycle advisory program (TAP)</source>. <publisher-loc>Silver Spring, MD, USA</publisher-loc>: <publisher-name>U.S. Food and Drug Administration</publisher-name>. (<year>2025</year>).</mixed-citation></ref>
<ref id="ref12"><label>12.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll2">U.S. Food and Drug Administration</collab></person-group>. <source>Marketing submission recommendations for a predetermined change control plan for artificial intelligence-enabled device software functions: guidance for industry and Food and Drug Administration staff</source>. <publisher-loc>Silver Spring, MD, USA</publisher-loc>: <publisher-name>U.S. Food and Drug Administration</publisher-name>. (<year>2025</year>).</mixed-citation></ref>
<ref id="ref13"><label>13.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll3">U.S. Food and Drug Administration</collab></person-group>. <source>Use of real-world evidence to support regulatory decision-making for medical devices: guidance for industry and Food and Drug Administration staff</source>. <publisher-loc>Silver Spring, MD, USA</publisher-loc>: <publisher-name>U.S. Food and Drug Administration</publisher-name>. (<year>2025</year>).</mixed-citation></ref>
<ref id="ref14"><label>14.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll4">European Union</collab></person-group>. <source>European Parliament and Council Regulation (EU) 2024/1689: artificial intelligence act</source>. <publisher-loc>Brussels, Belgium</publisher-loc>: <publisher-name>European Union</publisher-name>. (<year>2024</year>).</mixed-citation></ref>
<ref id="ref15"><label>15.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll5">European Union</collab></person-group>. <source>European Parliament and Council Regulation (EU) 2017/745 on medical devices (MDR)</source>. <publisher-loc>Brussels, Belgium</publisher-loc>: <publisher-name>European Union</publisher-name>. (<year>2017</year>).</mixed-citation></ref>
<ref id="ref16"><label>16.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll6">European Commission (Medical Device Coordination Group)</collab></person-group>. <source>Medical Device Coordination Group MDCG 2025-6: FAQ on interplay between MDR, IVDR and AI act</source>. <publisher-loc>Brussels, Belgium</publisher-loc>: <publisher-name>European Commission</publisher-name>. (<year>2025</year>).</mixed-citation></ref>
<ref id="ref17"><label>17.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll7">European Union</collab></person-group>. <source>European Parliament and Council Directive (EU) 2024/2853 on liability for defective products (PLD)</source>. <publisher-loc>Brussels, Belgium</publisher-loc>: <publisher-name>European Union</publisher-name>. (<year>2024</year>).</mixed-citation></ref>
<ref id="ref18"><label>18.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll8">U.S. Food and Drug Administration</collab></person-group>. <source>Quality system regulation amendments (QMSR). Final rule</source>. <publisher-loc>Silver Spring, MD, USA</publisher-loc>: <publisher-name>U.S. Food and Drug Administration</publisher-name>. (<year>2024</year>).</mixed-citation></ref>
<ref id="ref19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Amann</surname> <given-names>J</given-names></name> <name><surname>Blasimme</surname> <given-names>A</given-names></name> <name><surname>Vayena</surname> <given-names>E</given-names></name> <name><surname>Frey</surname> <given-names>D</given-names></name> <name><surname>Madai</surname> <given-names>VI</given-names></name></person-group>. <article-title>Explainability for artificial intelligence in healthcare</article-title>. <source>BMC Med Inform Decis Mak</source>. (<year>2020</year>) <volume>20</volume>:<fpage>310</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12911-020-01332-6</pub-id></mixed-citation></ref>
<ref id="ref20"><label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Price</surname> <given-names>WN</given-names> <suffix>II</suffix></name> <name><surname>Gerke</surname> <given-names>S</given-names></name> <name><surname>Cohen</surname> <given-names>IG</given-names></name></person-group>. <article-title>Potential liability for physicians using artificial intelligence</article-title>. <source>JAMA</source>. (<year>2019</year>) <volume>322</volume>:<fpage>1765</fpage>&#x2013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1001/jama.2019.15064</pub-id></mixed-citation></ref>
<ref id="ref21"><label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gerke</surname> <given-names>S</given-names></name> <name><surname>Minssen</surname> <given-names>T</given-names></name> <name><surname>Cohen</surname> <given-names>G</given-names></name></person-group>. <article-title>Ethical and legal challenges of artificial intelligence-driven healthcare</article-title>. <source>Artificial Intelligence in Healthcare</source>. (<year>2020</year>) <fpage>295</fpage>&#x2013;<lpage>336</lpage>. doi: <pub-id pub-id-type="doi">10.1016/B978-0-12-818438-7.00012-5</pub-id></mixed-citation></ref>
<ref id="ref22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Johnson</surname> <given-names>KW</given-names></name> <name><surname>Torres Soto</surname> <given-names>J</given-names></name> <name><surname>Glicksberg</surname> <given-names>BS</given-names></name> <name><surname>Shameer</surname> <given-names>K</given-names></name> <name><surname>Miotto</surname> <given-names>R</given-names></name> <name><surname>Ali</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>Artificial intelligence in cardiology</article-title>. <source>J Am Coll Cardiol</source>. (<year>2018</year>) <volume>71</volume>:<fpage>2668</fpage>&#x2013;<lpage>79</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jacc.2018.03.521</pub-id>, <pub-id pub-id-type="pmid">29880128</pub-id></mixed-citation></ref>
<ref id="ref23"><label>23.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><collab id="coll9">World Health Organization</collab></person-group>. <source>Ethics and governance of artificial intelligence for health</source>. <publisher-loc>Geneva, Switzerland</publisher-loc>: <publisher-name>WHO</publisher-name>. (<year>2021</year>).</mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1009998/overview">Reza Rastmanesh</ext-link>, Independent Researcher, Tehran, Iran</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3346256/overview">Meghavi Mashar</ext-link>, Beth Israel Deaconess Medical Center Cancer Center, United States</p>
</fn>
</fn-group>
</back>
</article>