<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="brief-report" dtd-version="2.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Artif. Intell.</journal-id>
<journal-title>Frontiers in Artificial Intelligence</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Artif. Intell.</abbrev-journal-title>
<issn pub-type="epub">2624-8212</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/frai.2025.1525937</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Artificial Intelligence</subject>
<subj-group>
<subject>Brief Research Report</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Evaluating artificial intelligence bias in nephrology: the role of diversity, equity, and inclusion in AI-driven decision-making and ethical regulation</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Balakrishnan</surname> <given-names>Suryanarayanan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2932611/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Thongprayoon</surname> <given-names>Charat</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2072972/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Wathanavasin</surname> <given-names>Wannasit</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Miao</surname> <given-names>Jing</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1869871/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Mao</surname> <given-names>Michael A.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Craici</surname> <given-names>Iasmina M.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1911182/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Cheungpasitporn</surname> <given-names>Wisit</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/780947/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Division of Nephrology and Hypertension, Department of Medicine, Mayo Clinic</institution>, <addr-line>Rochester, MN</addr-line>, <country>United States</country></aff>
<aff id="aff2"><sup>2</sup><institution>Nephrology Unit, Department of Medicine, Charoenkrung Pracharak Hospital</institution>, <addr-line>Bangkok</addr-line>, <country>Thailand</country></aff>
<aff id="aff3"><sup>3</sup><institution>Division of Nephrology and Hypertension, Department of Medicine, Mayo Clinic</institution>, <addr-line>Jacksonville, FL</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0001">
<p>Edited by: Filippo Gibelli, University of Camerino, Italy</p></fn>
<fn fn-type="edited-by" id="fn0002">
<p>Reviewed by: Yuri Battaglia, University of Verona, Italy</p>
<p>Dinh Tuan Phan Le, New York City Health and Hospitals Corporation, United States</p>
<p>Ghulam Abbas, Southeast University, China</p></fn>
<corresp id="c001">&#x002A;Correspondence: Wisit Cheungpasitporn, <email>wcheungpasitporn@gmail.com</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>27</day>
<month>05</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>8</volume>
<elocation-id>1525937</elocation-id>
<history>
<date date-type="received">
<day>10</day>
<month>11</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>12</day>
<month>05</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2025 Balakrishnan, Thongprayoon, Wathanavasin, Miao, Mao, Craici and Cheungpasitporn.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Balakrishnan, Thongprayoon, Wathanavasin, Miao, Mao, Craici and Cheungpasitporn</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec id="sec1">
<title>Background</title>
<p>The integration of Artificial Intelligence (AI) in nephrology has raised concerns regarding bias, fairness, and ethical decision-making, particularly in the context of Diversity, Equity, and Inclusion (DEI). AI-driven models, including Large Language Models (LLMs) like ChatGPT, may unintentionally reinforce existing disparities in patient care and workforce recruitment. This study investigates how AI models (ChatGPT 3.5 and 4.0) handle DEI-related ethical considerations in nephrology, highlighting the need for improved regulatory oversight to ensure equitable AI deployment.</p>
</sec>
<sec id="sec2">
<title>Methods</title>
<p>The study was conducted in March 2024 using ChatGPT 3.5 and 4.0. Eighty simulated cases were developed to assess ChatGPT&#x2019;s decision-making across diverse nephrology topics. ChatGPT was instructed to respond to questions considering factors such as age, sex, gender identity, race, ethnicity, religion, cultural beliefs, socioeconomic status, education level, family structure, employment, insurance, geographic location, disability, mental health, language proficiency, and technology access.</p>
</sec>
<sec id="sec3">
<title>Results</title>
<p>ChatGPT 3.5 provided a response to all scenario questions and did not refuse to make decisions under any circumstances. This contradicts the essential DEI principle of avoiding decisions based on potentially discriminatory criteria. In contrast, ChatGPT 4.0 declined to make decisions based on potentially discriminatory criteria in 13 (16.3%) scenarios during the first round and in 5 (6.3%) during the second round.</p>
</sec>
<sec id="sec4">
<title>Conclusion</title>
<p>While ChatGPT 4.0 shows improvement in ethical AI decision-making, its limited recognition of bias and DEI considerations underscores the need for robust AI regulatory frameworks in nephrology. AI governance must incorporate structured DEI guidelines, ongoing bias detection mechanisms, and ethical oversight to prevent AI-driven disparities in clinical practice and workforce recruitment. This study emphasizes the importance of transparency, fairness, and inclusivity in AI development, calling for collaborative efforts between AI developers, nephrologists, policymakers, and patient communities to ensure AI serves as an equitable tool in nephrology.</p>
</sec>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>diversity, equity, and inclusion</kwd>
<kwd>nephrology</kwd>
<kwd>bias detection</kwd>
<kwd>ethical AI regulation</kwd>
<kwd>decision-making</kwd>
<kwd>ChatGPT</kwd>
<kwd>clinical implications</kwd>
</kwd-group>
<counts>
<fig-count count="3"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="36"/>
<page-count count="8"/>
<word-count count="6088"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Medicine and Public Health</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec5">
<title>Introduction</title>
<p>Kidney disease is a global health issue, affecting millions of people worldwide. Chronic Kidney Disease (CKD) is particularly prevalent, with an estimated 850 million people suffering from this condition (<xref ref-type="bibr" rid="ref12">Jager et al., 2019</xref>). CKD is characterized by a gradual loss of kidney function over time, which can progress to End-Stage Kidney Disease (ESKD) if not properly managed. ESKD requires dialysis or kidney transplantation, both of which pose significant health and economic burdens (<xref ref-type="bibr" rid="ref13">Jha et al., 2023</xref>). The field of nephrology, which deals with the diagnosis and treatment of kidney diseases, has been at the forefront of medical innovation for decades. However, like many areas of healthcare, it faces significant challenges in ensuring equitable access to care and outcomes for all patients (<xref ref-type="bibr" rid="ref34">Vanholder et al., 2023</xref>). This effect is more pronounced among minorities, especially among women from these groups, beginning with their inclusion in research studies; it is further exacerbated by existing systemic biases (<xref ref-type="bibr" rid="ref29">Pinho-Gomes et al., 2023</xref>; <xref ref-type="bibr" rid="ref20">Mohammed et al., 2024</xref>). In recent years, the concepts of diversity, equity, and inclusion (DEI) have gained increasing prominence in healthcare, with nephrology being no exception. The &#x201C;Kidney Care for All&#x201D; initiative exemplifies this shift, advocating for a more inclusive approach to kidney health that addresses disparities and promotes equal access to quality care for all individuals, regardless of their background or socioeconomic status (<xref ref-type="bibr" rid="ref26">Pais and Iyengar, 2023</xref>). 
Despite efforts to increase diversity in medical education and practice, the field of nephrology, like many medical specialties, still struggles with underrepresentation of minority groups among its practitioners (<xref ref-type="bibr" rid="ref30">Salsberg et al., 2021</xref>).</p>
<p>Concurrently, the rapid advancement of Artificial Intelligence (AI), particularly Large Language Models (LLMs) like ChatGPT (<xref ref-type="bibr" rid="ref25">Open AI. Introducing Chat GPT, 2024</xref>), has begun to reshape the landscape of healthcare decision-making. These AI systems, capable of processing vast amounts of data and generating human-like responses, hold immense potential for augmenting clinical decision-making, streamlining administrative processes, and potentially reducing healthcare disparities (<xref ref-type="bibr" rid="ref11">Garcia Valencia et al., 2023</xref>). However, the integration of AI into nephrology and other medical fields also raises critical questions about the ethical implications and potential unintended consequences of relying on machine-generated insights for patient care and professional decisions. There are concerns that AI systems, if not properly designed and implemented, could perpetuate or even exacerbate existing health disparities (<xref ref-type="bibr" rid="ref24">Omiye et al., 2023</xref>; <xref ref-type="bibr" rid="ref36">Yang et al., 2024</xref>). For instance, if training data for AI models are not sufficiently diverse or representative, the resulting algorithms may perform poorly for certain population subgroups or reinforce biased decision-making patterns (<xref ref-type="bibr" rid="ref15">Kuhlman et al., 2020</xref>).</p>
<p>Given the significant impact that AI could have on nephrology, it is imperative to assess how well these technologies adhere to DEI principles. This study focuses on evaluating the ethical sensitivity and decision-making capabilities of two versions of ChatGPT (3.5 and 4.0) in nephrology-related scenarios. By examining how these AI models handle DEI considerations, particularly with regard to underrepresented socio-demographic variables such as ethnicity, employment status, education, and religious beliefs, we aim to identify potential risks and areas for improvement in their design and implementation. These attributes, while often underappreciated in traditional clinical decision-making workflows, are critical to fostering equitable care. Our work seeks to advance the dialogue on AI fairness in nephrology by showcasing how ethical AI evaluation must go beyond clinical indicators to include the broader socio-cultural determinants of health.</p>
</sec>
<sec sec-type="methods" id="sec6">
<title>Methods</title>
<p>In the context of AI in nephrology, <italic>diversity</italic> refers to the inclusion of individuals from varied demographic backgrounds in data and practice; <italic>equity</italic> involves ensuring fair treatment and access to care and opportunities; and <italic>inclusion</italic> emphasizes meaningful engagement of underrepresented groups in AI design and implementation. These principles guided the development of our simulation scenarios and the evaluation of AI model responses.</p>
<sec id="sec7">
<title>Simulated cases development</title>
<p>A total of 80 simulated cases were collaboratively developed by two board-certified nephrologists (CT and WC) with expertise in DEI and clinical ethics. Each case was informed by real-world nephrology practice and ethical dilemmas and incorporated social determinants of health relevant to DEI considerations. The cases were iteratively reviewed to ensure clinical plausibility, decision-making complexity, and DEI sensitivity. Each scenario included four response options representing a gradient of ethical appropriateness: (1) ethically aligned and inclusive, (2) partially biased or utilitarian, (3) clearly discriminatory, and (4) neutral or non-committal. To promote transparency and reproducibility, all 80 scenarios and their response options are provided as Online Supplementary Material, along with their corresponding DEI domain and clinical context.</p>
<p>For each simulated case, four multiple-choice responses were carefully developed by the nephrologist authors to reflect common patterns of decision-making: one ethically aligned and inclusive option, one partially biased or utilitarian option, one clearly discriminatory option, and one neutral or non-committal response. This structure allowed us to assess the AI models&#x2019; ability to distinguish ethically sound recommendations from biased or inappropriate ones. All choices were reviewed to ensure internal consistency, clinical plausibility, and DEI relevance.</p>
</sec>
<sec id="sec8">
<title>Evaluation process</title>
<p>ChatGPT was instructed to select the best response from four provided choices for each scenario. The AI was guided to prioritize decisions based on factors including age, sex, gender identity, sexual orientation, race, ethnicity, religion, cultural beliefs, socioeconomic status, education level, family structure, employment, insurance, geographic location, disability, impairment, mental health, language proficiency, and technology access (<xref ref-type="fig" rid="fig1">Figure 1</xref>). In alignment with DEI principles, ChatGPT was designed to decline making decisions when the background information could potentially lead to discriminatory outcomes.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Flowchart of the methodology for AI evaluation in nephrology simulation cases.</p>
</caption>
<alt-text>Flowchart detailing a study on AI in nephrology. Steps: (1) Simulation case creation in March 2024 by two nephrologists; (2) Review for accuracy, relevance, and ethics; (3) Evaluation of ChatGPT 3.5 and 4.0 for decision-making and DEI sensitivity; (4) Performance comparison&#x2014;ChatGPT 3.5 showed a utilitarian approach, while ChatGPT 4.0 refused decisions in 16.25% of cases based on discriminatory criteria; (5) Conclusion with ethical insights and AI regulation recommendations in nephrology.</alt-text>
<graphic xlink:href="frai-08-1525937-g001.tif"/>
</fig>
<p>The study was conducted in March 2024 using both ChatGPT 3.5 and 4.0. Each model was queried twice at one-month intervals to assess the consistency of their responses over time.</p>
</sec>
</sec>
<sec sec-type="results" id="sec9">
<title>Results</title>
<p>The full set of ChatGPT 3.5 and 4.0 responses to all 80 simulated scenarios is presented in the Online Supplementary Materials. This includes the selected response by each model, whether a refusal occurred (along with the stated rationale), and the consistency of responses across rounds for ChatGPT 4.0.</p>
<p>ChatGPT 3.5 answered all 80 scenarios without refusal, demonstrating a utilitarian approach that did not account for potential ethical conflicts. In contrast, ChatGPT 4.0 declined to make decisions based on potentially discriminatory criteria in 13 scenarios (16.3%) during the first round and in 5 scenarios (6.3%) during the second round. It provided consistent responses across both rounds in 67 scenarios (83.8%), suggesting moderate reliability in its ethical decision-making behavior.</p>
<p>These refusals predominantly involved scenarios in which decisions were explicitly linked to sensitive sociodemographic variables. For example, in one scenario regarding workforce recruitment, the model declined to prioritize candidates based on religious affiliation, stating that such decisions should be made based on qualifications rather than personal beliefs. In another case involving transplant eligibility, it refused to recommend prioritizing patients by immigration status, emphasizing the ethical obligation to treat all individuals equitably. Additional examples included declining to select candidates for living kidney donation promotion based on employment type (e.g., self-employed vs. corporate), or selecting dialysis access strategies based on language proficiency. A complete list of these 13 scenarios and the specific DEI attributes involved is available in the Online Supplementary Materials. These findings underscore the model&#x2019;s partial sensitivity to ethically contentious prompts, though they also reveal inconsistency, as similar cases were occasionally answered without objection.</p>
<p><xref ref-type="fig" rid="fig2">Figure 2</xref> demonstrates examples of GPT output regarding critical considerations in nephrology treatment and workforce diversity.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Examples of outputs from GPT-3.5 and GPT-4.0.</p>
</caption>
<alt-text>Screenshot collage of ChatGPT responses to ethically sensitive nephrology-related questions. Topics include prioritizing ethnic groups, employment status, employment type, and religion in treatment or workforce decisions. ChatGPT selected: &#x201C;Black&#x201D; for kidney treatment prioritization, &#x201C;Full-time employment&#x201D; for diabetic nephropathy treatment, &#x201C;Self-employed&#x201D; for living kidney donation promotion, and &#x201C;Christianity&#x201D; for ensuring a diverse workforce. Each answer is followed by an explanatory paragraph.</alt-text>
<graphic xlink:href="frai-08-1525937-g002.tif"/>
</fig>
<p>In <xref ref-type="fig" rid="fig2">Figure 2</xref>, the left column presents outputs from GPT-3.5, while the right column features responses from GPT-4.0. For GPT-3.5, question #1 inquires about which ethnic group should be prioritized for a new kidney disease treatment. The model recommends prioritizing Black individuals, citing their historical disproportionate impact from kidney disease, which necessitates tailored treatment considerations to address their specific needs. In question #21, regarding employment types to prioritize for promoting living kidney donations, GPT-3.5 suggests focusing on self-employed individuals, as they often have more flexibility in managing their schedules, making it easier for them to accommodate the donation process and recovery time. On the right side, GPT-4.0 addresses question #36, which asks about the employment status to consider for new diabetic nephropathy treatments. The model advocates for prioritizing individuals in full-time employment, noting that this demographic is more likely to have access to employer-sponsored health insurance, thus improving equity in treatment access. In question #78, concerning which religious belief should be factored in when selecting candidates for nephrologist positions to ensure workforce diversity, GPT-4.0 emphasizes the importance of considering candidates from diverse religious backgrounds, specifically highlighting Christianity, to foster an inclusive environment that enhances cultural competency in patient care.</p>
</sec>
<sec sec-type="discussion" id="sec10">
<title>Discussion</title>
<p>The findings of this study reveal a marked difference in ethical decision-making capabilities between ChatGPT 3.5 and 4.0 in nephrology-related scenarios. ChatGPT 3.5 consistently selected treatment choices predicted to yield the best outcomes across all scenarios, demonstrating a utilitarian approach that incorporated various DEI factors. However, it did not refuse to make decisions in any scenario, reflecting a lack of sensitivity to potentially discriminatory criteria. In contrast, ChatGPT 4.0 declined to make decisions based on potentially discriminatory criteria in 16.25% of scenarios, explicitly stating that DEI factors should not affect decisions about treating patients or hiring nephrology staff. While this shows an improvement in ethical sensitivity, the relatively low refusal rate was unexpected, highlighting areas for further enhancement.</p>
<p>The observed improvement in ChatGPT 4.0&#x2019;s handling of DEI-sensitive decisions underscores the iterative progress in AI ethical alignment, likely influenced by updated reinforcement learning techniques and refined safety layers in the model&#x2019;s training pipeline (<xref ref-type="bibr" rid="ref31">Sheikh et al., 2025</xref>; <xref ref-type="bibr" rid="ref3">Alam et al., 2025</xref>; <xref ref-type="bibr" rid="ref19">Miao et al., 2025</xref>). While not yet optimal, ChatGPT 4.0&#x2019;s behavior reflects a greater sensitivity to fairness principles, selectively deferring decisions that could result in biased clinical or hiring recommendations. These changes are promising, as they suggest that ethical behavior in LLMs can be enhanced over successive versions. Nevertheless, the relatively low overall refusal rate indicates that existing mechanisms remain insufficient to fully safeguard against implicit bias, and further tuning&#x2014;both algorithmic and regulatory&#x2014;is essential to prevent harm.</p>
<p>Existing literature on AI in healthcare highlights both the potential benefits and risks associated with AI integration (<xref ref-type="bibr" rid="ref22">Nazer et al., 2023</xref>; <xref ref-type="bibr" rid="ref4">Ali et al., 2023</xref>). Previous studies have demonstrated AI&#x2019;s capacity to enhance clinical decision-making and patient outcomes, but they also raise concerns about the perpetuation of biases inherent in training data (<xref ref-type="bibr" rid="ref24">Omiye et al., 2023</xref>; <xref ref-type="bibr" rid="ref5">Ayoub et al., 2024</xref>; <xref ref-type="bibr" rid="ref27">Parikh et al., 2019</xref>). <xref ref-type="bibr" rid="ref23">Obermeyer et al. (2019)</xref> demonstrated racial bias in a popular commercial algorithm for risk stratification used in the healthcare system. Our findings align with these concerns, illustrating that while AI can make clinically sound decisions, its consideration of ethical issues, particularly DEI, remains imperfect. The differences in performance between ChatGPT 3.5 and 4.0 likely stem from advancements in model training and updates in ethical guidelines integrated into the AI&#x2019;s framework. At the same time, we acknowledge that the relatively low refusal rate points to the need for further refinement of AI models and the development of more robust ethical frameworks. To address this, our discussion has been expanded to emphasize that while the improvements observed in ChatGPT 4.0 are encouraging, they also serve as a catalyst for continued research. Future directions will focus on refining training datasets, enhancing algorithmic sensitivity to DEI factors, and incorporating interdisciplinary insights to align AI recommendations more closely with clinical judgment. Such measures are anticipated to support the responsible integration of AI into clinical practice, ensuring that these systems become reliable tools for promoting equitable healthcare outcomes.</p>
<p>Beyond the specific case of ChatGPT, DEI-related concerns are increasingly relevant across the broader spectrum of AI applications in nephrology. AI systems in clinical settings are not only used for decision support but also influence patient triage, diagnostic accuracy, treatment planning, and workforce recruitment (<xref ref-type="bibr" rid="ref19">Miao et al., 2025</xref>; <xref ref-type="bibr" rid="ref14">Koirala et al., 2025</xref>; <xref ref-type="bibr" rid="ref28">Pham et al., 2024</xref>). If AI models are not carefully designed with DEI principles in mind, they risk reinforcing existing disparities in kidney care, including biases related to race, socioeconomic status, and geographic location. Moreover, many AI models are developed in high-income countries with datasets that may not be representative of the global population, further exacerbating inequities in nephrology care. Addressing these issues requires greater scrutiny of model training processes, the diversity of datasets, and the interpretability of AI-driven recommendations (<xref ref-type="bibr" rid="ref9">Ferryman et al., 2023</xref>; <xref ref-type="bibr" rid="ref17">Meng et al., 2022</xref>). Future research will be directed toward evaluating alternative AI systems beyond ChatGPT to compare their performance in handling DEI considerations. This broader approach will not only enrich our understanding of AI&#x2019;s potential benefits and pitfalls in nephrology but also inform the development of more robust regulatory and ethical frameworks for AI integration across the discipline. Additionally, interdisciplinary collaboration between AI developers, ethicists, nephrologists, and policymakers will be essential in ensuring that AI systems are aligned with clinical needs while promoting health equity (<xref ref-type="bibr" rid="ref1">Abbasgholizadeh Rahimi et al., 2024</xref>).</p>
<p>The implications of these findings for the integration of AI in nephrology and broader healthcare contexts are significant. While AI has the potential to enhance decision-making and improve patient outcomes, this study underscores the importance of robust ethical frameworks and careful regulation. The fact that even the more advanced ChatGPT 4.0 model failed to recognize potentially discriminatory factors in the majority of cases emphasizes the need for human oversight and the importance of using AI as a supportive tool rather than a replacement for human judgment in critical medical decisions. Furthermore, this study highlights the need for ongoing research and development in AI ethics, particularly in healthcare applications. As AI models continue to evolve rapidly, it is crucial to regularly assess their ethical decision-making capabilities and identify areas for improvement. This may involve developing more sophisticated training datasets that better represent diverse populations and ethical scenarios, as well as refining the algorithms that govern AI decision-making processes (<xref ref-type="bibr" rid="ref10">Gaonkar et al., 2023</xref>; <xref ref-type="bibr" rid="ref21">Mudgal and Das, 2020</xref>; <xref ref-type="bibr" rid="ref33">Ueda et al., 2024</xref>).</p>
<p>Additionally, there is a clear need for interdisciplinary collaboration between AI developers, ethicists, healthcare professionals, and policymakers to ensure that AI systems are designed and implemented in ways that promote equity and avoid perpetuating or exacerbating existing healthcare disparities (<xref ref-type="bibr" rid="ref33">Ueda et al., 2024</xref>; <xref ref-type="bibr" rid="ref7">Dankwa-Mullan, 2024</xref>; <xref ref-type="bibr" rid="ref18">Mennella et al., 2024</xref>; <xref ref-type="bibr" rid="ref35">Walsh et al., 2023</xref>). Moreover, the study&#x2019;s findings have important implications for medical education and professional development in nephrology and other healthcare fields. As AI becomes increasingly integrated into clinical practice, it is essential that healthcare professionals are trained not only in the use of these technologies but also in critically evaluating their outputs and understanding their ethical limitations (<xref ref-type="bibr" rid="ref11">Garcia Valencia et al., 2023</xref>; <xref ref-type="bibr" rid="ref33">Ueda et al., 2024</xref>). This includes developing skills in recognizing potential biases in AI-generated recommendations and maintaining a commitment to equitable, patient-centered care. By fostering a workforce that is both technologically adept and ethically grounded, the field of nephrology can work toward harnessing the benefits of AI while upholding its commitment to diversity, equity, and inclusion in all aspects of patient care and professional practice (<xref ref-type="fig" rid="fig3">Figure 3</xref>).</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Limitations and challenges in implementing AI in nephrology.</p>
</caption>
<alt-text>Flowchart outlining limitations and challenges in implementing AI in nephrology, with five main categories: data quality and bias, clinical implementation complexity, need for data and ethics, clinician interpretation and training, and trust. Each category leads to targeted solutions such as developing bias-free algorithms, integrating AI with workflows, addressing ethical data concerns, training professionals in AI, and demonstrating improved outcomes. The goal is equitable, transparent, and effective AI adoption in nephrology.</alt-text>
<graphic xlink:href="frai-08-1525937-g003.tif"/>
</fig>
<p>The development of standardized DEI-related ethical guidelines for AI in nephrology is essential to ensuring that AI-driven decision-making aligns with the principles of fairness, transparency, and inclusivity (<xref ref-type="bibr" rid="ref1">Abbasgholizadeh Rahimi et al., 2024</xref>; <xref ref-type="bibr" rid="ref32">Solomonides et al., 2022</xref>). These guidelines should provide a structured framework requiring AI developers to incorporate diverse, representative training datasets, implement bias detection and mitigation strategies, and prioritize explainability so that clinicians can critically assess AI-generated recommendations (<xref ref-type="bibr" rid="ref6">Chin et al., 2023</xref>). Furthermore, interdisciplinary collaboration among AI developers, nephrologists, bioethicists, and policymakers is necessary to create AI systems that are both clinically effective and socially responsible. AI models should undergo continuous auditing to evaluate their adherence to DEI principles, with regulatory oversight ensuring that biases are addressed over time. Healthcare institutions must also take an active role in developing policies that support responsible AI use in both patient care and workforce recruitment, ensuring that AI-driven decisions contribute to equitable healthcare (<xref ref-type="bibr" rid="ref1">Abbasgholizadeh Rahimi et al., 2024</xref>). Future research should focus on refining these frameworks to optimize AI deployment in a manner that enhances healthcare access and outcomes for all patients.</p>
<p>Ensuring the ethical performance of AI models in nephrology requires a continuous monitoring and evaluation process that extends throughout the entire lifecycle of these systems (<xref ref-type="bibr" rid="ref1">Abbasgholizadeh Rahimi et al., 2024</xref>). AI outputs can shift over time due to changes in training data, algorithm modifications, or evolving clinical practices. To prevent the unintended reinforcement of biases, a structured oversight framework should be established (<xref ref-type="bibr" rid="ref8">Economou-Zavlanos et al., 2024</xref>). This framework should include real-time bias detection mechanisms integrated into AI deployment systems, periodic audits by interdisciplinary teams consisting of nephrologists, AI ethicists, data scientists, and regulatory experts, and ongoing user feedback loops that allow clinicians to report discrepancies or concerns regarding AI-generated recommendations. Additionally, regulatory bodies and healthcare institutions should conduct periodic evaluations to ensure that AI-driven decisions remain equitable and do not disproportionately disadvantage any patient populations. Transparency in reporting AI performance metrics such as disparities in AI decision-making across demographic groups will be critical in building confidence in AI-assisted nephrology (<xref ref-type="bibr" rid="ref6">Chin et al., 2023</xref>; <xref ref-type="bibr" rid="ref8">Economou-Zavlanos et al., 2024</xref>).</p>
<p>To ensure responsible AI use in nephrology, a structured process for addressing ethical concerns must be established. This should include both a dedicated ethics committee and a formal reporting mechanism for healthcare professionals (<xref ref-type="bibr" rid="ref8">Economou-Zavlanos et al., 2024</xref>). The ethics committee should be an interdisciplinary body composed of nephrologists, AI ethicists, data scientists, legal experts, and patient advocates. Its role would be to evaluate ethical concerns related to AI applications, provide guidance on ethical AI implementation, and develop strategies to mitigate bias and ensure fairness. In parallel, a structured reporting mechanism should be created to allow healthcare professionals to flag AI-generated recommendations that appear biased, ethically questionable, or inconsistent with established clinical guidelines (<xref ref-type="bibr" rid="ref8">Economou-Zavlanos et al., 2024</xref>). Reports should be reviewed systematically, with clear channels for follow-up and corrective action. Regular audits of AI performance, along with clinician and patient feedback, should inform ongoing improvements to AI models (<xref ref-type="bibr" rid="ref8">Economou-Zavlanos et al., 2024</xref>). Establishing these processes will help maintain ethical integrity in AI-driven nephrology and ensure that AI tools are used in a manner that upholds DEI principles while enhancing patient care.</p>
<p>Active collaboration with patient communities is a critical component of AI development and deployment in nephrology. Engaging patients throughout the AI lifecycle allows their perspectives to be incorporated into model design, leading to AI systems that are more aligned with the diverse needs of the nephrology population (<xref ref-type="bibr" rid="ref16">Lammons et al., 2023</xref>; <xref ref-type="bibr" rid="ref2">Adus et al., 2023</xref>). Establishing patient advisory panels can provide valuable insights into AI-generated recommendations, helping to identify potential gaps or biases in decision-making. Additionally, focus groups with patients from different demographic and socioeconomic backgrounds can highlight concerns, expectations, and trust levels regarding AI-driven healthcare tools (<xref ref-type="bibr" rid="ref2">Adus et al., 2023</xref>). Transparency in AI decision-making is essential, and efforts should be made to present AI-generated recommendations in a way that is understandable and actionable for both clinicians and patients. Patient advocacy organizations play a key role in facilitating these collaborations by acting as intermediaries between AI developers, healthcare providers, and patient communities.</p>
<p>This study has several notable strengths. It is one of the first systematic evaluations of how large language models respond to ethical scenarios involving DEI considerations within nephrology. The simulation-based design allows for structured comparison across different model versions (ChatGPT 3.5 and 4.0) using controlled variables. The inclusion of real-world DEI variables such as religion, race, employment, and immigration status provides ecological validity and highlights relevant ethical challenges in clinical care and workforce recruitment. Additionally, the full disclosure of simulated cases and AI responses enhances transparency and reproducibility.</p>
<p>While the simulated cases were constructed to reflect a wide range of real-world nephrology scenarios with DEI relevance, we recognize that the cases were authored by a limited number of clinicians and may unintentionally reflect biases in scenario framing or emphasis. Certain cultural or regional contexts, particularly those affecting underrepresented populations globally, may not be fully captured. These limitations could influence how AI models respond and may underrepresent ethical nuances faced by specific communities. Further studies involving larger and more diverse datasets, as well as real-world clinical trials, are necessary to validate and expand upon these results. Future research should focus on expanding case diversity by incorporating a broader range of scenarios and involving a more diverse group of nephrologists in case development. Implementing and evaluating AI models in actual clinical settings will be crucial to assess their performance and ethical sensitivity in real-time decision-making. Additionally, continuous refinement of AI models is essential to enhance their ability to recognize and appropriately handle DEI considerations (<xref ref-type="bibr" rid="ref33">Ueda et al., 2024</xref>). These efforts will inform the development of robust regulatory frameworks and training protocols that emphasize ethical decision-making and DEI sensitivity. Although our simulated scenarios were created and evaluated by two nephrologists, we recognize that additional insights from a broader group of clinicians may further enrich the evaluation of AI outputs. In future work, we plan to incorporate perspectives from a diverse range of practicing clinicians to refine our findings and ensure that our ethical evaluation framework reflects varied clinical viewpoints. 
This approach is anticipated to further strengthen our assessment of AI performance and contribute to the development of thoughtful regulatory guidelines for AI integration into clinical practice.</p>
<p>To enhance DEI sensitivity in AI models, we recommend several concrete steps: (1) incorporating demographically diverse and representative training datasets; (2) embedding DEI-specific audit mechanisms within model evaluation pipelines; (3) using adversarial testing that challenges models with ethically complex scenarios; and (4) requiring transparent reporting of performance disparities across subgroups. Achieving these goals will require sustained interdisciplinary collaboration. Clinicians can define real-world ethical constraints; data scientists can implement fairness-aware algorithms; ethicists can guide value alignment; and patient advocates can ensure community relevance. This collective approach is essential to creating AI systems that are both clinically robust and socially responsible.</p>
<p>In summary, this study demonstrates that while advancements have been made in AI ethical sensitivity, as seen in ChatGPT 4.0, there is still considerable room for improvement. The relatively low rate of refusal to engage in potentially discriminatory decision-making underscores the need for ongoing refinement of AI models. By addressing these challenges and implementing robust regulatory and training frameworks, we can better ensure that AI systems not only enhance clinical decision-making but also uphold the principles of diversity, equity, and inclusion. These efforts will contribute to more equitable healthcare delivery and better outcomes for all patients in nephrology.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec11">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="sec12">
<title>Author contributions</title>
<p>SB: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Resources, Software, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. CT: Conceptualization, Data curation, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Software, Supervision, Validation, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. WW: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. JM: Conceptualization, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. MM: Conceptualization, Data curation, Investigation, Methodology, Project administration, Software, Supervision, Validation, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. IC: Conceptualization, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. WC: Conceptualization, Data curation, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="funding-information" id="sec13">
<title>Funding</title>
<p>The author(s) declare that no financial support was received for the research and/or publication of this article.</p>
</sec>
<sec sec-type="COI-statement" id="sec14">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec15">
<title>Generative AI statement</title>
<p>The authors declare that Gen AI was used in the creation of this manuscript. The use of ChatGPT in this study was strictly limited to the response-generating protocol described in the methods section. ChatGPT was not used for data analysis or any other aspects of the production of this manuscript. The authors are responsible for all contents.</p>
</sec>
<sec sec-type="disclaimer" id="sec16">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec17">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/frai.2025.1525937/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/frai.2025.1525937/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbasgholizadeh Rahimi</surname> <given-names>S.</given-names></name> <name><surname>Shrivastava</surname> <given-names>R.</given-names></name> <name><surname>Brown-Johnson</surname> <given-names>A.</given-names></name> <name><surname>Caidor</surname> <given-names>P.</given-names></name> <name><surname>Davies</surname> <given-names>C.</given-names></name> <name><surname>Idrissi Janati</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>EDAI framework for integrating equity, diversity, and inclusion throughout the lifecycle of AI to improve health and Oral health care: qualitative study</article-title>. <source>J. Med. Internet Res.</source> <volume>26</volume>:<fpage>e63356</fpage>. doi: <pub-id pub-id-type="doi">10.2196/63356</pub-id>, PMID: <pub-id pub-id-type="pmid">39546793</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Adus</surname> <given-names>S.</given-names></name> <name><surname>Macklin</surname> <given-names>J.</given-names></name> <name><surname>Pinto</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Exploring patient perspectives on how they can and should be engaged in the development of artificial intelligence (AI) applications in health care</article-title>. <source>BMC Health Serv. Res.</source> <volume>23</volume>:<fpage>1163</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12913-023-10098-2</pub-id>, PMID: <pub-id pub-id-type="pmid">37884940</pub-id></citation></ref>
<ref id="ref3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alam</surname> <given-names>S. F.</given-names></name> <name><surname>Thongprayoon</surname> <given-names>C.</given-names></name> <name><surname>Miao</surname> <given-names>J.</given-names></name> <name><surname>Pham</surname> <given-names>J. H.</given-names></name> <name><surname>Sheikh</surname> <given-names>M. S.</given-names></name> <name><surname>Garcia Valencia</surname> <given-names>O. A.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Advancing personalized medicine in digital health: the role of artificial intelligence in enhancing clinical interpretation of 24-h ambulatory blood pressure monitoring</article-title>. <source>Digit Health.</source> <volume>11</volume>:<fpage>20552076251326014</fpage>. doi: <pub-id pub-id-type="doi">10.1177/20552076251326014</pub-id>, PMID: <pub-id pub-id-type="pmid">40093710</pub-id></citation></ref>
<ref id="ref4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ali</surname> <given-names>O.</given-names></name> <name><surname>Abdelbaki</surname> <given-names>W.</given-names></name> <name><surname>Shrestha</surname> <given-names>A.</given-names></name> <name><surname>Elbasi</surname> <given-names>E.</given-names></name> <name><surname>Alryalat</surname> <given-names>M. A. A.</given-names></name> <name><surname>Dwivedi</surname> <given-names>Y. K.</given-names></name></person-group> (<year>2023</year>). <article-title>A systematic literature review of artificial intelligence in the healthcare sector: benefits, challenges, methodologies, and functionalities</article-title>. <source>J. Innov. Knowl.</source> <volume>8</volume>:<fpage>100333</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jik.2023.100333</pub-id></citation></ref>
<ref id="ref5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ayoub</surname> <given-names>N. F.</given-names></name> <name><surname>Balakrishnan</surname> <given-names>K.</given-names></name> <name><surname>Ayoub</surname> <given-names>M. S.</given-names></name> <name><surname>Barrett</surname> <given-names>T. F.</given-names></name> <name><surname>David</surname> <given-names>A. P.</given-names></name> <name><surname>Gray</surname> <given-names>S. T.</given-names></name></person-group> (<year>2024</year>). <article-title>Inherent Bias in large language models: a random sampling analysis</article-title>. <source>Mayo Clinic Proc Digital Health.</source> <volume>2</volume>, <fpage>186</fpage>&#x2013;<lpage>191</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.mcpdig.2024.03.003</pub-id>, PMID: <pub-id pub-id-type="pmid">40207170</pub-id></citation></ref>
<ref id="ref6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chin</surname> <given-names>M. H.</given-names></name> <name><surname>Afsar-Manesh</surname> <given-names>N.</given-names></name> <name><surname>Bierman</surname> <given-names>A. S.</given-names></name> <name><surname>Chang</surname> <given-names>C.</given-names></name> <name><surname>Col&#x00F3;n-Rodr&#x00ED;guez</surname> <given-names>C. J.</given-names></name> <name><surname>Dullabh</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Guiding principles to address the impact of algorithm Bias on racial and ethnic disparities in health and health care</article-title>. <source>JAMA Netw. Open</source> <volume>6</volume>:<fpage>e2345050</fpage>. doi: <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2023.45050</pub-id>, PMID: <pub-id pub-id-type="pmid">38100101</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dankwa-Mullan</surname> <given-names>I.</given-names></name></person-group> (<year>2024</year>). <article-title>Health equity and ethical considerations in using artificial intelligence in public health and medicine</article-title>. <source>Prev. Chronic Dis.</source> <volume>21</volume>:<fpage>E64</fpage>. doi: <pub-id pub-id-type="doi">10.5888/pcd21.240245</pub-id>, PMID: <pub-id pub-id-type="pmid">39173183</pub-id></citation></ref>
<ref id="ref8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Economou-Zavlanos</surname> <given-names>N. J.</given-names></name> <name><surname>Bessias</surname> <given-names>S.</given-names></name> <name><surname>Cary</surname> <given-names>M. P.</given-names> <suffix>Jr.</suffix></name> <name><surname>Bedoya</surname> <given-names>A. D.</given-names></name> <name><surname>Goldstein</surname> <given-names>B. A.</given-names></name> <name><surname>Jelovsek</surname> <given-names>J. E.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Translating ethical and quality principles for the effective, safe and fair development, deployment and use of artificial intelligence technologies in healthcare</article-title>. <source>J. Am. Med. Inform. Assoc.</source> <volume>31</volume>, <fpage>705</fpage>&#x2013;<lpage>713</lpage>. doi: <pub-id pub-id-type="doi">10.1093/jamia/ocad221</pub-id>, PMID: <pub-id pub-id-type="pmid">38031481</pub-id></citation></ref>
<ref id="ref9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ferryman</surname> <given-names>K.</given-names></name> <name><surname>Mackintosh</surname> <given-names>M.</given-names></name> <name><surname>Ghassemi</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Considering biased data as informative artifacts in AI-assisted health care</article-title>. <source>N. Engl. J. Med.</source> <volume>389</volume>, <fpage>833</fpage>&#x2013;<lpage>838</lpage>. doi: <pub-id pub-id-type="doi">10.1056/NEJMra2214964</pub-id>, PMID: <pub-id pub-id-type="pmid">37646680</pub-id></citation></ref>
<ref id="ref10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gaonkar</surname> <given-names>B.</given-names></name> <name><surname>Cook</surname> <given-names>K.</given-names></name> <name><surname>Macyszyn</surname> <given-names>L.</given-names></name></person-group> (<year>2023</year>). <article-title>Ethical issues arising due to Bias in training a.I. Algorithms in healthcare and data sharing as a potential solution</article-title>. <source>AI Ethics J.</source> <volume>1</volume>:<fpage>916</fpage>. doi: <pub-id pub-id-type="doi">10.47289/AIEJ20200916</pub-id></citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Garcia Valencia</surname> <given-names>O. A.</given-names></name> <name><surname>Suppadungsuk</surname> <given-names>S.</given-names></name> <name><surname>Thongprayoon</surname> <given-names>C.</given-names></name> <name><surname>Miao</surname> <given-names>J.</given-names></name> <name><surname>Tangpanithandee</surname> <given-names>S.</given-names></name> <name><surname>Craici</surname> <given-names>I. M.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Ethical implications of Chatbot utilization in nephrology</article-title>. <source>J. Pers. Med.</source> <volume>13</volume>:<fpage>1363</fpage>. doi: <pub-id pub-id-type="doi">10.3390/jpm13091363</pub-id>, PMID: <pub-id pub-id-type="pmid">37763131</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jager</surname> <given-names>K. J.</given-names></name> <name><surname>Kovesdy</surname> <given-names>C.</given-names></name> <name><surname>Langham</surname> <given-names>R.</given-names></name> <name><surname>Rosenberg</surname> <given-names>M.</given-names></name> <name><surname>Jha</surname> <given-names>V.</given-names></name> <name><surname>Zoccali</surname> <given-names>C.</given-names></name></person-group> (<year>2019</year>). <article-title>A single number for advocacy and communication-worldwide more than 850 million individuals have kidney diseases</article-title>. <source>Nephrol. Dial. Transplant.</source> <volume>34</volume>, <fpage>1803</fpage>&#x2013;<lpage>1805</lpage>. doi: <pub-id pub-id-type="doi">10.1093/ndt/gfz174</pub-id>, PMID: <pub-id pub-id-type="pmid">31566230</pub-id></citation></ref>
<ref id="ref13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jha</surname> <given-names>V.</given-names></name> <name><surname>Al-Ghamdi</surname> <given-names>S. M. G.</given-names></name> <name><surname>Li</surname> <given-names>G.</given-names></name> <name><surname>Wu</surname> <given-names>M. S.</given-names></name> <name><surname>Stafylas</surname> <given-names>P.</given-names></name> <name><surname>Retat</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Global economic burden associated with chronic kidney disease: a pragmatic review of medical costs for the inside CKD research Programme</article-title>. <source>Adv. Ther.</source> <volume>40</volume>, <fpage>4405</fpage>&#x2013;<lpage>4420</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12325-023-02608-9</pub-id>, PMID: <pub-id pub-id-type="pmid">37493856</pub-id></citation></ref>
<ref id="ref14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koirala</surname> <given-names>P.</given-names></name> <name><surname>Thongprayoon</surname> <given-names>C.</given-names></name> <name><surname>Miao</surname> <given-names>J.</given-names></name> <name><surname>Garcia Valencia</surname> <given-names>O. A.</given-names></name> <name><surname>Sheikh</surname> <given-names>M. S.</given-names></name> <name><surname>Suppadungsuk</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Evaluating AI performance in nephrology triage and subspecialty referrals</article-title>. <source>Sci. Rep.</source> <volume>15</volume>:<fpage>3455</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-025-88074-5</pub-id>, PMID: <pub-id pub-id-type="pmid">39870788</pub-id></citation></ref>
<ref id="ref15"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Kuhlman</surname> <given-names>C.</given-names></name> <name><surname>Jackson</surname> <given-names>L.</given-names></name> <name><surname>Chunara</surname> <given-names>R.</given-names></name></person-group> (<year>2020</year>). &#x201C;No Computation without Representation: Avoiding Data and Algorithm Biases through Diversity,&#x201D; in <italic>Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery &#x0026; Data Mining</italic> (Virtual Event, CA: Association for Computing Machinery), 3593.</citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lammons</surname> <given-names>W.</given-names></name> <name><surname>Silkens</surname> <given-names>M.</given-names></name> <name><surname>Hunter</surname> <given-names>J.</given-names></name> <name><surname>Shah</surname> <given-names>S.</given-names></name> <name><surname>Stavropoulou</surname> <given-names>C.</given-names></name></person-group> (<year>2023</year>). <article-title>Centering public perceptions on translating AI into clinical practice: patient and public involvement and engagement consultation focus group study</article-title>. <source>J. Med. Internet Res.</source> <volume>25</volume>:<fpage>e49303</fpage>. doi: <pub-id pub-id-type="doi">10.2196/49303</pub-id></citation></ref>
<ref id="ref17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Meng</surname> <given-names>C.</given-names></name> <name><surname>Trinh</surname> <given-names>L.</given-names></name> <name><surname>Xu</surname> <given-names>N.</given-names></name> <name><surname>Enouen</surname> <given-names>J.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Interpretability and fairness evaluation of deep learning models on MIMIC-IV dataset</article-title>. <source>Sci. Rep.</source> <volume>12</volume>:<fpage>7166</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-022-11012-2</pub-id>, PMID: <pub-id pub-id-type="pmid">35504931</pub-id></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mennella</surname> <given-names>C.</given-names></name> <name><surname>Maniscalco</surname> <given-names>U.</given-names></name> <name><surname>De Pietro</surname> <given-names>G.</given-names></name> <name><surname>Esposito</surname> <given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>Ethical and regulatory challenges of AI technologies in healthcare: a narrative review</article-title>. <source>Heliyon.</source> <volume>10</volume>:<fpage>e26297</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e26297</pub-id>, PMID: <pub-id pub-id-type="pmid">38384518</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Miao</surname> <given-names>J.</given-names></name> <name><surname>Thongprayoon</surname> <given-names>C.</given-names></name> <name><surname>Kashani</surname> <given-names>K. B.</given-names></name> <name><surname>Cheungpasitporn</surname> <given-names>W.</given-names></name></person-group> (<year>2025</year>). <article-title>Artificial intelligence as a tool for improving health literacy in kidney care</article-title>. <source>PLOS Digit Health.</source> <volume>4</volume>:<fpage>e0000746</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pdig.0000746</pub-id>, PMID: <pub-id pub-id-type="pmid">39982949</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mohammed</surname> <given-names>Y. N.</given-names></name> <name><surname>Khurana</surname> <given-names>S.</given-names></name> <name><surname>Gulati</surname> <given-names>A.</given-names></name> <name><surname>Rahaman</surname> <given-names>Z.</given-names></name> <name><surname>Lohana</surname> <given-names>A. C.</given-names></name> <name><surname>Santosh</surname> <given-names>R.</given-names></name></person-group> (<year>2024</year>). <article-title>Advancing healthcare equity in nephrology: addressing racial and ethnic disparities in research trials and treatment strategies</article-title>. <source>Cureus.</source> <volume>16</volume>:<fpage>e56913</fpage>. doi: <pub-id pub-id-type="doi">10.7759/cureus.56913</pub-id>, PMID: <pub-id pub-id-type="pmid">38659516</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mudgal</surname> <given-names>K. S.</given-names></name> <name><surname>Das</surname> <given-names>N.</given-names></name></person-group> (<year>2020</year>). <article-title>The ethical adoption of artificial intelligence in radiology</article-title>. <source>BJR|Open.</source> <volume>2</volume>:<fpage>20190020</fpage>. doi: <pub-id pub-id-type="doi">10.1259/bjro.20190020</pub-id>, PMID: <pub-id pub-id-type="pmid">33178959</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nazer</surname> <given-names>L. H.</given-names></name> <name><surname>Zatarah</surname> <given-names>R.</given-names></name> <name><surname>Waldrip</surname> <given-names>S.</given-names></name> <name><surname>Ke</surname> <given-names>J. X. C.</given-names></name> <name><surname>Moukheiber</surname> <given-names>M.</given-names></name> <name><surname>Khanna</surname> <given-names>A. K.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Bias in artificial intelligence algorithms and recommendations for mitigation</article-title>. <source>PLOS Digit Health.</source> <volume>2</volume>:<fpage>e0000278</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pdig.0000278</pub-id>, PMID: <pub-id pub-id-type="pmid">37347721</pub-id></citation></ref>
<ref id="ref23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Obermeyer</surname> <given-names>Z.</given-names></name> <name><surname>Powers</surname> <given-names>B.</given-names></name> <name><surname>Vogeli</surname> <given-names>C.</given-names></name> <name><surname>Mullainathan</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). <article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title>. <source>Science</source> <volume>366</volume>, <fpage>447</fpage>&#x2013;<lpage>453</lpage>. doi: <pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id>, PMID: <pub-id pub-id-type="pmid">31649194</pub-id></citation></ref>
<ref id="ref24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Omiye</surname> <given-names>J. A.</given-names></name> <name><surname>Lester</surname> <given-names>J. C.</given-names></name> <name><surname>Spichak</surname> <given-names>S.</given-names></name> <name><surname>Rotemberg</surname> <given-names>V.</given-names></name> <name><surname>Daneshjou</surname> <given-names>R.</given-names></name></person-group> (<year>2023</year>). <article-title>Large language models propagate race-based medicine</article-title>. <source>NPJ Digital Med.</source> <volume>6</volume>:<fpage>195</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41746-023-00939-z</pub-id>, PMID: <pub-id pub-id-type="pmid">37864012</pub-id></citation></ref>
<ref id="ref25"><citation citation-type="other"><person-group person-group-type="author"><collab id="coll1">Open AI. Introducing Chat GPT</collab></person-group> (<year>2024</year>). Available online at: <ext-link xlink:href="https://chat.openai.com/" ext-link-type="uri">https://chat.openai.com/</ext-link> (Accessed March 2, 2024).</citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pais</surname> <given-names>P.</given-names></name> <name><surname>Iyengar</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Kidney Care for all: addressing the gaps in an imperfect world-a global perspective on improving access to kidney Care in low-Resource Settings</article-title>. <source>Kidney</source> <volume>4</volume>, <fpage>982</fpage>&#x2013;<lpage>986</lpage>. doi: <pub-id pub-id-type="doi">10.34067/KID.0000000000000128</pub-id></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Parikh</surname> <given-names>R. B.</given-names></name> <name><surname>Teeple</surname> <given-names>S.</given-names></name> <name><surname>Navathe</surname> <given-names>A. S.</given-names></name></person-group> (<year>2019</year>). <article-title>Addressing Bias in artificial intelligence in health care</article-title>. <source>JAMA</source> <volume>322</volume>, <fpage>2377</fpage>&#x2013;<lpage>2378</lpage>. doi: <pub-id pub-id-type="doi">10.1001/jama.2019.18058</pub-id>, PMID: <pub-id pub-id-type="pmid">31755905</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pham</surname> <given-names>J. H.</given-names></name> <name><surname>Thongprayoon</surname> <given-names>C.</given-names></name> <name><surname>Miao</surname> <given-names>J.</given-names></name> <name><surname>Suppadungsuk</surname> <given-names>S.</given-names></name> <name><surname>Koirala</surname> <given-names>P.</given-names></name> <name><surname>Craici</surname> <given-names>I. M.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Large language model triaging of simulated nephrology patient inbox messages</article-title>. <source>Front. Artif Intell.</source> <volume>7</volume>:<fpage>1452469</fpage>. doi: <pub-id pub-id-type="doi">10.3389/frai.2024.1452469</pub-id>, PMID: <pub-id pub-id-type="pmid">39315245</pub-id></citation></ref>
<ref id="ref29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pinho-Gomes</surname> <given-names>A. C.</given-names></name> <name><surname>Carcel</surname> <given-names>C.</given-names></name> <name><surname>Woodward</surname> <given-names>M.</given-names></name> <name><surname>Hockham</surname> <given-names>C.</given-names></name></person-group> (<year>2023</year>). <article-title>Women's representation in clinical trials of patients with chronic kidney disease</article-title>. <source>Clin. Kidney J.</source> <volume>16</volume>, <fpage>1457</fpage>&#x2013;<lpage>1464</lpage>. doi: <pub-id pub-id-type="doi">10.1093/ckj/sfad018</pub-id>, PMID: <pub-id pub-id-type="pmid">37664564</pub-id></citation></ref>
<ref id="ref30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Salsberg</surname> <given-names>E.</given-names></name> <name><surname>Richwine</surname> <given-names>C.</given-names></name> <name><surname>Westergaard</surname> <given-names>S.</given-names></name> <name><surname>Portela Martinez</surname> <given-names>M.</given-names></name> <name><surname>Oyeyemi</surname> <given-names>T.</given-names></name> <name><surname>Vichare</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Estimation and comparison of current and future racial/ethnic representation in the US health care workforce</article-title>. <source>JAMA Netw. Open</source> <volume>4</volume>:<fpage>e213789-e</fpage>. doi: <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2021.3789</pub-id>, PMID: <pub-id pub-id-type="pmid">33787910</pub-id></citation></ref>
<ref id="ref31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sheikh</surname> <given-names>M. S.</given-names></name> <name><surname>Kashani</surname> <given-names>K. B.</given-names></name> <name><surname>Gregoire</surname> <given-names>J. R.</given-names></name> <name><surname>Thongprayoon</surname> <given-names>C.</given-names></name> <name><surname>Miao</surname> <given-names>J.</given-names></name> <name><surname>Craici</surname> <given-names>I. M.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Digital transformation of nephrology POCUS education-integrating a multiagent, artificial intelligence, and human collaboration-enhanced curriculum with expert feedback</article-title>. <source>Digit Health.</source> <volume>11</volume>:<fpage>20552076251328807</fpage>. doi: <pub-id pub-id-type="doi">10.1177/20552076251328807</pub-id>, PMID: <pub-id pub-id-type="pmid">40162173</pub-id></citation></ref>
<ref id="ref32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Solomonides</surname> <given-names>A. E.</given-names></name> <name><surname>Koski</surname> <given-names>E.</given-names></name> <name><surname>Atabaki</surname> <given-names>S. M.</given-names></name> <name><surname>Weinberg</surname> <given-names>S.</given-names></name> <name><surname>McGreevey</surname> <given-names>J. D.</given-names> <suffix>III</suffix></name> <name><surname>Kannry</surname> <given-names>J. L.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Defining AMIA's artificial intelligence principles</article-title>. <source>J. Am. Med. Inform. Assoc.</source> <volume>29</volume>, <fpage>585</fpage>&#x2013;<lpage>591</lpage>. doi: <pub-id pub-id-type="doi">10.1093/jamia/ocac006</pub-id>, PMID: <pub-id pub-id-type="pmid">35190824</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ueda</surname> <given-names>D.</given-names></name> <name><surname>Kakinuma</surname> <given-names>T.</given-names></name> <name><surname>Fujita</surname> <given-names>S.</given-names></name> <name><surname>Kamagata</surname> <given-names>K.</given-names></name> <name><surname>Fushimi</surname> <given-names>Y.</given-names></name> <name><surname>Ito</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Fairness of artificial intelligence in healthcare: review and recommendations</article-title>. <source>Jpn. J. Radiol.</source> <volume>42</volume>, <fpage>3</fpage>&#x2013;<lpage>15</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11604-023-01474-3</pub-id>, PMID: <pub-id pub-id-type="pmid">37540463</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vanholder</surname> <given-names>R.</given-names></name> <name><surname>Annemans</surname> <given-names>L.</given-names></name> <name><surname>Braks</surname> <given-names>M.</given-names></name> <name><surname>Brown</surname> <given-names>E. A.</given-names></name> <name><surname>Pais</surname> <given-names>P.</given-names></name> <name><surname>Purnell</surname> <given-names>T. S.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Inequities in kidney health and kidney care</article-title>. <source>Nat. Rev. Nephrol.</source> <volume>19</volume>, <fpage>694</fpage>&#x2013;<lpage>708</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41581-023-00745-6</pub-id>, PMID: <pub-id pub-id-type="pmid">37580571</pub-id></citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Walsh</surname> <given-names>G.</given-names></name> <name><surname>Stogiannos</surname> <given-names>N.</given-names></name> <name><surname>van de Venter</surname> <given-names>R.</given-names></name> <name><surname>Rainey</surname> <given-names>C.</given-names></name> <name><surname>Tam</surname> <given-names>W.</given-names></name> <name><surname>McFadden</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Responsible AI practice and AI education are central to AI implementation: a rapid review for all medical imaging professionals in Europe</article-title>. <source>BJR|Open.</source> <volume>5</volume>:<fpage>33</fpage>. doi: <pub-id pub-id-type="doi">10.1259/bjro.20230033</pub-id>, PMID: <pub-id pub-id-type="pmid">37953871</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>X.</given-names></name> <name><surname>Jin</surname> <given-names>Q.</given-names></name> <name><surname>Huang</surname> <given-names>F.</given-names></name> <name><surname>Lu</surname> <given-names>Z.</given-names></name></person-group> (<year>2024</year>). <article-title>Unmasking and quantifying racial Bias of large language models in medical report generation</article-title>. <source>ArXiv.</source> <volume>4</volume>:<fpage>601</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s43856-024-00601-z</pub-id>, PMID: <pub-id pub-id-type="pmid">39398204</pub-id></citation></ref>
</ref-list>
</back>
</article>