<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="research-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Digit. Health</journal-id><journal-title-group>
<journal-title>Frontiers in Digital Health</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Digit. Health</abbrev-journal-title></journal-title-group>
<issn pub-type="epub">2673-253X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fdgth.2025.1664345</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Influencing public acceptance of artificial intelligence (AI) in healthcare delivery</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author"><name><surname>Aras</surname><given-names>Selin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3129412/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Drakos</surname><given-names>Calvin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3107746/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role></contrib>
<contrib contrib-type="author"><name><surname>Manimangalam</surname><given-names>Vineesha</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3196668/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role></contrib>
<contrib contrib-type="author"><name><surname>Nasir</surname><given-names>Moiz Ali</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3131511/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Burns</surname><given-names>Christina</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2926096/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role></contrib>
<contrib contrib-type="author"><name><surname>Smith</surname><given-names>Davey</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role></contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Equils</surname><given-names>Ozlem</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role><role vocab="credit" 
vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role></contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>MiOra</institution>, <city>Los Angeles</city>, <state>CA</state>, <country country="US">United States</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Medicine, University of California</institution>, <city>San Diego, La Jolla</city>, <state>CA</state>, <country country="US">United States</country></aff>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Ozlem Equils <email xlink:href="mailto:oequils@miora.org">oequils@miora.org</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-13"><day>13</day><month>1</month><year>2026</year></pub-date>
<pub-date publication-format="electronic" date-type="collection"><year>2025</year></pub-date>
<volume>7</volume><elocation-id>1664345</elocation-id>
<history>
<date date-type="received"><day>11</day><month>07</month><year>2025</year></date>
<date date-type="rev-recd"><day>28</day><month>11</month><year>2025</year></date>
<date date-type="accepted"><day>12</day><month>12</month><year>2025</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2026 Aras, Drakos, Manimangalam, Nasir, Burns, Smith and Equils.</copyright-statement>
<copyright-year>2026</copyright-year><copyright-holder>Aras, Drakos, Manimangalam, Nasir, Burns, Smith and Equils</copyright-holder><license><ali:license_ref start_date="2026-01-13">https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p></license>
</permissions>
<abstract><sec><title>Introduction</title>
<p>Despite the potential of artificial intelligence (AI) to transform healthcare delivery and reduce costs, adoption remains uneven across populations. Understanding the demographic, behavioral, and cognitive factors influencing public willingness to use AI-powered health tools is critical for equitable implementation. This study examined determinants of AI adoption in healthcare among adults in the United States (U.S.).</p>
</sec><sec><title>Methods</title>
<p>A cross-sectional survey was conducted between March and June 2024 using convenience sampling across the U.S. The study included 568 adult respondents recruited via Qualtrics. The survey assessed demographic characteristics, digital health behaviors, self-reported health status, cognitive and attitudinal factors, and behavioral intentions related to AI use in healthcare. Logistic regression models were used to examine associations between predictors and willingness to adopt AI, with z-tests for subgroup comparisons and Bonferroni correction applied for multiple hypothesis testing.</p>
</sec><sec><title>Results</title>
<p>The sample was predominantly female (66.7&#x0025;) and Hispanic/Latino (50.7&#x0025;), with moderate income and education levels. Older age was negatively associated with AI adoption (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;&#x2212;0.029), males were less likely to use AI than females (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;&#x2212;0.388), and income was positively correlated with AI adoption (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;0.096). Trust in AI was substantially lower than trust in physicians: 14.6&#x0025; trusted ChatGPT&#x0027;s diagnosis for serious illness compared with 92.3&#x0025; trusting physicians, and 17.1&#x0025; versus 96.4&#x0025; for specialist referrals. Telehealth use strongly predicted AI adoption (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;1.012), while lower self-rated mental health was associated with higher AI use (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;&#x2212;0.254). Uninsured participants reported higher trust in AI diagnostic capabilities than insured participants (57&#x0025; vs. 43&#x0025;, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.05). Ethnic differences were observed, with Asian participants reporting higher AI usage rates than Hispanic participants (16.49&#x0025; vs. 5.56&#x0025;, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.05).</p>
</sec><sec><title>Discussion</title>
<p>AI adoption in healthcare is shaped by the interaction of demographic, socioeconomic, and cultural factors. While AI has the potential to expand healthcare access, adoption patterns reflect existing disparities in healthcare access and trust. Trust emerged as a central determinant, with AI functioning as a compensatory tool when traditional healthcare access is limited. Given the U.S.-specific context, findings should be interpreted as exploratory and may not generalize to other healthcare systems. These results highlight the need for future research on transparency, digital literacy, and structural barriers to support equitable implementation of healthcare AI.</p>
</sec>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>behavioral models</kwd>
<kwd>health anxiety</kwd>
<kwd>healthcare delivery</kwd>
<kwd>public trust</kwd>
<kwd>social cognitive theory</kwd>
<kwd>technology adoption</kwd>
<kwd>theory of planned behavior</kwd>
</kwd-group><funding-group><funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research was funded by MiOra. None of the authors received compensation.</funding-statement></funding-group><counts>
<fig-count count="0"/>
<table-count count="3"/><equation-count count="0"/><ref-count count="43"/><page-count count="11"/><word-count count="0"/></counts><custom-meta-group><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Human Factors and Digital Health</meta-value></custom-meta></custom-meta-group>
</article-meta>
</front>
<body><sec id="s1" sec-type="intro"><title>Introduction</title>
<p>Over the past decade, healthcare technologies have advanced at an unprecedented pace. Among these developments, AI has emerged as a transformative force in healthcare delivery, with the potential to significantly improve diagnostic accuracy, optimize treatment planning, and enhance administrative efficiency (<xref ref-type="bibr" rid="B1">1</xref>). Projections indicate that by 2026, AI applications could reduce annual U.S. healthcare costs by approximately &#x0024;150 billion (<xref ref-type="bibr" rid="B2">2</xref>). Supplementing this estimate, a study assessing the broader adoption of AI found that &#x0024;360 billion could be saved annually through AI&#x0027;s potential for medical error reduction (<xref ref-type="bibr" rid="B3">3</xref>). Ranging from machine learning algorithms to natural language processing and predictive analytics, AI-powered tools are becoming increasingly integrated into clinical workflows, promising to improve outcomes for both physicians and patients (<xref ref-type="bibr" rid="B4">4</xref>).</p>
<table-wrap id="T1" position="float"><label>Table&#x00A0;1</label>
<caption><p>Demographic profile of survey respondents (<italic>N</italic>&#x2009;&#x003D;&#x2009;568).</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Category</th>
<th valign="top" align="left">Response</th>
<th valign="top" align="center">Percentage</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="5">Household Income</td>
<td valign="top" align="left">&#x0024;0&#x2013;&#x0024;22,000</td>
<td valign="top" align="center">21.70&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">&#x0024;22,000&#x2013;&#x0024;89,450</td>
<td valign="top" align="center">43.70&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">&#x0024;89,451&#x2013;&#x0024;190,750</td>
<td valign="top" align="center">24.10&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">&#x0024;190,751&#x2013;&#x0024;384,200</td>
<td valign="top" align="center">6.00&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">&#x003E;&#x0024;384,200</td>
<td valign="top" align="center">4.60&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="8">Education Level</td>
<td valign="top" align="left">Less than High School</td>
<td valign="top" align="center">3.70&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">High School Diploma</td>
<td valign="top" align="center">13.00&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Some College</td>
<td valign="top" align="center">22.90&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">2-year Degree</td>
<td valign="top" align="center">11.60&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">4-year Degree</td>
<td valign="top" align="center">32.00&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Masters</td>
<td valign="top" align="center">11.10&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Doctorate</td>
<td valign="top" align="center">3.30&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Technical or Trade School</td>
<td valign="top" align="center">2.30&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="6">Ethnicity</td>
<td valign="top" align="left">Hispanic/Latino</td>
<td valign="top" align="center">50.70&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Asian</td>
<td valign="top" align="center">17.10&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">White/Caucasian</td>
<td valign="top" align="center">11.60&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Black/African American</td>
<td valign="top" align="center">10.20&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">2&#x002B; Ethnicities</td>
<td valign="top" align="center">5.10&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Other&#x002A;</td>
<td valign="top" align="center">5.30&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">Sex</td>
<td valign="top" align="left">Female</td>
<td valign="top" align="center">66.70&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Male</td>
<td valign="top" align="center">33.30&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">Political Affiliation</td>
<td valign="top" align="left">Undecided</td>
<td valign="top" align="center">35.20&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Liberal/Left Wing</td>
<td valign="top" align="center">32.20&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Moderate</td>
<td valign="top" align="center">22.90&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Conservative/Right Wing</td>
<td valign="top" align="center">9.70&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">Healthcare-Related Career</td>
<td valign="top" align="left">Work in healthcare/science</td>
<td valign="top" align="center">32.50&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Do not work in healthcare/science</td>
<td valign="top" align="center">67.50&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">Health Insurance</td>
<td valign="top" align="left">Has health insurance</td>
<td valign="top" align="center">87.30&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Does not have health insurance</td>
<td valign="top" align="center">12.70&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">Age</td>
<td valign="top" align="left">Median age</td>
<td valign="top" align="center">31 years</td>
</tr>
<tr>
<td valign="top" align="left">Age range</td>
<td valign="top" align="center">18&#x2013;83 years</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="5">Trust in Healthcare System</td>
<td valign="top" align="left">Completely trust</td>
<td valign="top" align="center">8.80&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Trust most of the time</td>
<td valign="top" align="center">43.60&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Neutral/ambivalent</td>
<td valign="top" align="center">34.00&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Don&#x0027;t trust that much</td>
<td valign="top" align="center">11.50&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Don&#x0027;t trust at all</td>
<td valign="top" align="center">2.10&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">Doctor Visit Frequency</td>
<td valign="top" align="left">For a yearly check-up:</td>
<td valign="top" align="center">50.0&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">To follow-up on something I have been seen for before</td>
<td valign="top" align="center">35.4&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">When I am sick or have new symptoms</td>
<td valign="top" align="center">58.9&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Never</td>
<td valign="top" align="center">8.5&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">AI Usage</td>
<td valign="top" align="left">Use AI for at least one purpose</td>
<td valign="top" align="center">45.10&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Do not use AI</td>
<td valign="top" align="center">54.90&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">AI Use Cases</td>
<td valign="top" align="left">School</td>
<td valign="top" align="center">27.40&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Work</td>
<td valign="top" align="center">22.10&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Socialization</td>
<td valign="top" align="center">14.80&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Health/personal care</td>
<td valign="top" align="center">9.20&#x0025;</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="TF1"><p>&#x002A;Other includes Middle Eastern, Native Hawaiian or Other Pacific Islander, South Asian, American Indian or Alaska Native.</p></fn>
</table-wrap-foot>
</table-wrap>
<table-wrap id="T2" position="float"><label>Table&#x00A0;2</label>
<caption><p>Logistic regression coefficients for predicting community AI Use for healthcare.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Predictor</th>
<th valign="top" align="center">Coefficient (<italic>&#x03B2;</italic>)</th>
<th valign="top" align="center">CI 2.5&#x0025;</th>
<th valign="top" align="center">CI 97.5&#x0025;</th>
<th valign="top" align="center">SE</th>
<th valign="top" align="center"><italic>p</italic>-value</th>
<th valign="top" align="center">OR</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Age</td>
<td valign="top" align="center">&#x2212;0.029</td>
<td valign="top" align="center">&#x2212;0.0740</td>
<td valign="top" align="center">&#x2212;0.0090</td>
<td valign="top" align="center">0.0163</td>
<td valign="top" align="center">0.006</td>
<td valign="top" align="center"><bold>0</bold><bold>.</bold><bold>971</bold></td>
</tr>
<tr>
<td valign="top" align="left">Gender identity (encoded)</td>
<td valign="top" align="center">&#x2212;0.388</td>
<td valign="top" align="center">&#x2212;1.0920</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.3396</td>
<td valign="top" align="center">0.042</td>
<td valign="top" align="center"><bold>0</bold><bold>.</bold><bold>678</bold></td>
</tr>
<tr>
<td valign="top" align="left">Mental health rating</td>
<td valign="top" align="center">&#x2212;0.254</td>
<td valign="top" align="center">&#x2212;0.5857</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.1510</td>
<td valign="top" align="center">0.026</td>
<td valign="top" align="center"><bold>0</bold><bold>.</bold><bold>776</bold></td>
</tr>
<tr>
<td valign="top" align="left">Income (encoded)</td>
<td valign="top" align="center">0.096</td>
<td valign="top" align="center">&#x2212;0.1498</td>
<td valign="top" align="center">0.3206</td>
<td valign="top" align="center">0.1157</td>
<td valign="top" align="center">0.392</td>
<td valign="top" align="center"><bold>1</bold><bold>.</bold><bold>101</bold></td>
</tr>
<tr>
<td valign="top" align="left">Education (encoded)</td>
<td valign="top" align="center">&#x2212;0.0035</td>
<td valign="top" align="center">&#x2212;0.1652</td>
<td valign="top" align="center">0.1569</td>
<td valign="top" align="center">0.0848</td>
<td valign="top" align="center">0.902</td>
<td valign="top" align="center"><bold>0</bold><bold>.</bold><bold>996</bold></td>
</tr>
<tr>
<td valign="top" align="left">Use of mental health apps</td>
<td valign="top" align="center">&#x2212;0.349</td>
<td valign="top" align="center">&#x2212;1.2785</td>
<td valign="top" align="center">0.2432</td>
<td valign="top" align="center">0.4051</td>
<td valign="top" align="center">0.158</td>
<td valign="top" align="center"><bold>0</bold><bold>.</bold><bold>705</bold></td>
</tr>
<tr>
<td valign="top" align="left">Use of exercise apps</td>
<td valign="top" align="center">0.000</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.000</td>
<td valign="top" align="center"><bold>1</bold><bold>.</bold><bold>000</bold></td>
</tr>
<tr>
<td valign="top" align="left">Use of menstrual tracking apps</td>
<td valign="top" align="center">0.260</td>
<td valign="top" align="center">&#x2212;0.3230</td>
<td valign="top" align="center">0.8129</td>
<td valign="top" align="center">0.2777</td>
<td valign="top" align="center">0.284</td>
<td valign="top" align="center"><bold>1</bold><bold>.</bold><bold>297</bold></td>
</tr>
<tr>
<td valign="top" align="left">Use of electronic chart system</td>
<td valign="top" align="center">0.000</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.000</td>
<td valign="top" align="center"><bold>1</bold><bold>.</bold><bold>000</bold></td>
</tr>
<tr>
<td valign="top" align="left">Use of biosensors</td>
<td valign="top" align="center">0.000</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.0000</td>
<td valign="top" align="center">0.000</td>
<td valign="top" align="center"><bold>1</bold><bold>.</bold><bold>000</bold></td>
</tr>
<tr>
<td valign="top" align="left">Use of telehealth/telemedicine</td>
<td valign="top" align="center">1.012</td>
<td valign="top" align="center">0.4347</td>
<td valign="top" align="center">1.6379</td>
<td valign="top" align="center">0.3093</td>
<td valign="top" align="center">0.000</td>
<td valign="top" align="center"><bold>2</bold><bold>.</bold><bold>751</bold></td>
</tr>
<tr>
<td valign="top" align="left">Not having a private place to use telemedicine</td>
<td valign="top" align="center">&#x2212;0.2560</td>
<td valign="top" align="center">&#x2212;1.4065</td>
<td valign="top" align="center">0.2682</td>
<td valign="top" align="center">0.4144</td>
<td valign="top" align="center">0.154</td>
<td valign="top" align="center"><bold>0</bold><bold>.</bold><bold>774</bold></td>
</tr>
<tr>
<td valign="top" align="left">Increased level of trust in the healthcare system and government organization health recommendations</td>
<td valign="top" align="center">&#x2212;0.1936</td>
<td valign="top" align="center">&#x2212;0.5177</td>
<td valign="top" align="center">0.1163</td>
<td valign="top" align="center">0.1707</td>
<td valign="top" align="center">0.200</td>
<td valign="top" align="center"><bold>0</bold><bold>.</bold><bold>824</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="TF2"><p>CI, confidence interval; SE, standard error; OR, odds-ratio.</p></fn>
<fn>
<p>Bold values indicate statistically significant associations (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.05).</p></fn>
</table-wrap-foot>
</table-wrap>
<table-wrap id="T3" position="float"><label>Table&#x00A0;3</label>
<caption><p>Community use and attitudes towards AI.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Category</th>
<th valign="top" align="center">Count (<italic>N</italic>)</th>
<th valign="top" align="center">Proportion (&#x0025;)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Uses AI (any purpose)</td>
<td valign="top" align="center">262</td>
<td valign="top" align="center">46.2&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Trusting a Physician&#x0027;s Diagnosis for a Serious Illness</td>
<td valign="top" align="center">524</td>
<td valign="top" align="center">92.3&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Trusting ChatGPT&#x0027;s Diagnosis for a Serious Illness</td>
<td valign="top" align="center">83</td>
<td valign="top" align="center">14.6&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Trusting a Physician&#x0027;s Referral for a Specialist</td>
<td valign="top" align="center">548</td>
<td valign="top" align="center">96.4&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Trusting ChatGPT&#x0027;s Referral for a Specialist</td>
<td valign="top" align="center">97</td>
<td valign="top" align="center">17.1&#x0025;</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="TF3"><p>AI, artificial intelligence.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>Despite this transformative promise, the adoption of AI in healthcare remains uneven across populations. General acceptance of AI is influenced by an interplay of cognitive, behavioral, and structural factors, including digital literacy, perceptions of usefulness, provider trust, and concerns surrounding algorithmic fairness and transparency (<xref ref-type="bibr" rid="B5">5</xref>). Surveys consistently show that while Americans welcome AI for administrative or supportive tasks, they are more hesitant about its role in clinical decision-making (<xref ref-type="bibr" rid="B6">6</xref>). Additionally, a systematic review by Beets et al. (<xref ref-type="bibr" rid="B7">7</xref>) found that although many U.S. adults acknowledge AI&#x0027;s potential, concerns about data privacy, transparency, and trust persist. Similarly, Witkowski et al. (<xref ref-type="bibr" rid="B8">8</xref>) reported that one-third of survey respondents trusted AI to provide diagnoses, citing a fear of losing the &#x201C;human touch&#x201D; in healthcare; however, similar to Tyson et al.&#x0027;s (<xref ref-type="bibr" rid="B6">6</xref>) findings, most respondents were comfortable with AI&#x0027;s administrative duties, such as scheduling appointments.</p>
<p>Moreover, recent national data adds to the complexity of U.S. adults&#x0027; attitudes toward AI in healthcare. A 2025 nationally representative survey found that only about 19.55&#x0025; of respondents believed AI would improve their personal doctor-patient relationship or healthcare affordability. Notably, individuals with higher levels of trust in providers displayed more positive expectations of AI as a strong tool for healthcare practices (<xref ref-type="bibr" rid="B9">9</xref>). Similarly, an earlier survey of over 900 U.S. adults reported that openness to AI varied significantly by demographic factors such as age and education, and that personality traits like trust and psychosocial factors strongly influenced whether individuals viewed AI in healthcare as beneficial or concerning (<xref ref-type="bibr" rid="B10">10</xref>). These findings underscore that public perceptions of AI are not only shaped by technological promise but also by relational and systemic concerns that draw from one&#x0027;s demographic and psychological determinants.</p>
<p>Hesitation about AI also extends to the system level. Organizational studies show uneven deployment of AI across U.S. health systems. For example, Poon et al. (<xref ref-type="bibr" rid="B11">11</xref>) noted that while generative AI tools such as ambient documentation have gained traction, many clinical AI applications face barriers due to regulatory uncertainty, immature tools, and financial concerns. This variation highlights that adoption is also influenced by regulations and institutional readiness.</p>
</sec>
<sec id="s2"><title>Limitations of existing literature</title>
<p>While several studies (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B10">10</xref>) provide valuable insights, they also carry limitations that warrant further study. Many of these studies (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B10">10</xref>) relied on regional or convenience samples, limiting generalizability. Others (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B9">9</xref>) used inconsistent survey methods, which complicates cross-comparisons. Large-scale surveys, such as Tyson et al. (<xref ref-type="bibr" rid="B6">6</xref>), risk framing effects that may amplify public fears of AI, while cross-sectional designs, including those of Nong and Ji (<xref ref-type="bibr" rid="B9">9</xref>) and Antes et al. (<xref ref-type="bibr" rid="B10">10</xref>), limit the ability to determine whether one factor directly causes another. Recent scoping reviews add to this evidence: Botha et al. (<xref ref-type="bibr" rid="B12">12</xref>) mapped the perceived benefits of AI, identifying improvements in diagnostic sensitivity, workflow efficiency, and error reduction, while also showing that evidence is scattered and often disease specific. In contrast, Botha et al. (<xref ref-type="bibr" rid="B13">13</xref>) mapped the perceived threats, including unpredictable errors, loss of the &#x201C;human touch,&#x201D; insufficient regulatory frameworks, algorithmic bias, and data privacy concerns. Together, these reviews reinforce that much of the existing literature remains fragmented, addressing isolated issues such as trust, usability, or fairness without integrating how these factors interact with demographic and psychosocial determinants. To address this gap, we applied an integrated framework from the behavioral sciences. 
We drew on key perspectives from Social Cognitive Theory, which examines the role of personal experience, environmental context, and observational learning in shaping behavior (<xref ref-type="bibr" rid="B14">14</xref>); from the Theory of Reasoned Action (TRA) and the Theory of Planned Behavior (TPB), which emphasize that behavioral intentions are driven by attitudes, perceived social norms, and perceived behavioral control (<xref ref-type="bibr" rid="B15">15</xref>); and from trust-centered models of technology acceptance, which suggest that trust plays a critical mediating role, particularly in contexts where users must rely on automated or opaque systems (<xref ref-type="bibr" rid="B16">16</xref>). In addition, the Health Belief Model and the Short Health Anxiety Inventory framework were incorporated to capture the influence of perceived health risks, anxiety, and individual health status on decision-making and behavior (<xref ref-type="bibr" rid="B17">17</xref>). This allows us to create a unified model of AI adoption. Unlike prior studies that examine single determinants in isolation, such as trust, demographics, or perceived usability, our approach synthesizes cognitive, demographic, psychosocial, and trust-related predictors within a single analytic framework. This integrated perspective makes it possible to examine how these factors interact rather than treating them as separate or unrelated contributors. By focusing on the general United States adult population and including individuals who actively use generative AI for health information, who represent an understudied and increasingly relevant subgroup, this study extends prior work that has relied heavily on regional, convenience, or profession-specific samples. Together, these design choices differentiate our study from existing literature even though a cross-sectional survey format was necessary.</p>
<p>By situating this study within these theoretical frameworks, we aim to provide a more comprehensive understanding of how demographic, cognitive, and trust-related factors collectively shape public willingness to adopt AI-driven healthcare tools in the U.S. population.</p>
</sec>
<sec id="s3" sec-type="methods"><title>Methods</title>
<sec id="s3a"><title>Study design</title>
<p>This study employed a cross-sectional survey design, a method that collects data once per participant to assess associations between variables within a defined population (<xref ref-type="bibr" rid="B18">18</xref>). The design was used to examine factors associated with the willingness of adults in the U.S. to adopt AI in healthcare. While cross-sectional surveys are common in this field, our study builds on prior work by applying a conceptually integrated measurement framework that captures demographic, cognitive, psychosocial, and trust-related variables simultaneously, allowing for a more comprehensive assessment of AI adoption than has been achieved in earlier single-factor studies. The study protocol and the survey instrument were reviewed and approved as exempt by the Western-Institutional Review Board (Protocol &#x0023;1755408).</p>
</sec>
<sec id="s3b"><title>Sampling and data collection</title>
<p>Participants were recruited between March and June 2024 through MiOra health education networks (<xref ref-type="bibr" rid="B19">19</xref>) and Qualtrics research panels. Eligibility criteria included being 18 years or older and residing in the U.S.; the final sample ranged in age from 18 to 83 years. A total of 689 responses were collected; after excluding incomplete or invalid submissions, 568 complete entries were retained for analysis. Recruitment through Qualtrics ensured a broad national reach, while MiOra&#x0027;s health education channels facilitated participation from individuals with prior exposure to digital health. All participants provided informed electronic consent prior to participation. Surveys were conducted online, collected anonymously, and no personal identifiers were retained.</p>
</sec>
<sec id="s3c"><title>Measures</title>
<p>Survey items (<xref ref-type="sec" rid="s13">Supplementary Appendix 1</xref>) were developed to assess demographic characteristics, digital health behaviors, self-reported health status, cognitive and attitudinal factors toward AI, and behavioral intentions.</p>
<p>Instrument development was informed by prior literature on technology acceptance and trust in healthcare AI (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B10">10</xref>), which provided conceptual grounding for items measuring perceived usefulness, reliability, and trust. A similar behavioral framework, the Technology Acceptance Model (TAM), was also applied in a recent study by Svestkova A et al. (<xref ref-type="bibr" rid="B20">20</xref>), which examined community acceptance of generative AI for health purposes. Although the investigators built their hypothesis and model on prior research about medical professionals&#x0027; trust in medical AI, their data were drawn from community participants. Importantly, their findings confirmed that trust was a central mediator of willingness to use AI. Our work extends this framework by focusing specifically on non-medical populations using ChatGPT for health information, where the determinants of trust may differ substantially.</p>
<p>Items were further refined through (a) feedback from representative community members affiliated with MiOra health education programs, who reviewed the survey for clarity, readability, and cultural relevance; and (b) input from academic experts in behavioral sciences and digital health, who evaluated content validity. The survey was not piloted as a standalone study; however, iterative refinement through this process ensured face and content validity prior to launch. This iterative process helped establish face validity by ensuring that survey items were clear and culturally appropriate for respondents, and content validity by confirming that the items adequately covered the intended domains, e.g., demographic, cognitive, and attitudinal factors. Feedback from community members focused on clarity and cultural relevance, while expert reviewers emphasized conceptual alignment and comprehensiveness.</p>
<p>Demographic measures included age, gender identity, racial and ethnic background, formal educational attainment, and annual household income (reported according to an aggregated version of the 2024 tax bracket) (<xref ref-type="bibr" rid="B21">21</xref>). Digital health behavior questions assessed prior experience with AI-supported applications, including telehealth platforms, wearable biosensors, mental health apps, and menstrual tracking tools. Health-related variables included self-reported physical health, measured by &#x201C;How would you rate your physical health?&#x201D;; mental health, measured by &#x201C;How would you rate your mental health?&#x201D;; and frequency of interactions with the healthcare system.</p>
<p>To measure cognitive variables, the survey included items assessing attitudes toward AI (e.g., perceived usefulness, reliability), perceived social norms (e.g., whether healthcare providers or peers support AI), and perceived behavioral control (e.g., confidence in using AI-based tools). Trust in AI was measured using a comparative item assessing whether participants trusted AI or human providers more when making health-related decisions. A willingness to use AI was operationalized through items capturing openness to AI-generated recommendations and comfort with AI involvement in care. Representative items included &#x201C;If AI/ChatGPT told you to see a specialist, would you take it seriously?&#x201D; to assess attitudes toward AI and &#x201C;Do you agree with the statement &#x2018;A doctor is less likely to understand my healthcare needs than AI/ChatGPT&#x2019;?&#x201D; to assess perceived social norms.</p>
<p>In addition to these domains, survey items were intentionally aligned with constructs from Social Cognitive Theory (SCT), the Theory of Reasoned Action (TRA), the Theory of Planned Behavior (TPB), trust-centered technology acceptance frameworks, and the Health Belief Model (HBM). SCT concepts such as behavioral capability and environmental constraints were reflected in items assessing prior use of digital health tools (e.g., telehealth, mental health apps, biosensors) and access to privacy for telemedicine. TRA/TPB constructs were operationalized through items capturing attitudes (&#x201C;If AI/ChatGPT told you to see a specialist, would you take it seriously?&#x201D;), subjective norms (&#x201C;A doctor is less likely to understand my healthcare needs than AI/ChatGPT&#x201D;), and perceived behavioral control (comfort understanding medical information sheets, familiarity with digital tools). Trust-centered acceptance models were addressed through comparative trust items contrasting physician vs. AI diagnoses and referrals. HBM-related constructs were incorporated through items measuring perceived susceptibility and severity (self-rated physical and mental health) and perceived barriers (difficulty understanding health information sheets, lack of privacy for telemedicine, and level of trust in the healthcare system). These theoretical mappings allowed us to integrate demographic, cognitive, psychosocial, and trust-related determinants of AI adoption within a unified behavioral framework.</p>
<p>Because ChatGPT was used as the reference point for all AI-related survey items, participants may have evaluated a familiar, general-purpose chatbot rather than a clinically validated medical AI system. ChatGPT was selected because it is currently the most widely used large language model, but its consumer-facing nature may have shaped respondents&#x0027; perceptions of both AI use and AI trust in a healthcare context.</p>
</sec>
<sec id="s3d"><title>Statistical analysis</title>
<p>All analyses were conducted using Python software. Descriptive statistics, including means, standard deviations, and frequencies, were used to summarize demographic characteristics, health behaviors, and cognitive and attitudinal measures. Logistic regression models were used to examine associations between demographic (age, assigned sex, gender identity, income), health-related (mental health rating), and behavioral predictors (digital health behaviors) and the dependent variable, willingness to use AI for health-related purposes, operationalized as responses to &#x201C;Yes, I use AI/ChatGPT for health and/or personal care&#x201D; (<xref ref-type="bibr" rid="B22">22</xref>). Multi-response survey items were converted into separate binary variables to indicate each selected option. Categorical predictors were encoded numerically or dummy coded as appropriate (<xref ref-type="bibr" rid="B23">23</xref>). Predictors with zero variance (constant values across all respondents) were excluded prior to fitting the model to ensure stable coefficient estimation (<xref ref-type="bibr" rid="B24">24</xref>). All entries with missing data for the variables used in the analysis were removed (<xref ref-type="bibr" rid="B25">25</xref>). Chi-square tests and two-sample <italic>z</italic>-tests were used to compare categorical variables across subgroups, and a Bonferroni correction was applied to adjust for multiple comparisons (<xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B27">27</xref>). Logistic regression model convergence was ensured using maximum iterations set to 1,000, and statistical significance was defined at <italic>&#x03B1;</italic>&#x2009;&#x003D;&#x2009;0.05 (<xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B29">29</xref>).</p>
<p>The dependent variable for all logistic regression models was the response to the item &#x201C;Yes, I use AI/ChatGPT for health and/or personal care,&#x201D; coded as 1&#x2009;&#x003D;&#x2009;yes and 0&#x2009;&#x003D;&#x2009;no. Predictors were created directly from survey items: age was treated as a continuous variable; self-rated mental health and trust in the healthcare system were coded as ordinal variables; and each option in multi-select digital health behavior items (e.g., &#x201C;I use telehealth/telemedicine,&#x201D; &#x201C;I use a menstrual cycle tracking app,&#x201D; &#x201C;I don&#x0027;t have a private place to use telemedicine&#x201D;) was converted into its own binary indicator (0&#x2009;&#x003D;&#x2009;not selected, 1&#x2009;&#x003D;&#x2009;selected). All categorical demographic variables, including gender identity, income, and race/ethnicity, were dummy-coded prior to analysis (<xref ref-type="bibr" rid="B23">23</xref>). Logistic regression models produced log-odds coefficients (<italic>&#x03B2;</italic>); for interpretability, we also exponentiated these values to obtain odds ratios (OR&#x2009;&#x003D;&#x2009;<italic>e<sup>&#x03B2;</sup></italic>), which are reported alongside the <italic>&#x03B2;</italic> coefficients in the results table (<xref ref-type="bibr" rid="B30">30</xref>).</p>
</sec>
</sec>
<sec id="s4" sec-type="results"><title>Results</title>
<sec id="s4a"><title>Sample characteristics</title>
<p>The sample included 568 respondents (<xref ref-type="table" rid="T1">Table 1</xref>). The median age was 31 years (IQR: 23&#x2013;41, range: 18&#x2013;83). Over half identified as Hispanic/Latino (50.7&#x0025;), followed by Asian (17.1&#x0025;), White/Caucasian (11.6&#x0025;), and Black/African American (10.2&#x0025;); 5.1&#x0025; identified as two or more ethnicities, and 5.3&#x0025; as Middle Eastern, Native Hawaiian/Pacific Islander, South Asian, or American Indian/Alaska Native. Women comprised 66.7&#x0025; of the sample.</p>
<p>Educational attainment was varied: 3.7&#x0025; had less than high school, 13.0&#x0025; held a high school diploma, 22.9&#x0025; some college, 11.6&#x0025; a 2-year degree, 32.0&#x0025; a 4-year degree, and 16.7&#x0025; had graduate or professional training. Household income ranged from &#x0024;0 to over &#x0024;384,200, with 43.7&#x0025; in the &#x0024;22,000&#x2013;&#x0024;89,450 bracket and 21.7&#x0025; earning below &#x0024;22,000.</p>
<p>Politically, 35.2&#x0025; identified as undecided, 32.2&#x0025; as liberal, 22.9&#x0025; as moderate, and 9.7&#x0025; as conservative. About one-third (32.5&#x0025;) worked in healthcare or science, and 87.3&#x0025; had health insurance.</p>
<p>Healthcare engagement varied: 58.9&#x0025; sought care when experiencing new symptoms, 50.0&#x0025; attended yearly check-ups, and 35.4&#x0025; attended follow-ups; 8.5&#x0025; reported never visiting a doctor. Trust in the healthcare system was moderate overall, with 43.6&#x0025; reporting trust &#x201C;most of the time,&#x201D; 34.0&#x0025; neutral, 8.8&#x0025; high trust, and 13.6&#x0025; low trust.</p>
<p>Nearly half (45.1&#x0025;) used AI for at least one purpose. Among AI users, common uses included school (27.4&#x0025;), work (22.1&#x0025;), and socialization (14.8&#x0025;). Only 9.2&#x0025; reported using AI specifically for health or personal care.</p>
</sec>
<sec id="s4b"><title>Demographic factors and AI adoption</title>
<p>Logistic regression identified several demographic predictors of AI use for health purposes (<xref rid="T2" ref-type="table">Table 2</xref>). Younger age was significantly associated with higher adoption (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;&#x2013;0.029, <italic>p</italic>&#x2009;&#x003D;&#x2009;0.006), and men were less likely than women to use AI for health information (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;&#x2013;0.388, <italic>p</italic>&#x2009;&#x003D;&#x2009;0.042). Income showed a small positive coefficient, but this association did not reach statistical significance (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;0.096, <italic>p</italic>&#x2009;&#x003D;&#x2009;0.39), indicating that income was not a reliable predictor of AI use in this sample. Consistent with descriptive trends, individuals with lower educational attainment (high school or below) were less likely to use AI (60&#x0025; vs. 47&#x0025;, <italic>p</italic>&#x2009;&#x003D;&#x2009;0.0188) and also reported more difficulty understanding printed health information (12.9&#x0025; vs. 29.9&#x0025;, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001).</p>
<p>AI use varied across racial and ethnic groups: Asian (16.5&#x0025;), African American (15.5&#x0025;), and South Asian (16.7&#x0025;) respondents reported the highest adoption rates, whereas White (7.8&#x0025;) and Hispanic/Latino (5.9&#x0025;) respondents reported the lowest. The only statistically significant comparison was between Asian and Hispanic participants (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.001). Despite nearly half the sample using AI for some purpose, health-related use remained low (9.2&#x0025;), suggesting that general familiarity with AI does not automatically translate to medical adoption.</p>
</sec>
<sec id="s4c"><title>Trust in AI as an intermediary factor</title>
<p>Trust strongly differentiated attitudes toward AI in healthcare (<xref rid="T3" ref-type="table">Table 3</xref>). In total, 92.3&#x0025; of respondents trusted a physician&#x0027;s diagnosis for a serious illness compared to 14.6&#x0025; who trusted ChatGPT, and 96.4&#x0025; trusted physician referrals compared to 17.1&#x0025; who trusted ChatGPT recommendations. This indicates a clear preference for human clinical judgment.</p>
<p>Digital familiarity influenced adoption: telehealth users were significantly more likely to use AI for health (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;1.012, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001), and menstrual tracking app use showed a positive but nonsignificant association (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;0.260, <italic>p</italic>&#x2009;&#x003D;&#x2009;0.284). Lack of a private space for telemedicine was associated with lower AI use, though not significantly (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;&#x2013;0.256, <italic>p</italic>&#x2009;&#x003D;&#x2009;0.154). Increased level of trust in the healthcare system and government organization health recommendations was negatively associated with AI use for health purposes but did not reach statistical significance; the direction of this association suggests that those with lower trust were more likely to use AI for health purposes (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;&#x2212;0.194, <italic>p</italic>&#x2009;&#x003D;&#x2009;0.200), consistent with patterns of seeking information outside traditional healthcare. Several features&#x2014;use of exercise apps, electronic chart systems, and biosensors&#x2014;had coefficients of 0. These zero coefficients arose because the corresponding predictors had no variance after preprocessing in the dataset, meaning that none of the respondents who used AI for health purposes reported using these features, rendering them uninformative for the model. Consequently, these predictors carry no statistical meaning and should be regarded as an artifact of preprocessing rather than a substantive finding.</p>
</sec>
<sec id="s4d"><title>Health status and anxiety as moderators</title>
<p>Lower self-rated mental health was associated with significantly greater AI use for health purposes (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;&#x2013;0.254, <italic>p</italic>&#x2009;&#x003D;&#x2009;0.026), suggesting that individuals experiencing psychological strain may turn to AI as an accessible supplementary resource (<xref rid="T2" ref-type="table">Table 2</xref>). Conversely, individuals already using mental health apps were less likely to use AI for broader health purposes, though this pattern was nonsignificant (<italic>&#x03B2;</italic>&#x2009;&#x003D;&#x2009;&#x2013;0.349, <italic>p</italic>&#x2009;&#x003D;&#x2009;0.158). Across these variables, we observed that uninsured participants, those with lower self-rated mental health, those reporting difficulty understanding medical information, and those with lower trust in the healthcare system showed higher reliance on AI tools, indicating patterns consistent with compensatory use when traditional healthcare access is limited.</p>
</sec>
</sec>
<sec id="s5" sec-type="discussion"><title>Discussion</title>
<p>The findings from this study underscore the complex interplay of demographic, technological, and social factors influencing AI adoption in healthcare. Younger age, higher income, distrust in the current medical system, and prior use of digital health tools such as telemedicine and menstrual tracking apps were positively associated with adoption, while barriers such as lack of private space, lower education levels, and limited trust in AI tools constrained engagement. Although AI tools hold significant promise for expanding access and improving outcomes, current adoption patterns reveal that pre-existing differences&#x2014;particularly those linked to socioeconomic status, education, and race&#x2014;are mirrored in the digital health landscape. These disparities highlight both the potential of AI to contribute to improvements in healthcare delivery and the importance of understanding inequities in digital access and trust. Because attitudes toward AI are shaped by insurance structure, cost barriers, and cultural norms unique to the U.S. healthcare system, these results should be interpreted as context-bound rather than globally generalizable.</p>
<sec id="s5a"><title>Socioeconomic barriers to AI adoption</title>
<p>Although the direction of the coefficient suggested that higher income may be associated with greater AI use, this relationship was not statistically significant. This contrasts with previous research that showed that financial insecurity may reduce opportunities for engaging with innovative health solutions (<xref ref-type="bibr" rid="B31">31</xref>). Similarly, population-level analyses during the COVID-19 pandemic found that lower-income groups reported reduced uptake of digital health technologies compared to higher-income counterparts (<xref ref-type="bibr" rid="B32">32</xref>). Several factors may explain this discrepancy, including differences in our outcome variable (AI use for health specifically vs. general digital health engagement), the demographic composition of our sample, and the presence of stronger predictors (e.g., telehealth use, trust, mental health) that may attenuate the independent effect of income.</p>
<p>From the perspective of the Theory of Planned Behavior, these socioeconomic patterns suggest that individuals with higher income and education may experience greater perceived behavioral control, making them more confident in navigating AI tools (<xref ref-type="bibr" rid="B15">15</xref>). Social Cognitive Theory further supports this interpretation by emphasizing the role of environmental resources and prior experience, both of which are more available to individuals with higher socioeconomic status (<xref ref-type="bibr" rid="B14">14</xref>). From an HBM perspective, limitations such as difficulty understanding medical information sheets function as perceived barriers, which may reduce engagement with traditional care and increase reliance on AI as an alternative source of health information (<xref ref-type="bibr" rid="B17">17</xref>).</p>
</sec>
<sec id="s5b"><title>Educational attainment and AI literacy</title>
<p>Our results also indicated that participants with lower levels of education were less likely to use AI tools and reported more difficulty understanding printed health information materials. This finding is consistent with previous studies that have shown education to be a strong predictor of digital health engagement. For instance, other studies have demonstrated that higher educational attainment is associated with greater digital health literacy and more frequent use of health technologies (<xref ref-type="bibr" rid="B33">33</xref>). Taken together, these results highlight that disparities in educational access may translate directly into disparities in the ability to effectively adopt and benefit from AI-driven healthcare platforms. This suggests that digital health platforms must simplify user interfaces and language to accommodate varying levels of health and digital literacy (<xref ref-type="bibr" rid="B34">34</xref>). The ability to comprehend post-visit information, already a barrier to traditional healthcare, appears to be further amplified in the context of AI (<xref ref-type="bibr" rid="B35">35</xref>). This aligns with Social Cognitive Theory, which posits that behavioral capability and self-efficacy shape technology engagement (<xref ref-type="bibr" rid="B14">14</xref>). Individuals with lower health and digital literacy may perceive fewer skills or resources to use AI effectively, which directly limits perceived behavioral control as described in the Theory of Planned Behavior (<xref ref-type="bibr" rid="B15">15</xref>).</p>
</sec>
<sec id="s5c"><title>Cultural influences on AI trust and use</title>
<p>In our quantitative analysis, we found that Hispanic participants were significantly less likely than Asian participants to adopt AI-based health tools indicating potential cultural or contextual influences shaping engagement with AI. This aligns closely with recent qualitative evidence. A study by Kraft et al. (<xref ref-type="bibr" rid="B36">36</xref>) conducted focus groups with Hispanic and Latinx adults, revealing concerns around mHealth tools, which frequently include AI features such as unfamiliarity with technology, privacy apprehensions, and fears of overreliance on automated systems. Participants emphasized the value of human oversight, expressing discomfort with entirely automated interventions and concern that AI tools might not adequately account for cultural norms or nuanced personal contexts.</p>
<p>Together, these findings suggest that lower rates of AI adoption among Hispanic respondents may be partially driven by cultural and community-level apprehensions. The qualitative insights underscore that perceived trustworthiness, need for personal connection, and cultural appropriateness are important mediators of AI acceptance, factors that are not easily captured through quantitative measures alone. These findings illustrate how subjective norms, a central construct in the Theory of Planned Behavior, may shape AI use across cultural groups (<xref ref-type="bibr" rid="B15">15</xref>). Communities with stronger norms emphasizing personal interaction with healthcare providers may perceive AI as inconsistent with those expectations. Social Cognitive Theory similarly suggests that community-level models, norms, and shared experiences influence individual behavioral intentions (<xref ref-type="bibr" rid="B14">14</xref>).</p>
<p>Gender differences also emerged in our analysis. Men were significantly less likely than women to use AI tools for healthcare, a result consistent with longstanding evidence that men are generally less likely to engage in proactive health-seeking behaviors compared to women (<xref ref-type="bibr" rid="B37">37</xref>). This suggests that AI adoption patterns may not only reflect cultural differences across ethnic groups but also broader socialized norms around gender and healthcare engagement. The Theory of Reasoned Action provides a useful lens here: if men hold weaker subjective norms around proactive help-seeking, they may be less inclined to adopt AI tools designed to support health behaviors. This reflects broader attitudinal and normative influences rather than purely technological ones (<xref ref-type="bibr" rid="B15">15</xref>).</p>
</sec>
<sec id="s5d"><title>Trust as a mediator in AI adoption</title>
<p>Trust in AI emerged as a key mediating factor in healthcare AI adoption, with individuals who reported lower trust in AI being less likely to use it for health purposes. This finding aligns with the Technology Adoption Model, which posits that external factors, including environmental infrastructure, shape technology uptake (<xref ref-type="bibr" rid="B38">38</xref>).</p>
<p>A recent scoping review further supports this, identifying trust as a significant catalyst for AI adoption in healthcare (<xref ref-type="bibr" rid="B39">39</xref>). Increasing transparency and explainability in AI decision-making, alongside close collaboration with trusted healthcare providers, will be key to building public confidence in these tools (<xref ref-type="bibr" rid="B39">39</xref>). Our results that trust predicts AI adoption align with recent evidence emphasizing human-centered and participatory approaches in healthcare AI, where embedding trust and equity into design is critical for improving adoption (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B40">40</xref>).</p>
<p>Additionally, decreased trust in the healthcare system showed a higher correlation with AI use for health purposes, though this relationship did not reach statistical significance. This finding aligns with previous research demonstrating that people with lower trust in official healthcare sources are more likely to use and trust alternative information sources such as blogs and social media (<xref ref-type="bibr" rid="B41">41</xref>). While that study predated widespread AI adoption, it suggests a consistent pattern of seeking secondary information sources when trust in traditional healthcare is diminished.</p>
<p>This is consistent with trust-centered technology acceptance models, which emphasize that trust is a prerequisite for forming positive attitudes toward automated systems (<xref ref-type="bibr" rid="B16">16</xref>). In the Theory of Planned Behavior, trust operates through attitudes and perceived control, shaping behavioral intention (<xref ref-type="bibr" rid="B15">15</xref>). Social Cognitive Theory also emphasizes observational learning and prior experience as foundations of trust, which explains why individuals familiar with digital health tools showed higher adoption (<xref ref-type="bibr" rid="B14">14</xref>).</p>
</sec>
<sec id="s5e"><title>Influence of mental health on AI adoption in healthcare</title>
<p>In our findings, lower self-rated mental health was associated with a significantly higher likelihood of turning to AI tools for health purposes. This pattern suggests that individuals experiencing elevated psychological distress may be more inclined to use AI as a supplemental or accessible resource, especially when traditional care appears less accessible. A parallel German study similarly found that psychological distress moderated the relationship between perceived usefulness of AI and actual mental health app usage, suggesting that distress itself can drive greater engagement with digital tools (<xref ref-type="bibr" rid="B42">42</xref>). This pattern aligns with the Health Belief Model, which predicts that individuals with heightened perceived vulnerability or health-related concern may be more motivated to seek supplemental health information (<xref ref-type="bibr" rid="B17">17</xref>). It also reflects cognitive appraisal processes described in health anxiety frameworks, where distress can increase engagement with accessible information sources such as AI (<xref ref-type="bibr" rid="B17">17</xref>).</p>
<p>Conversely, prior use of mental health apps showed a negative, but non-significant, association with the use of broader AI health tools. This may reflect a phenomenon where individuals already using established digital mental health solutions feel a degree of satisfaction or trust in those services, reducing their motivation to explore AI alternatives. A related study in a Portuguese university cohort found that mental health app use was significantly associated with perceived stress and ongoing mental health concerns, indicating that known app usage may substitute for broader AI tool exploration (<xref ref-type="bibr" rid="B43">43</xref>).</p>
</sec>
<sec id="s5f"><title>AI as a compensatory tool for limited healthcare access</title>
<p>In addition to these individual predictors, the patterns observed in this study suggest that AI may function as a compensatory healthcare resource for individuals who face barriers to traditional care. Participants who were uninsured, who reported lower trust in the healthcare system, or who had difficulty understanding printed medical materials demonstrated greater reliance on AI tools. Similarly, higher use among individuals with lower self-rated mental health indicates that psychological barriers may also prompt greater engagement with AI when in-person care feels less accessible or less supportive. These trends collectively imply that AI may be used as an alternative source of healthcare guidance when structural, informational, or psychological constraints limit engagement with conventional healthcare options.</p>
</sec>
<sec id="s5g"><title>Interpretation of AI tools by participants</title>
<p>An important consideration in interpreting these findings is how participants understood the AI referenced in the survey. Because ChatGPT was used consistently across items, ranging from personal health information seeking to hypothetical diagnostic or referral scenarios, respondents may have evaluated AI through the lens of a familiar, general-purpose chatbot rather than a medically validated clinical tool. ChatGPT&#x0027;s popularity made it a practical anchor for assessing public attitudes, yet its consumer-facing design differs substantially from supervised diagnostic or decision-support systems used in healthcare. As a result, lower trust in ChatGPT for clinical tasks may reflect skepticism toward consumer AI rather than toward regulated medical AI. This distinction suggests that our trust estimates may underestimate how the public would respond to clinically validated AI tools integrated into provider workflows.</p>
</sec>
<sec id="s5h"><title>Future directions and considerations</title>
<p>Findings from this study surface several themes that warrant deeper examination in future work. Patterns related to digital literacy, transparency, and structural barriers suggest directions for continued inquiry into how people evaluate and engage with emerging healthcare technologies. These exploratory observations can help inform future research that examines how such factors might shape policy discussions around the development and implementation of AI tools. Further research across diverse settings may clarify how different groups interact with these technologies and what influences their trust and willingness to use them.</p>
</sec>
<sec id="s5i"><title>Limitations and future research</title>
<p>While this study offers valuable insights, several limitations must be acknowledged. First, the use of Qualtrics as a survey platform resulted in convenience sampling that may limit the generalizability of the findings. Hispanic or Latino respondents comprised more than half of the sample, creating the possibility of overrepresentation bias. This skew may have influenced subgroup comparisons, for example, observed differences in Hispanic participants&#x0027; AI adoption could be partially confounded by their high proportion in the dataset and their socioeconomic characteristics. Future work could employ weighting procedures or stratified sampling to balance demographic representation. Although the sample size of 568 participants was adequate for logistic regression analyses, it remains small relative to the broader U.S. population it aims to represent. This, combined with the use of convenience sampling and the demographic skew of the sample, limits the extent to which these findings can be generalized. The size and composition of the sample also do not capture the full heterogeneity of the U.S. healthcare landscape, further constraining generalizability.</p>
<p>Second, reliance on self-reported data introduces the possibility of response bias and social desirability bias, particularly for questions about technology use, trust, and health behaviors. Respondents may have underreported barriers or overstated positive attitudes toward AI tools to align with perceived norms.</p>
<p>Third, the cross-sectional design precludes any claims of causality. While associations between demographic, behavioral, and attitudinal variables and AI adoption were identified, longitudinal studies are needed to assess how these relationships evolve over time and to establish temporal directionality.</p>
<p>Fourth, the choice of ChatGPT as the reference point for AI use may have shaped participant responses. Given its widespread media coverage and public visibility, ChatGPT may not be perceived as interchangeable with other AI health tools, introducing potential framing effects that could bias adoption rates or trust.</p>
<p>Fifth, although our study shares certain methodological constraints with prior research, such as the use of self-reported data and a cross-sectional design, we sought to address a major conceptual limitation noted in the current literature. Prior studies often examine single determinants of AI acceptance in isolation, which results in fragmented explanatory models. In contrast, our study integrates multiple behavioral science frameworks and examines demographic, cognitive, psychosocial, and trust-related variables within one unified model. This approach helps reduce conceptual fragmentation and provides a more comprehensive view of the factors shaping AI adoption in healthcare.</p>
<p>Finally, although the sample was diverse across several sociodemographic categories, it may not fully capture the heterogeneity of experiences across all subgroups. Future research should incorporate both longitudinal designs and qualitative methods to more deeply explore how demographic context, trust, and cultural factors shape the evolving landscape of AI use in healthcare. Additionally, because the study reflects attitudes shaped within the U.S. healthcare system, the findings may not generalize to countries with different structures, cultural norms, or access barriers.</p>
</sec>
</sec>
<sec id="s6" sec-type="conclusions"><title>Conclusion</title>
<p>This study provides new insight into the demographic, behavioral, and cultural factors that shape public adoption of AI in healthcare. Key findings indicate that age, gender, income, and mental health status significantly influence adoption patterns, while prior engagement with digital health tools, such as telehealth and menstrual tracking apps, also contributes to openness toward AI-based healthcare. At the same time, barriers such as lack of privacy, lower education levels, and cultural differences underscore the persistence of structural inequities in the digital health landscape. Importantly, the analysis revealed that trust functions as a central mediator of AI use, amplifying or constraining adoption across diverse subgroups.</p>
<p>Taken together, these findings offer insights that can help generate hypotheses for future research on how demographic, structural, and attitudinal factors shape engagement with healthcare AI tools. The patterns observed here highlight areas for context-specific investigation, including how transparency, digital literacy, cultural factors, and trust influence public perceptions of AI. Understanding these dynamics more deeply may support future discussions about the development and implementation of AI systems in ways that reflect the needs and concerns of diverse U.S. communities. Despite using a cross-sectional approach similar to earlier studies, this work advances the field by integrating multiple behavioral science perspectives and examining how demographic, cognitive, and trust-related factors jointly influence public adoption of AI in healthcare.</p>
<p>Future research should move beyond cross-sectional analyses to employ longitudinal and experimental designs that clarify causal pathways between trust, demographic characteristics, and AI adoption. Additionally, qualitative studies could provide deeper insight into cultural and psychosocial factors underlying adoption behaviors, particularly among underrepresented groups. Expanding analyses to include a wider range of AI tools beyond ChatGPT would also improve generalizability and account for differences in public perceptions of various technologies. Together, these efforts will advance understanding of how to foster equitable, sustainable, and culturally sensitive integration of AI in healthcare.</p>
</sec>
</body>
<back>
<sec id="s7" sec-type="data-availability"><title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s8" sec-type="ethics-statement"><title>Ethics statement</title>
<p>The studies involving humans were approved by Western Institutional Review Board (Protocol &#x0023;1755408). The studies were conducted in accordance with the local legislation and institutional requirements. The ethics committee/institutional review board waived the requirement of written informed consent for participation from the participants or the participants&#x0027; legal guardians/next of kin because no personal identifiers were collected. All participants who responded agreed to complete the electronic survey and were informed at the beginning of the survey that they were providing data for a research study and that their information was collected anonymously and protected.</p>
</sec>
<sec id="s9" sec-type="author-contributions"><title>Author contributions</title>
<p>SA: Methodology, Software, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. CD: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing, Conceptualization, Data curation. VM: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing, Data curation. MN: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. CB: Data curation, Project administration, Supervision, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing, Conceptualization, Methodology. DS: Supervision, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing, Conceptualization, Methodology. OE: Supervision, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing, Conceptualization, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources.</p>
</sec>
<ack><title>Acknowledgments</title>
<p>We would like to express our gratitude to Angela Bakaj for her support during the planning and implementation of this project. Additionally, we acknowledge California State University Los Angeles and Long Beach students and others who were involved: Marissa Munoz, Marco Toledo, Ximena Espinoza, Mikaela Tolosa, Judith Estrada, Bryan Li, Ryan Lopez, Samuel Ewelike, Nisha Joseph, Karen Cruz, Blair Martell, Sophia Magana, Anja Teap, Hilda Tapia, Anh Wu, Gwendolyn, Karla Arizmendi, Monika Murillo, Sachi Swami, Rozana Bicaku, Aditi Goyal, Keyshon Howard, Maya Gonzales, Adrienne Estes.</p>
</ack>
<sec id="s11" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s12" sec-type="ai-statement"><title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="s14" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s13" sec-type="supplementary-material"><title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fdgth.2025.1664345/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fdgth.2025.1664345/full&#x0023;supplementary-material</ext-link></p>
<supplementary-material xlink:href="Datasheet1.pdf" id="SM1" mimetype="application/pdf"/>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Topol</surname> <given-names>E</given-names></name></person-group>. <source>Deep Medicine: How Artificial Intelligence Can Make Healthcare Human Again</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Basic Books</publisher-name> (<year>2019</year>).</mixed-citation></ref>
<ref id="B2"><label>2.</label><mixed-citation publication-type="other"><collab>Accenture</collab>. <comment>AI: Healthcare&#x2019;s new nervous system. (2017)</comment>.</mixed-citation></ref>
<ref id="B3"><label>3.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Sahni</surname> <given-names>NR</given-names></name> <name><surname>Stein</surname> <given-names>G</given-names></name> <name><surname>Zemmel</surname> <given-names>R</given-names></name> <name><surname>Cutler</surname> <given-names>DM</given-names></name></person-group>. <source>The Potential Impact of Artificial Intelligence on Healthcare Spending</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>National Bureau of Economic Research</publisher-name> (<year>2023</year>). <comment>Report No.: w30857. Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.nber.org/system/files/working_papers/w30857/w30857.pdf">https://www.nber.org/system/files/working_papers/w30857/w30857.pdf</ext-link> (Accessed September 22, 2025).</mixed-citation></ref>
<ref id="B4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname> <given-names>F</given-names></name> <name><surname>Jiang</surname> <given-names>Y</given-names></name> <name><surname>Zhi</surname> <given-names>H</given-names></name> <name><surname>Dong</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>H</given-names></name> <name><surname>Ma</surname> <given-names>S</given-names></name><etal/></person-group> <article-title>Artificial intelligence in healthcare: past, present and future</article-title>. <source>Stroke Vasc Neurol</source>. (<year>2017</year>) <volume>2</volume>(<issue>4</issue>):<fpage>230</fpage>&#x2013;<lpage>43</lpage>. <pub-id pub-id-type="doi">10.1136/svn-2017-000101</pub-id><pub-id pub-id-type="pmid">29507784</pub-id></mixed-citation></ref>
<ref id="B5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Venkatesh</surname> <given-names>V</given-names></name> <name><surname>Morris</surname> <given-names>MG</given-names></name> <name><surname>Davis</surname> <given-names>GB</given-names></name> <name><surname>Davis</surname> <given-names>FD</given-names></name></person-group>. <article-title>User acceptance of information technology: a unified view</article-title>. <source>MIS Q</source>. (<year>2003</year>) <volume>27</volume>(<issue>3</issue>):<fpage>425</fpage>&#x2013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.2307/30036540</pub-id></mixed-citation></ref>
<ref id="B6"><label>6.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Tyson</surname> <given-names>A</given-names></name> <name><surname>Pasquini</surname> <given-names>G</given-names></name> <name><surname>Spencer</surname> <given-names>A</given-names></name> <name><surname>Funk</surname> <given-names>C</given-names></name></person-group>. <source>60&#x0025; of Americans Would be Uncomfortable with Provider Relying on AI in Their Own Health Care</source>. <publisher-loc>Washington, DC</publisher-loc>: <publisher-name>Pew Research Center</publisher-name> (<year>2023</year>). <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.pewresearch.org/science/2023/02/22/60-of-americans-would-be-uncomfortable-with-provider-relying-on-ai-in-their-own-health-care/">https://www.pewresearch.org/science/2023/02/22/60-of-americans-would-be-uncomfortable-with-provider-relying-on-ai-in-their-own-health-care/</ext-link> (Accessed September 22, 2025).</mixed-citation></ref>
<ref id="B7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Beets</surname> <given-names>B</given-names></name> <name><surname>Newman</surname> <given-names>TP</given-names></name> <name><surname>Howell</surname> <given-names>EL</given-names></name> <name><surname>Bao</surname> <given-names>L</given-names></name> <name><surname>Yang</surname> <given-names>S</given-names></name></person-group>. <article-title>Surveying public perceptions of artificial intelligence in health care in the United States: systematic review</article-title>. <source>J Med Internet Res</source>. (<year>2023</year>) <volume>25</volume>:<fpage>e40337</fpage>. <pub-id pub-id-type="doi">10.2196/40337</pub-id><pub-id pub-id-type="pmid">37014676</pub-id></mixed-citation></ref>
<ref id="B8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Witkowski</surname> <given-names>K</given-names></name> <name><surname>Dougherty</surname> <given-names>RB</given-names></name> <name><surname>Neely</surname> <given-names>SR</given-names></name></person-group>. <article-title>Public perceptions of artificial intelligence in healthcare: ethical concerns and opportunities for patient-centered care</article-title>. <source>BMC Med Ethics</source>. (<year>2024</year>) <volume>25</volume>:<fpage>74</fpage>. <pub-id pub-id-type="doi">10.1186/s12910-024-01066-4</pub-id><pub-id pub-id-type="pmid">38909180</pub-id></mixed-citation></ref>
<ref id="B9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nong</surname> <given-names>P</given-names></name> <name><surname>Ji</surname> <given-names>M</given-names></name></person-group>. <article-title>Expectations of healthcare AI and the role of trust: understanding patient views on how AI will impact cost, access, and patient-provider relationships</article-title>. <source>J Am Med Inform Assoc</source>. (<year>2025</year>) <volume>32</volume>(<issue>5</issue>):<fpage>795</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1093/jamia/ocaf031</pub-id><pub-id pub-id-type="pmid">40036944</pub-id></mixed-citation></ref>
<ref id="B10"><label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Antes</surname> <given-names>AL</given-names></name> <name><surname>Burrous</surname> <given-names>S</given-names></name> <name><surname>Sisk</surname> <given-names>BA</given-names></name> <name><surname>Schuelke</surname> <given-names>MJ</given-names></name> <name><surname>Keune</surname> <given-names>JD</given-names></name> <name><surname>DuBois</surname> <given-names>JM</given-names></name></person-group>. <article-title>Exploring perceptions of healthcare technologies enabled by artificial intelligence: an online, scenario-based survey</article-title>. <source>BMC Med Inform Decis Mak</source>. (<year>2021</year>) <volume>21</volume>:<fpage>221</fpage>. <pub-id pub-id-type="doi">10.1186/s12911-021-01573-5</pub-id><pub-id pub-id-type="pmid">34284756</pub-id></mixed-citation></ref>
<ref id="B11"><label>11.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Poon</surname> <given-names>EG</given-names></name> <name><surname>Lemak</surname> <given-names>CH</given-names></name> <name><surname>Rojas</surname> <given-names>JC</given-names></name> <name><surname>Guptill</surname> <given-names>J</given-names></name> <name><surname>Classen</surname> <given-names>D</given-names></name></person-group>. <article-title>Adoption of artificial intelligence in healthcare: survey of health system priorities, successes, and challenges</article-title>. <source>J Am Med Inform Assoc</source>. (<year>2025</year>) <volume>32</volume>(<issue>7</issue>):<fpage>1093</fpage>&#x2013;<lpage>100</lpage>. <pub-id pub-id-type="doi">10.1093/jamia/ocaf065</pub-id><pub-id pub-id-type="pmid">40323320</pub-id></mixed-citation></ref>
<ref id="B12"><label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Botha</surname> <given-names>NN</given-names></name> <name><surname>Ansah</surname> <given-names>EW</given-names></name> <name><surname>Segbedzi</surname> <given-names>CE</given-names></name> <name><surname>Dumahasi</surname> <given-names>VK</given-names></name> <name><surname>Maneen</surname> <given-names>S</given-names></name> <name><surname>Kodom</surname> <given-names>RV</given-names></name><etal/></person-group> <article-title>Artificial intelligent tools: evidence-mapping on the perceived positive effects on patient-care and confidentiality</article-title>. <source>BMC Digit Health</source>. (<year>2024</year>) <volume>2</volume>:<fpage>33</fpage>. <pub-id pub-id-type="doi">10.1186/s44247-024-00091-y</pub-id></mixed-citation></ref>
<ref id="B13"><label>13.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Botha</surname> <given-names>N</given-names></name> <name><surname>Segbedzi</surname> <given-names>CE</given-names></name> <name><surname>Dumahasi</surname> <given-names>VK</given-names></name> <name><surname>Maneen</surname> <given-names>S</given-names></name> <name><surname>Kodom</surname> <given-names>RV</given-names></name> <name><surname>Tsedze</surname> <given-names>IS</given-names></name><etal/></person-group> <article-title>Artificial intelligence in healthcare: a scoping review of perceived threats to patient rights and safety</article-title>. <source>Arch Public Health</source>. (<year>2024</year>) <volume>82</volume>:<fpage>188</fpage>. <pub-id pub-id-type="doi">10.1186/s13690-024-01414-1</pub-id><pub-id pub-id-type="pmid">39444019</pub-id></mixed-citation></ref>
<ref id="B14"><label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bandura</surname> <given-names>A</given-names></name></person-group>. <article-title>Social cognitive theory: an agentic perspective</article-title>. <source>Annu Rev Psychol</source>. (<year>2001</year>) <volume>52</volume>:<fpage>1</fpage>&#x2013;<lpage>26</lpage>. <pub-id pub-id-type="doi">10.1146/annurev.psych.52.1.1</pub-id><pub-id pub-id-type="pmid">11148297</pub-id></mixed-citation></ref>
<ref id="B15"><label>15.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ajzen</surname> <given-names>I</given-names></name></person-group>. <article-title>The theory of planned behavior</article-title>. <source>Organ Behav Hum Decis Process</source>. (<year>1991</year>) <volume>50</volume>(<issue>2</issue>):<fpage>179</fpage>&#x2013;<lpage>211</lpage>. <pub-id pub-id-type="doi">10.1016/0749-5978(91)90020-T</pub-id></mixed-citation></ref>
<ref id="B16"><label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Siau</surname> <given-names>K</given-names></name> <name><surname>Wang</surname> <given-names>W</given-names></name></person-group>. <article-title>Building trust in artificial intelligence, machine learning, and robotics</article-title>. <source>Cutter Bus Technol J</source>. (<year>2018</year>) <volume>31</volume>(<issue>2</issue>):<fpage>47</fpage>&#x2013;<lpage>53</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.cutter.com/article/building-trust-artificial-intelligence-machine-learning-and-robotics-498981">https://www.cutter.com/article/building-trust-artificial-intelligence-machine-learning-and-robotics-498981</ext-link> (Accessed September 22, 2025).</mixed-citation></ref>
<ref id="B17"><label>17.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Leventhal</surname> <given-names>H</given-names></name> <name><surname>Brissette</surname> <given-names>I</given-names></name> <name><surname>Leventhal</surname> <given-names>EA</given-names></name></person-group>. <article-title>The common-sense model of self-regulation of health and illness</article-title>. In: <person-group person-group-type="editor"><name><surname>Cameron</surname> <given-names>LD</given-names></name> <name><surname>Leventhal</surname> <given-names>H</given-names></name></person-group>, editors. <source>The Self-Regulation of Health and Illness Behaviour</source>. <publisher-loc>London</publisher-loc>: <publisher-name>Routledge</publisher-name> (<year>2003</year>). p. <fpage>42</fpage>&#x2013;<lpage>65</lpage>.</mixed-citation></ref>
<ref id="B18"><label>18.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>X</given-names></name> <name><surname>Cheng</surname> <given-names>Z</given-names></name></person-group>. <article-title>Cross-sectional studies: strengths, weaknesses, and recommendations</article-title>. <source>Chest</source>. (<year>2020</year>) <volume>158</volume>(<issue>1 Suppl</issue>):<fpage>S65</fpage>&#x2013;<lpage>71</lpage>. <pub-id pub-id-type="doi">10.1016/j.chest.2020.03.012</pub-id><pub-id pub-id-type="pmid">32658654</pub-id></mixed-citation></ref>
<ref id="B19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stratton</surname> <given-names>SJ</given-names></name></person-group>. <article-title>Population research: convenience sampling strategies</article-title>. <source>Prehosp Disaster Med</source>. (<year>2021</year>) <volume>36</volume>(<issue>4</issue>):<fpage>373</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1017/S1049023X21000649</pub-id><pub-id pub-id-type="pmid">34284835</pub-id></mixed-citation></ref>
<ref id="B20"><label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Svestkova</surname> <given-names>A</given-names></name> <name><surname>Huang</surname> <given-names>Y</given-names></name> <name><surname>Smahel</surname> <given-names>D</given-names></name></person-group>. <article-title>Factors that influence trust and willingness to use generative AI for health information: a cross-sectional study</article-title>. <source>Digit Health</source>. (<year>2025</year>) <volume>11</volume>:<fpage>20552076251360973</fpage>. <pub-id pub-id-type="doi">10.1177/20552076251360973</pub-id><pub-id pub-id-type="pmid">40735543</pub-id></mixed-citation></ref>
<ref id="B21"><label>21.</label><mixed-citation publication-type="other"><collab>Tax Foundation</collab>. <comment>2024 federal income tax brackets. (2023). Available online at: <ext-link ext-link-type="uri" xlink:href="https://taxfoundation.org/data/all/federal/2024-tax-brackets/">https://taxfoundation.org/data/all/federal/2024-tax-brackets/</ext-link> (Accessed September 22, 2025).</comment></mixed-citation></ref>
<ref id="B22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bagley</surname> <given-names>SC</given-names></name> <name><surname>White</surname> <given-names>H</given-names></name> <name><surname>Golomb</surname> <given-names>BA</given-names></name></person-group>. <article-title>Logistic regression in the medical literature: standards for use and reporting, with particular attention to one medical domain</article-title>. <source>J Clin Epidemiol</source>. (<year>2001</year>) <volume>54</volume>(<issue>10</issue>):<fpage>979</fpage>&#x2013;<lpage>85</lpage>. <pub-id pub-id-type="doi">10.1016/S0895-4356(01)00372-9</pub-id><pub-id pub-id-type="pmid">11576808</pub-id></mixed-citation></ref>
<ref id="B23"><label>23.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Agresti</surname> <given-names>A</given-names></name></person-group>. <source>Statistical Methods for the Social Sciences</source>. <edition>5th ed.</edition> <publisher-loc>Boston</publisher-loc>: <publisher-name>Pearson</publisher-name> (<year>2018</year>).</mixed-citation></ref>
<ref id="B24"><label>24.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>James</surname> <given-names>G</given-names></name> <name><surname>Witten</surname> <given-names>D</given-names></name> <name><surname>Hastie</surname> <given-names>T</given-names></name> <name><surname>Tibshirani</surname> <given-names>R</given-names></name></person-group>. <source>An Introduction to Statistical Learning: With Applications in R</source>. <edition>2nd ed.</edition> <publisher-loc>New York</publisher-loc>: <publisher-name>Springer</publisher-name> (<year>2021</year>).</mixed-citation></ref>
<ref id="B25"><label>25.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Little</surname> <given-names>RJA</given-names></name> <name><surname>Rubin</surname> <given-names>DB</given-names></name></person-group>. <source>Statistical Analysis with Missing Data</source>. <edition>3rd ed.</edition> <publisher-loc>Hoboken (NJ)</publisher-loc>: <publisher-name>Wiley</publisher-name> (<year>2019</year>). <comment>(Wiley Series in Probability and Statistics)</comment>. <pub-id pub-id-type="doi">10.1002/9781119482260</pub-id></mixed-citation></ref>
<ref id="B26"><label>26.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>McHugh</surname> <given-names>ML</given-names></name></person-group>. <article-title>The chi-square test of independence</article-title>. <source>Biochem Med</source>. (<year>2013</year>) <volume>23</volume>(<issue>2</issue>):<fpage>143</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.11613/BM.2013.018</pub-id></mixed-citation></ref>
<ref id="B27"><label>27.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hochberg</surname> <given-names>Y</given-names></name> <name><surname>Tamhane</surname> <given-names>AC</given-names></name></person-group>. <source>Multiple Comparison Procedures</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>Wiley</publisher-name> (<year>1987</year>). <comment>(Wiley Series in Probability and Statistics)</comment>. <pub-id pub-id-type="doi">10.1002/9780470316672</pub-id></mixed-citation></ref>
<ref id="B28"><label>28.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hosmer</surname> <given-names>DW</given-names></name> <name><surname>Lemeshow</surname> <given-names>S</given-names></name> <name><surname>Sturdivant</surname> <given-names>RX</given-names></name></person-group>. <source>Applied Logistic Regression</source>. <edition>3rd ed.</edition> <publisher-loc>Hoboken (NJ)</publisher-loc>: <publisher-name>Wiley</publisher-name> (<year>2013</year>).</mixed-citation></ref>
<ref id="B29"><label>29.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Fisher</surname> <given-names>RA</given-names></name></person-group>. <source>Statistical Methods for Research Workers</source>. <edition>2nd ed.</edition> <publisher-loc>Edinburgh</publisher-loc>: <publisher-name>Oliver and Boyd</publisher-name> (<year>1925</year>).</mixed-citation></ref>
<ref id="B30"><label>30.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hosmer</surname> <given-names>DW</given-names></name> <name><surname>Lemeshow</surname> <given-names>S</given-names></name> <name><surname>Sturdivant</surname> <given-names>RX</given-names></name></person-group>. <source>Applied Logistic Regression</source>. <edition>3rd ed.</edition> <publisher-loc>Hoboken (NJ)</publisher-loc>: <publisher-name>Wiley</publisher-name> (<year>2013</year>).</mixed-citation></ref>
<ref id="B31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Weissman</surname> <given-names>JS</given-names></name> <name><surname>Stern</surname> <given-names>R</given-names></name> <name><surname>Fielding</surname> <given-names>SL</given-names></name> <name><surname>Epstein</surname> <given-names>AM</given-names></name></person-group>. <article-title>Delayed access to health care: risk factors, reasons, and consequences</article-title>. <source>Ann Intern Med</source>. (<year>1991</year>) <volume>114</volume>(<issue>4</issue>):<fpage>325</fpage>&#x2013;<lpage>31</lpage>. <pub-id pub-id-type="doi">10.7326/0003-4819-114-4-325</pub-id></mixed-citation></ref>
<ref id="B32"><label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tuitert</surname> <given-names>I</given-names></name> <name><surname>Marinus</surname> <given-names>JD</given-names></name> <name><surname>Dalenberg</surname> <given-names>JR</given-names></name> <name><surname>van &#x2018;t Veer</surname> <given-names>JTB</given-names></name></person-group>. <article-title>Digital health technology use across socioeconomic groups prior to and during the COVID-19 pandemic: panel study</article-title>. <source>JMIR Public Health Surveill</source>. (<year>2024</year>) <volume>10</volume>:<fpage>e55384</fpage>. <pub-id pub-id-type="doi">10.2196/55384</pub-id><pub-id pub-id-type="pmid">39269755</pub-id></mixed-citation></ref>
<ref id="B33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>CMY</given-names></name> <name><surname>Thomas</surname> <given-names>E</given-names></name> <name><surname>Norman</surname> <given-names>R</given-names></name> <name><surname>Wells</surname> <given-names>L</given-names></name> <name><surname>Shaw</surname> <given-names>T</given-names></name> <name><surname>Nesbitt</surname> <given-names>J</given-names></name><etal/></person-group> <article-title>Educational attainment and willingness to use technology for health and to share health information&#x2014;the reimagining healthcare survey</article-title>. <source>Int J Med Inform</source>. (<year>2022</year>) <volume>164</volume>:<fpage>104803</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2022.104803</pub-id><pub-id pub-id-type="pmid">35644052</pub-id></mixed-citation></ref>
<ref id="B34"><label>34.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Burns</surname> <given-names>C</given-names></name> <name><surname>Bakaj</surname> <given-names>A</given-names></name> <name><surname>Berishaj</surname> <given-names>A</given-names></name> <name><surname>Hristidis</surname> <given-names>V</given-names></name> <name><surname>Deak</surname> <given-names>P</given-names></name> <name><surname>Equils</surname> <given-names>O</given-names></name></person-group>. <article-title>Use of generative AI for improving health literacy in reproductive health: case study</article-title>. <source>JMIR Form Res</source>. (<year>2024</year>) <volume>8</volume>:<fpage>e59434</fpage>. <pub-id pub-id-type="doi">10.2196/59434</pub-id><pub-id pub-id-type="pmid">38986153</pub-id></mixed-citation></ref>
<ref id="B35"><label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shahid</surname> <given-names>R</given-names></name> <name><surname>Shoker</surname> <given-names>M</given-names></name> <name><surname>Chu</surname> <given-names>LM</given-names></name> <name><surname>McCormack</surname> <given-names>B</given-names></name> <name><surname>van Walraven</surname> <given-names>C</given-names></name></person-group>. <article-title>Impact of low health literacy on patients&#x2019; health outcomes: a multicenter cohort study</article-title>. <source>BMC Health Serv Res</source>. (<year>2022</year>) <volume>22</volume>:<fpage>1148</fpage>. <pub-id pub-id-type="doi">10.1186/s12913-022-08527-9</pub-id><pub-id pub-id-type="pmid">36096793</pub-id></mixed-citation></ref>
<ref id="B36"><label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kraft</surname> <given-names>SA</given-names></name> <name><surname>Chopra</surname> <given-names>S</given-names></name> <name><surname>Duran</surname> <given-names>MC</given-names></name> <name><surname>Rojina</surname> <given-names>JA</given-names></name> <name><surname>Beretta</surname> <given-names>A</given-names></name> <name><surname>L&#x00F3;pez</surname> <given-names>KI</given-names></name><etal/></person-group> <article-title>Perspectives of Hispanic and Latinx community members on AI-enabled mHealth tools: qualitative focus group study</article-title>. <source>J Med Internet Res</source>. (<year>2025</year>) <volume>27</volume>:<fpage>e59817</fpage>. <pub-id pub-id-type="doi">10.2196/59817</pub-id><pub-id pub-id-type="pmid">39912577</pub-id></mixed-citation></ref>
<ref id="B37"><label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Thompson</surname> <given-names>AE</given-names></name> <name><surname>Anisimowicz</surname> <given-names>Y</given-names></name> <name><surname>Miedema</surname> <given-names>B</given-names></name> <name><surname>Hogg</surname> <given-names>W</given-names></name> <name><surname>Wodchis</surname> <given-names>WP</given-names></name> <name><surname>Aubrey-Bassler</surname> <given-names>K</given-names></name></person-group>. <article-title>The influence of gender and other patient characteristics on health care-seeking behaviour: a QUALICOPC study</article-title>. <source>BMC Fam Pract</source>. (<year>2016</year>) <volume>17</volume>:<fpage>38</fpage>. <pub-id pub-id-type="doi">10.1186/s12875-016-0440-0</pub-id><pub-id pub-id-type="pmid">27036116</pub-id></mixed-citation></ref>
<ref id="B38"><label>38.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Taherdoost</surname> <given-names>H</given-names></name> <name><surname>Mohamed</surname> <given-names>N</given-names></name> <name><surname>Madanchian</surname> <given-names>M</given-names></name></person-group>. <article-title>Navigating technology adoption/acceptance models</article-title>. <source>Procedia Comput Sci</source>. (<year>2024</year>) <volume>237</volume>:<fpage>833</fpage>&#x2013;<lpage>40</lpage>. <pub-id pub-id-type="doi">10.1016/j.procs.2024.05.091</pub-id></mixed-citation></ref>
<ref id="B39"><label>39.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hassan</surname> <given-names>M</given-names></name> <name><surname>Kushniruk</surname> <given-names>A</given-names></name> <name><surname>Borycki</surname> <given-names>E</given-names></name></person-group>. <article-title>Barriers to and facilitators of artificial intelligence adoption in health care: scoping review</article-title>. <source>JMIR Hum Factors</source>. (<year>2024</year>) <volume>11</volume>:<fpage>e48633</fpage>. <pub-id pub-id-type="doi">10.2196/48633</pub-id><pub-id pub-id-type="pmid">39207831</pub-id></mixed-citation></ref>
<ref id="B40"><label>40.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hao</surname> <given-names>C</given-names></name> <name><surname>Uusitalo</surname> <given-names>S</given-names></name> <name><surname>Figueroa</surname> <given-names>C</given-names></name> <name><surname>Smit</surname> <given-names>QTS</given-names></name> <name><surname>Strange</surname> <given-names>M</given-names></name> <name><surname>Chang</surname> <given-names>WT</given-names></name><etal/></person-group> <article-title>A human-centered perspective on research challenges for hybrid human artificial intelligence in lifestyle and behavior change support</article-title>. <source>Front Digit Health</source>. (<year>2025</year>) <volume>7</volume>:<fpage>1544185</fpage>. <pub-id pub-id-type="doi">10.3389/fdgth.2025.1544185</pub-id><pub-id pub-id-type="pmid">40182585</pub-id></mixed-citation></ref>
<ref id="B41"><label>41.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>X</given-names></name> <name><surname>Hay</surname> <given-names>JL</given-names></name> <name><surname>Waters</surname> <given-names>EA</given-names></name> <name><surname>Kiviniemi</surname> <given-names>MT</given-names></name> <name><surname>Biddle</surname> <given-names>C</given-names></name> <name><surname>Schofield</surname> <given-names>E</given-names></name><etal/></person-group> <article-title>Health literacy and use and trust in health information</article-title>. <source>J Health Commun</source>. (<year>2018</year>) <volume>23</volume>(<issue>8</issue>):<fpage>724</fpage>&#x2013;<lpage>34</lpage>. <pub-id pub-id-type="doi">10.1080/10810730.2018.1511658</pub-id><pub-id pub-id-type="pmid">30160641</pub-id></mixed-citation></ref>
<ref id="B42"><label>42.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fritz</surname> <given-names>B</given-names></name> <name><surname>Eppelmann</surname> <given-names>L</given-names></name> <name><surname>Edelmann</surname> <given-names>A</given-names></name> <name><surname>Rohrmann</surname> <given-names>S</given-names></name> <name><surname>Wessa</surname> <given-names>M</given-names></name></person-group>. <article-title>How mental health status and attitudes toward mental health shape AI acceptance in psychosocial care: a cross-sectional analysis</article-title>. <source>BMC Psychol</source>. (<year>2025</year>) <volume>13</volume>:<fpage>617</fpage>. <pub-id pub-id-type="doi">10.1186/s40359-025-02954-z</pub-id><pub-id pub-id-type="pmid">40481588</pub-id></mixed-citation></ref>
<ref id="B43"><label>43.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nogueira-Leite</surname> <given-names>D</given-names></name> <name><surname>Marques-Cruz</surname> <given-names>M</given-names></name> <name><surname>Cruz-Correia</surname> <given-names>R</given-names></name></person-group>. <article-title>Individuals&#x2019; attitudes toward digital mental health apps and implications for adoption in Portugal: web-based survey</article-title>. <source>BMC Med Inform Decis Mak</source>. (<year>2024</year>) <volume>24</volume>:<fpage>99</fpage>. <pub-id pub-id-type="doi">10.1186/s12911-024-02488-1</pub-id><pub-id pub-id-type="pmid">38637866</pub-id></mixed-citation></ref></ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/906454/overview">Cl&#x00E1;udia De Freitas</ext-link>, University of Porto, Portugal</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1305817/overview">Viktoriya Semeshenko</ext-link>, Universidad de Buenos Aires, Argentina</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1740726/overview">Abdallah Al-Ani</ext-link>, Boston Children&#x0027;s Hospital and Harvard Medical School, Boston, United States</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2958723/overview">Nkosi Nkosi Botha</ext-link>, University of Cape Coast, Ghana</p></fn>
</fn-group>
</back>
</article>