<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="discussion">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Sociol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Sociology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Sociol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2297-7775</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fsoc.2026.1786377</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Opinion</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>In the room but not on the byline: trust, fear, and the human role in addiction science in the age of AI</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Bahji</surname> <given-names>Anees</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x02020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<uri xlink:href="https://loop.frontiersin.org/people/3347616"/>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Psychiatry, University of Calgary</institution>, <city>Calgary</city>, <state>AB</state>, <country country="CA">Canada</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Community Health Sciences, University of Calgary</institution>, <city>Calgary</city>, <state>AB</state>, <country country="CA">Canada</country></aff>
<aff id="aff3"><label>3</label><institution>Hotchkiss Brain Institute, University of Calgary</institution>, <city>Calgary</city>, <state>AB</state>, <country country="CA">Canada</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Anees Bahji, <email xlink:href="mailto:anees.bahji1@ucalgary.ca">anees.bahji1@ucalgary.ca</email></corresp>
<fn fn-type="other" id="fn001"><label>&#x02020;</label><p>ORCID: Anees Bahji <uri xlink:href="https://orcid.org/0000-0002-3490-314X">orcid.org/0000-0002-3490-314X</uri></p></fn></author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-03">
<day>03</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>11</volume>
<elocation-id>1786377</elocation-id>
<history>
<date date-type="received">
<day>12</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>26</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>28</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Bahji.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Bahji</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-03">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>idea generation</kwd>
<kwd>research</kwd>
<kwd>research writing</kwd>
<kwd>scientific process</kwd>
</kwd-group>
<funding-group>
  <funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="0"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="0"/>
<page-count count="4"/>
<word-count count="2286"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>ELSI in Science and Genetics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>Introduction</title>
<p>I write this as an addiction psychiatrist, a clinician-scientist, a PhD candidate, and a deputy editor of the Canadian Journal of Addiction. I encounter artificial intelligence not as a hypothetical future, but as a recurring practical problem&#x02014;one that surfaces in manuscripts, reviewer comments, editorial meetings, and disclosure forms with increasing regularity. Questions about AI are no longer about whether it will affect scientific publishing, but about what it is already revealing about how science actually works.</p>
<p>At two o&#x00027;clock in the morning, with a manuscript deadline looming after a full clinical day, I have used ChatGPT to rephrase a stubborn paragraph. I have used it to check reference formatting. I have relied on AI-powered tools to screen thousands of abstracts for systematic reviews and to summarize dense policy documents that would otherwise take hours. None of this felt unethical. None of it felt like cheating. It felt efficient. It felt pragmatic. It felt human. By human, I do not mean that the technology itself possesses agency or intention, but that its use reflects human judgment operating under real-world constraints&#x02014;fatigue, time pressure, and responsibility for the final product.</p>
<p>Yet when submitting manuscripts, I am increasingly asked to disclose this use as though it were a conflict of interest. AI cannot be listed as an author. It cannot be cited. It cannot take responsibility. And yet it is clearly present throughout the research workflow. AI is not on the byline&#x02014;but it is very much in the room.</p>
<p>What drives many of these policies is not only concern about accuracy or misconduct, but something more affective: fear, and a corresponding lack of trust. The scientific enterprise rests on assumptions we rarely state explicitly. We assume researchers act in good faith. We assume the data are real. We assume the analyses were actually conducted. We assume the methods reflect what was done. We assume the writing faithfully represents the work rather than obscuring it. Peer review depends on these assumptions. Without them, the system collapses.</p>
<p>AI did not create these vulnerabilities. It exposed them.</p>
<p>Our response to that exposure has been strikingly asymmetric. Using AI to fabricate data, falsify results, or invent citations is rightly condemned as scientific misconduct. But using AI to refine language, structure, or clarity is often treated with suspicion, as though linguistic polish itself undermines integrity. Writing is implicitly framed as secondary to &#x0201C;real&#x0201D; research, despite the fact that knowledge generation, synthesis, and translation are inseparable in practice. Research that is never clearly communicated might as well not exist.</p>
<p>Recognizing writing as constitutive of knowledge production does not require assigning epistemic agency to every tool involved in that process. Scientific authorship has always relied on assistance&#x02014;from statisticians, editors, translators, and software&#x02014;without dissolving responsibility or authorship. The ethical distinction does not hinge on whether writing matters, but on who retains judgment over truth claims. AI-assisted refinement alters form, not ownership of meaning; fabrication, by contrast, substitutes generated content for accountable human judgment.</p>
<p>This opinion advances a simple but necessary claim: AI use in research is not inherently a replacement for human scientific judgment&#x02014;it is, in most legitimate cases, a refinement of it. The ethical failure is not using AI to improve clarity, efficiency, or accessibility. The ethical failure is using AI to fabricate, falsify, or evade responsibility.</p></sec>
<sec id="s2">
<title>Fear of replacement and what it clarifies</title>
<p>I will admit something less comfortable. Part of the anxiety around AI is personal. I am occasionally afraid that AI might replace aspects of my own role. In some narrow domains, it already performs certain tasks faster than I do: summarizing literature, drafting text, synthesizing arguments, generating outlines. In those moments, the question becomes unsettlingly direct: If AI can do this, what am I for?</p>
<p>That fear turns out to be clarifying rather than paralyzing. It forces a more honest accounting of what is actually unique about human scientific contribution. My value is not typing speed. It is not flawless grammar. It is not even synthesis in the abstract. It lies in judgment: deciding what matters, what is plausible, what is ethical, what is clinically meaningful, and what responsibility I am willing to publicly stand behind.</p>
<p>AI can generate text. It cannot own consequences.</p></sec>
<sec id="s3">
<title>Refinement is not deception</title>
<p>Influencers routinely use AI to refine social media posts-adjusting tone, clarity, and reach. Few would argue this constitutes deception. In academic writing, using AI to refine language performs an analogous function. It does not alter the underlying data, analyses, or interpretations. It makes the work more legible.</p>
<p>In addiction science, this matters deeply. Our field sits at the intersection of medicine, policy, lived experience, and public discourse. Poorly written science does not merely inconvenience readers; it distorts meaning, limits uptake, and reinforces inequities&#x02014;particularly for non-native English speakers. Treating clarity as ethically suspect while tolerating opaque prose is a strange inversion of scientific values.</p>
<p>Using AI to fabricate data is misconduct. Using AI to refine language is not. Conflating the two erodes trust rather than protecting it.</p></sec>
<sec id="s4">
<title>What AI has actually exposed about science</title>
<p>The unease surrounding AI reflects an uncomfortable truth: much of scientific publishing already operates on trust rather than verification. Reviewers rarely re-run analyses. Editors cannot audit raw data for every submission. We rely on professional norms and accountability.</p>
<p>AI did not make science fragile; it revealed where it already was.</p>
<p>Hallucinations are a genuine risk, but not merely a technical one. They are an epistemic threat precisely because fluent prose can masquerade as knowledge. When linguistic confidence substitutes for evidentiary accuracy, peer review risks becoming stylistic proofreading rather than scientific evaluation. The danger lies not in refinement, but in unexamined authority.</p>
<p>AI has exposed a longstanding vulnerability in science: our reliance on coherence as a proxy for epistemic authority. A recent and tragic case in which an individual died by suicide after interacting with an AI chatbot has intensified public concern about large language models. It is tempting to frame such cases as evidence that AI itself is harmful or malevolent. I find that framing unhelpful. A more uncomfortable interpretation is that AI did exactly what it was designed to do: mirror user input, align with conversational cues, and generate responses that feel coherent and validating. It did not generate suicidal intent, nor did it possess the capacity to recognize when validation becomes containment failure.</p>
<p>In psychiatry, we are trained to understand that empathy without judgment&#x02014;and reflection without boundaries&#x02014;can be dangerous. AI has not created this ethical problem; it has exposed what happens when systems designed for linguistic refinement are implicitly entrusted with moral or clinical authority they do not possess. The failure was not one of refinement, but of misplaced responsibility. Authority derived from coherence rather than accountability is already a structural risk&#x02014;one that AI makes more visible and more scalable.</p>
<p>Scale amplifies this risk. Humans can fabricate papers; AI allows fabrication at industrial volume. Paper mills, fake peer review, and mass-generated manuscripts are already emerging realities. Blanket prohibition is a poor response. Detection tools are unreliable and disproportionately flag non-native English writers. Fear-driven policies push AI use underground and discourage honesty.</p>
<p>The solution is not reflexive fear, but institutional and epistemic literacy. Literacy here means more than technical familiarity with AI systems. It requires understanding where AI is appropriate, where it is not, and which responsibilities remain irreducibly human.</p></sec>
<sec id="s5">
<title>Peer review, burden, and responsibility</title>
<p>Peer review in addiction science is already strained. Clinician-reviewers juggle heavy caseloads, administrative demands, and academic expectations. AI-assisted writing accelerates submission volume without expanding reviewer capacity.</p>
<p>Here again, refinement matters. AI can assist with statistical checks, reference verification, and image analysis. It can reduce cognitive load without replacing judgment. What it cannot do&#x02014;and must not be allowed to do&#x02014;is assume responsibility for evaluative decisions.</p>
<p>Uploading manuscripts to public large language models for &#x0201C;review assistance&#x0201D; violates confidentiality. Using AI to support internal checks does not. The difference is not technological; it is ethical.</p></sec>
<sec id="s6">
<title>Disclosure without stigma</title>
<p>Current disclosure practices lack nuance. What counts as meaningful AI use? Spellcheck? Paragraph rewriting? Abstract screening? Without clarity, disclosure becomes performative rather than informative&#x02014;and begins to resemble a moral test rather than a transparency mechanism.</p>
<p>A functional framework would ask:</p>
<list list-type="bullet">
<list-item><p>What task was AI used for?</p></list-item>
<list-item><p>Did it alter content or only form?</p></list-item>
<list-item><p>Could the same task reasonably have been performed by a human assistant?</p></list-item>
</list>
<p>Disclosure should build trust, not punish efficiency or honesty.</p></sec>
<sec id="s7">
<title>Conclusion: what remains human</title>
<p>AI is not replacing science. It is refining how science is written, processed, and shared. The real threat lies not in responsible AI use, but in our reluctance to confront what science has always required: trust, accountability, and judgment.</p>
<p>AI can scale information. It can generate text, summarize evidence, and surface patterns at speeds no human can match. What it cannot do&#x02014;at least for now&#x02014;is what science ultimately demands of its practitioners. It cannot weigh competing values. It cannot situate findings within lived human contexts. It cannot decide when uncertainty warrants caution, when evidence is sufficient for action, or when silence would be more ethical than publication. It cannot be held morally or professionally accountable for the consequences of its outputs.</p>
<p>My role&#x02014;as a clinician, a scientist, and an editor&#x02014;is not simply to produce information. It is to interpret it, to contextualize it, to judge its relevance and limits, and to stand publicly behind those judgments. It is to bring not just technical competence, but ethical responsibility and human concern to decisions that affect patients, policy, and public trust. These are not inefficiencies to be automated away; they are the core of scientific and clinical work.</p>
<p>If AI has clarified anything, it is this: the value of human contribution in science was never primarily linguistic or mechanical. It has always resided in judgment, interpretation, and accountability&#x02014;in the willingness to say <italic>this matters, this is uncertain, this may cause harm</italic>, and <italic>I am responsible for this claim</italic>. AI can assist that work. It cannot replace it.</p>
<p>That responsibility remains human work.</p></sec>
</body>
<back>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>AB: Methodology, Visualization, Investigation, Validation, Project administration, Resources, Data curation, Supervision, Writing &#x02013; original draft, Funding acquisition, Software, Conceptualization, Writing &#x02013; review &#x00026; editing, Formal analysis.</p>
</sec>
<ack><title>Acknowledgments</title><p>We acknowledge that this work was conducted on the historical and contemporary Indigenous lands of Treaties 6, 7, and 8, and the homeland of the M&#x000E9;tis. We also recognize the many Indigenous communities established in urban centers across Alberta.</p>
</ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>AB has received doctoral research funding from the Canadian Institutes of Health Research (CIHR) Fellowship, the University of Calgary Cumming School of Medicine, and the Calgary Health Trust. He receives modest honoraria for teaching undergraduate and postgraduate medical trainees at the University of Calgary. AB is an unpaid member of the Canadian Network for Mood and Anxiety Treatments (CANMAT) Editorial Committee and the Section of Addiction Psychiatry of the Canadian Psychiatric Association (CPA). He serves as Deputy Editor of the <italic>Canadian Journal of Addiction</italic> and as a mental health educator for TED-Ed, for which he receives a small honorarium. He reports no royalties, licenses, consulting fees, speaker fees, honoraria for lectures or presentations, expert testimony, patents, or participation on other boards.</p>
</sec>
<sec sec-type="ai-statement" id="s10">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript. The author takes full responsibility for the content of this manuscript. Generative AI tools were used solely for linguistic refinement, including editing for clarity, phrasing, and structure. Generative AI was not used to generate original ideas, factual content, citations, data, analyses, or interpretations.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/46314/overview">Dov Greenbaum</ext-link>, Yale University, United States</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3042512/overview">Sarel Ohayon</ext-link>, Bar-Ilan University, Israel</p>
</fn>
</fn-group>
</back>
</article>