<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Psychology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-1078</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyg.2026.1745164</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Algorithmic anxiety: AI, work, and the evolving psychological contract in digital discourse</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Shekhar</surname>
<given-names>Anurag</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3124874"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Saurombe</surname>
<given-names>Musawenkosi D.</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<uri xlink:href="https://loop.frontiersin.org/people/1341285"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><institution>Department of Industrial Psychology and People Management, College of Business and Economics, University of Johannesburg</institution>, <city>Auckland Park</city>, <country country="ZA">South Africa</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Anurag Shekhar, <email xlink:href="mailto:anuragshekhar.email@gmail.com">anuragshekhar.email@gmail.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-17">
<day>17</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1745164</elocation-id>
<history>
<date date-type="received">
<day>12</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>26</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Shekhar and Saurombe.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Shekhar and Saurombe</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-17">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>The study used a mixed-methods approach, utilising 1,454 Reddit narratives about AI-driven job displacement, to examine how AI is transforming the workplace psychological contract.</p>
</sec>
<sec>
<title>Methods</title>
<p>This study used both quantitative and qualitative methods of analysis. It analysed sentiment patterns, emotional responses, and thematic content from digital discourse.</p>
</sec>
<sec>
<title>Results</title>
<p>While our results show a surface level of optimism regarding the use of AI (52% of all sentiment was positive according to VADER), our results also showed a significant amount of negative sentiment (51% of all sentiment was negative according to BERT) that indicates a deeper concern of people in terms of their feelings of &#x201C;algorithmic anxiety&#x201D; related to job loss. Network analysis showed three interconnected discourse groups centered on employment disruption, ethical concerns, and technical systems (modularity Q = 0.42). Furthermore, seven themes emerged from the data analysis: shattered trust and corporate betrayal, eroded identities, technostress, devalued expertise, anxiety about the future, cynicism about adapting, and affirming human values, which illustrate how the use of AI has disrupted the psychological contract between employees and employers.</p>
</sec>
<sec>
<title>Discussion</title>
<p>This study adds to psychological contract theory by illustrating ways that technology can breach an individual&#x2019;s psychological contract at work. In addition, this study extends existing technostress literature by identifying specific sources of stress associated with AI use in the workplace. Finally, it applies self-determination theory to work settings where algorithms are shaping the work environment. Practically speaking, the findings suggest that employers who wish to address the growing problem of &#x201C;algorithmic anxiety&#x201D; should engage in transparent communication, involve employees in decision-making, and design their technological systems to preserve employee dignity in increasingly automated workplaces.</p>
</sec>
</abstract>
<kwd-group>
<kwd>algorithmic anxiety</kwd>
<kwd>artificial intelligence</kwd>
<kwd>conservation of resources</kwd>
<kwd>digital discourse</kwd>
<kwd>mixed methods</kwd>
<kwd>psychological contract</kwd>
<kwd>Reddit</kwd>
<kwd>self-determination theory</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="0"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="75"/>
<page-count count="16"/>
<word-count count="14947"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Organizational Psychology</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>The integration of artificial intelligence into workplaces represents not merely technological advancement but a fundamental reconfiguration of work itself, challenging centuries-old assumptions about human labor, expertise, and organizational relationships (<xref ref-type="bibr" rid="ref19">Cramarenco et al., 2023</xref>; <xref ref-type="bibr" rid="ref52">Murire, 2024</xref>). As organizations deploy increasingly sophisticated AI systems (from machine learning algorithms that make hiring decisions to autonomous agents that perform complex analytical tasks), the implicit social contracts binding employers and employees undergo unprecedented strain (<xref ref-type="bibr" rid="ref7">Bankins et al., 2024</xref>; <xref ref-type="bibr" rid="ref52">Murire, 2024</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>). This transformation is most visible in digital discourse where workers collectively process their experiences of displacement and resistance through online platforms that offer both anonymity and community (<xref ref-type="bibr" rid="ref21">Dang and Liu, 2025</xref>; <xref ref-type="bibr" rid="ref31">Gagn&#x00E9; et al., 2022</xref>).</p>
<p>The integration of artificial intelligence into workplaces is profoundly uneven across geographic and economic contexts. In advanced economies like Germany, AI adoption has proceeded cautiously, with strong data protection frameworks and established worker consultation mechanisms shaping implementation (<xref ref-type="bibr" rid="ref10">Bitkom, 2020</xref>; <xref ref-type="bibr" rid="ref55">&#x00D6;zkiziltan and Hassel, 2021</xref>). However, even in these regulated environments, AI redistributes demand toward highly skilled professionals while creating &#x201C;winners and losers&#x201D; among vulnerable groups including women and older workers (<xref ref-type="bibr" rid="ref55">&#x00D6;zkiziltan and Hassel, 2021</xref>). In emerging economies, digital disparities are further compounded by infrastructure gaps and limited technological access, determining whether AI serves as a catalyst for prosperity or a driver of economic exclusion (<xref ref-type="bibr" rid="ref16">Calugan et al., 2025</xref>). The growth of platform-based gig work has introduced additional complexity, creating a &#x201C;gray zone&#x201D; between internal and external labor where workers face precarious employment managed by opaque, algorithmic systems (<xref ref-type="bibr" rid="ref42">Keegan and Meijerink, 2025</xref>).</p>
<p>These structural disparities intersect with cultural factors that shape how workers experience AI-driven changes. Cultural orientations influence whether job displacement is processed through collectivist interdependent networks emphasizing social harmony or through individualist frameworks focusing on self-directed resource preservation (<xref ref-type="bibr" rid="ref37">Hobfoll et al., 2018</xref>). AI&#x2019;s pervasive presence fundamentally alters psychological contracts by reshaping unwritten expectations between employers and employees, often diminishing perceptions of autonomy, dignity, and job security (<xref ref-type="bibr" rid="ref50">Moghayedi et al., 2024</xref>; <xref ref-type="bibr" rid="ref73">Tomprou and Lee, 2022</xref>). The implementation of algorithmic management can trigger experiences of dehumanization, leaving workers feeling reduced to data points while experiencing heightened loneliness, cynicism, and technostress (<xref ref-type="bibr" rid="ref21">Dang and Liu, 2025</xref>; <xref ref-type="bibr" rid="ref70">Tarafdar et al., 2019</xref>).</p>
<p>The current moment represents a critical juncture in the history of work. Unlike previous waves of automation that primarily displaced manual labor, AI threatens knowledge work, creative professions, and roles previously considered uniquely human. Recent evidence suggests AI&#x2019;s workplace impact is profoundly ambivalent and unevenly distributed (<xref ref-type="bibr" rid="ref19">Cramarenco et al., 2023</xref>; <xref ref-type="bibr" rid="ref31">Gagn&#x00E9; et al., 2022</xref>; <xref ref-type="bibr" rid="ref54">Oyekunle et al., 2024</xref>; <xref ref-type="bibr" rid="ref71">Taslim et al., 2025</xref>; <xref ref-type="bibr" rid="ref75">Zirar et al., 2023</xref>). While automation promises efficiency gains, cost reduction, and liberation from routine tasks, it simultaneously generates job insecurity, identity erosion, and novel forms of workplace stress that existing organizational frameworks struggle to address (<xref ref-type="bibr" rid="ref19">Cramarenco et al., 2023</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>). The COVID-19 pandemic dramatically accelerated these dynamics, compressing years of anticipated digital transformation into months while leaving workers anxious about their continued relevance amid increasingly &#x201C;intelligent&#x201D; machines (<xref ref-type="bibr" rid="ref7">Bankins et al., 2024</xref>; <xref ref-type="bibr" rid="ref32">Garc&#x00ED;a-Madurga et al., 2024</xref>).</p>
<p>This acceleration has revealed profound gaps between technological capability and human adaptation (<xref ref-type="bibr" rid="ref29">Frank et al., 2019</xref>; <xref ref-type="bibr" rid="ref40">Johnson et al., 2020</xref>; <xref ref-type="bibr" rid="ref75">Zirar et al., 2023</xref>). Organizations often implement AI with minimal consideration of psychological impacts, treating workforce transformation as a technical problem rather than a human one (<xref ref-type="bibr" rid="ref40">Johnson et al., 2020</xref>; <xref ref-type="bibr" rid="ref75">Zirar et al., 2023</xref>). Workers report feeling blindsided by automation decisions, betrayed by employers who promised job security, and increasingly uncertain about the sustainability of any career path (<xref ref-type="bibr" rid="ref7">Bankins et al., 2024</xref>; <xref ref-type="bibr" rid="ref26">Duggan et al., 2022</xref>). These experiences coalesce into what we term &#x201C;algorithmic anxiety,&#x201D; a complex syndrome encompassing not just fear of job loss but deeper concerns about human value, professional identity, and the meaning of work in an automated future (<xref ref-type="bibr" rid="ref21">Dang and Liu, 2025</xref>; <xref ref-type="bibr" rid="ref44">Kinowska and Sienkiewicz, 2023</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>).</p>
<p>Digital platforms, particularly Reddit, have emerged as crucial spaces where workers process these transformations (<xref ref-type="bibr" rid="ref3">Amaya et al., 2021</xref>; <xref ref-type="bibr" rid="ref22">De Choudhury and De, 2014</xref>). Reddit&#x2019;s structure, combining anonymity with community validation through upvoting creates unique conditions for authentic disclosure (<xref ref-type="bibr" rid="ref3">Amaya et al., 2021</xref>; <xref ref-type="bibr" rid="ref22">De Choudhury and De, 2014</xref>; <xref ref-type="bibr" rid="ref41">Kahlow, 2024</xref>). Workers share experiences too sensitive for workplace discussion, collectively constructing narratives about AI&#x2019;s impact that often contradict official corporate communications (<xref ref-type="bibr" rid="ref4">Andalibi et al., 2018</xref>; <xref ref-type="bibr" rid="ref22">De Choudhury and De, 2014</xref>; <xref ref-type="bibr" rid="ref53">Nukhu et al., 2025</xref>). These digital narratives reveal not just individual distress but emergent patterns of collective sense-making about technology, work, and human value in the algorithmic age (<xref ref-type="bibr" rid="ref12">Boyd and Crawford, 2012</xref>; <xref ref-type="bibr" rid="ref22">De Choudhury and De, 2014</xref>; <xref ref-type="bibr" rid="ref47">Leavitt and Robinson, 2017</xref>; <xref ref-type="bibr" rid="ref53">Nukhu et al., 2025</xref>).</p>
<p>The significance of understanding these dynamics extends far beyond documenting worker experiences. As AI capabilities expand toward artificial general intelligence, the question is not whether human work will be transformed but how society will manage that transformation (<xref ref-type="bibr" rid="ref16">Calugan et al., 2025</xref>; <xref ref-type="bibr" rid="ref43">Khogali and Mekid, 2023</xref>; <xref ref-type="bibr" rid="ref75">Zirar et al., 2023</xref>). Current approaches often prioritize technical efficiency and economic optimization while neglecting human considerations, leading to resistance, disengagement, and the erosion of organizational trust (<xref ref-type="bibr" rid="ref16">Calugan et al., 2025</xref>; <xref ref-type="bibr" rid="ref44">Kinowska and Sienkiewicz, 2023</xref>; <xref ref-type="bibr" rid="ref48">Leicht-Deobald et al., 2019</xref>; <xref ref-type="bibr" rid="ref58">Ragu-Nathan et al., 2008</xref>). Without understanding the psychological and social dimensions of AI integration, organizations risk undermining the very productivity gains they seek while inflicting unnecessary human suffering.</p>
<p>Despite growing research attention, significant knowledge gaps persist in understanding AI&#x2019;s psychosocial workplace impacts (<xref ref-type="bibr" rid="ref21">Dang and Liu, 2025</xref>; <xref ref-type="bibr" rid="ref44">Kinowska and Sienkiewicz, 2023</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>; <xref ref-type="bibr" rid="ref74">Vrontis et al., 2022</xref>). Existing studies predominantly focus on narrow contexts with limited attention to the lived experiences of those directly displaced by automation (<xref ref-type="bibr" rid="ref7">Bankins et al., 2024</xref>; <xref ref-type="bibr" rid="ref43">Khogali and Mekid, 2023</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>; <xref ref-type="bibr" rid="ref75">Zirar et al., 2023</xref>). Methodological approaches remain fragmented, with quantitative studies often lacking emotional nuance, while qualitative research often lacks the scale to identify broader patterns (<xref ref-type="bibr" rid="ref45">K&#x00F6;chling and Wehner, 2020</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>). Few studies successfully integrate computational and qualitative methods to capture both the breadth and depth of worker experiences in the face of algorithmic disruption (<xref ref-type="bibr" rid="ref71">Taslim et al., 2025</xref>).</p>
<p>Most critically, theoretical frameworks for understanding technology-mediated psychological contracts remain underdeveloped (<xref ref-type="bibr" rid="ref7">Bankins et al., 2024</xref>; <xref ref-type="bibr" rid="ref42">Keegan and Meijerink, 2025</xref>). Traditional organizational theories assume human actors on both sides of the employment relationship, but what happens when algorithms make decisions previously reserved for human managers (<xref ref-type="bibr" rid="ref42">Keegan and Meijerink, 2025</xref>)? How do workers maintain professional identity when machines perform their core tasks (<xref ref-type="bibr" rid="ref21">Dang and Liu, 2025</xref>; <xref ref-type="bibr" rid="ref42">Keegan and Meijerink, 2025</xref>; <xref ref-type="bibr" rid="ref43">Khogali and Mekid, 2023</xref>)? What new forms of resistance emerge when traditional labor organizing confronts algorithmic management (<xref ref-type="bibr" rid="ref31">Gagn&#x00E9; et al., 2022</xref>; <xref ref-type="bibr" rid="ref34">Golgeci et al., 2025</xref>; <xref ref-type="bibr" rid="ref42">Keegan and Meijerink, 2025</xref>)? These questions require not just empirical investigation but theoretical innovation (<xref ref-type="bibr" rid="ref55">&#x00D6;zkiziltan and Hassel, 2021</xref>).</p>
<p>This study addresses these gaps through a comprehensive mixed-methods analysis of Reddit discourse about AI-driven job displacement. We analyze 1,454 comments from a discussion thread explicitly soliciting narratives from workers affected by automation, combining computational text analytics to map discourse patterns with qualitative thematic analysis to understand meaning and context. The discourse community includes both workers directly displaced by AI and those experiencing anticipatory anxiety about potential future displacement, with community engagement through upvoting and commenting revealing collective sense-making processes. We apply four theoretical lenses (psychological contract theory, technostress, self-determination theory, and conservation of resources) to interpret how workers experience and collectively construct understandings of AI-driven workplace transformation. Our aim is to illuminate the human dimensions of algorithmic management, contributing both theoretical insights for scholars and practical guidance for organizations seeking to implement ethical AI.</p>
</sec>
<sec id="sec2">
<label>2</label>
<title>Literature review</title>
<sec id="sec3">
<label>2.1</label>
<title>Theoretical foundations for understanding AI&#x2019;s workplace impact</title>
<p>The psychological ramifications of AI in the workplace cannot be understood through a single theoretical lens. Instead, we must integrate multiple perspectives to capture the complexity of human responses to algorithmic disruption. Four theoretical frameworks prove particularly illuminating: psychological contract theory, technostress theory, self-determination theory, and conservation of resources theory. Each offers unique insights while together providing a comprehensive understanding of algorithmic anxiety.</p>
<p>Psychological contract theory (<xref ref-type="bibr" rid="ref62">Rousseau, 1995</xref>) provides the foundational framework for understanding how AI disrupts workplace relationships. This theory posits that beyond formal employment contracts, workers and employers maintain implicit expectations of mutual obligations. Employees expect fair treatment, job security, and opportunities for career development in exchange for their loyalty, effort, and commitment. Recent empirical work reveals how AI fundamentally challenges these assumptions. When organizations implement AI systems that lead to layoffs or significant role changes, workers perceive not just strategic business decisions but profound violations of trust (<xref ref-type="bibr" rid="ref7">Bankins et al., 2024</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>).</p>
<p>The nature of these violations differs qualitatively from traditional organizational changes. <xref ref-type="bibr" rid="ref44">Kinowska and Sienkiewicz (2023)</xref> found that AI-driven decisions feel particularly impersonal and arbitrary, lacking the human element that traditionally cushioned difficult organizational transitions. Workers report feeling &#x201C;<italic>betrayed by an algorithm</italic>,&#x201D; a phrase that captures the unique sense of alienation that comes with being evaluated, managed, or replaced by non-human entities. The psychological contract (built on assumptions of human reciprocity) struggles to accommodate relationships where one party is algorithmic. This creates what <xref ref-type="bibr" rid="ref42">Keegan and Meijerink (2025)</xref> term &#x201C;algorithmic accountability gaps,&#x201D; where workers cannot identify whom to hold responsible for AI-driven decisions affecting their livelihood.</p>
<p>Technostress theory (<xref ref-type="bibr" rid="ref70">Tarafdar et al., 2019</xref>) highlights the psychological strain associated with technology adoption, with AI introducing stressors that are qualitatively different from those of traditional information systems. The framework identifies five technostress creators: techno-overload (excessive demands resulting from technology use), techno-invasion (technology blurring work-life boundaries), techno-complexity (the constant need to learn new systems), techno-insecurity (fear of being replaced by technology), and techno-uncertainty (continuous technological changes). AI amplifies each dimension while adding novel stressors unique to intelligent systems (<xref ref-type="bibr" rid="ref70">Tarafdar et al., 2019</xref>).</p>
<p>Recent empirical studies demonstrate AI&#x2019;s distinctive stress profile. Algorithmic management creates &#x201C;performance anxiety loops&#x201D; where workers feel perpetually monitored and evaluated by opaque systems they neither understand nor trust (<xref ref-type="bibr" rid="ref32">Garc&#x00ED;a-Madurga et al., 2024</xref>; <xref ref-type="bibr" rid="ref65">Segkouli et al., 2023</xref>). Unlike traditional IT that workers can master through training, AI systems continuously evolve, creating perpetual techno-complexity. <xref ref-type="bibr" rid="ref75">Zirar et al. (2023)</xref> document how AI-related technostress involves existential dimensions absent from traditional technology stress; workers question not just their competence with tools but their fundamental value as humans. The concept of &#x201C;algorithmic precarity&#x201D; emerges, where workers experience chronic uncertainty about whether their skills, regardless of proficiency level, will remain relevant.</p>
<p>Self-determination theory (<xref ref-type="bibr" rid="ref63">Ryan and Deci, 2000</xref>) identifies three fundamental psychological needs essential for well-being and motivation: autonomy, competence, and relatedness. Autonomy suffers when algorithmic management dictates work processes through rigid protocols that eliminate human judgment and creativity. <xref ref-type="bibr" rid="ref31">Gagn&#x00E9; et al. (2022)</xref> provide extensive evidence that algorithmic management reduces perceived autonomy as workers cannot negotiate with or influence algorithmic decisions.</p>
<p>Competence needs are threatened when AI systems outperform humans at tasks previously defining professional identity (<xref ref-type="bibr" rid="ref34">Golgeci et al., 2025</xref>). Customer service workers report feeling isolated when AI handles initial client contact, leaving humans to manage only escalated problems (<xref ref-type="bibr" rid="ref40">Johnson et al., 2020</xref>). The cumulative effect is profound demotivation, with workers questioning the purpose of developing skills that machines can instantly replicate (<xref ref-type="bibr" rid="ref31">Gagn&#x00E9; et al., 2022</xref>; <xref ref-type="bibr" rid="ref34">Golgeci et al., 2025</xref>).</p>
<p>Conservation of resources theory (<xref ref-type="bibr" rid="ref36">Hobfoll, 1989</xref>) frames AI anxiety as a response to a perceived threat or loss of resources. This theory posits that individuals strive to obtain, retain, and protect resources they value, whether material (such as salary and job security), personal (including skills and health), or social (including status and relationships) (<xref ref-type="bibr" rid="ref37">Hobfoll et al., 2018</xref>; <xref ref-type="bibr" rid="ref48">Leicht-Deobald et al., 2019</xref>). Stress occurs when resources are threatened, lost, or when investment fails to yield expected returns. AI threatens multiple resource categories simultaneously (<xref ref-type="bibr" rid="ref34">Golgeci et al., 2025</xref>).</p>
<p>The threat begins with potential job loss (material resource) but quickly cascades (<xref ref-type="bibr" rid="ref36">Hobfoll, 1989</xref>; <xref ref-type="bibr" rid="ref37">Hobfoll et al., 2018</xref>; <xref ref-type="bibr" rid="ref55">&#x00D6;zkiziltan and Hassel, 2021</xref>). Professional identity (a personal resource) erodes when machines perform tasks that define it. Social status diminishes when expertise becomes obsolete (<xref ref-type="bibr" rid="ref16">Calugan et al., 2025</xref>; <xref ref-type="bibr" rid="ref36">Hobfoll, 1989</xref>). Future planning capacity (psychological resource) suffers under radical uncertainty about career viability (<xref ref-type="bibr" rid="ref36">Hobfoll, 1989</xref>; <xref ref-type="bibr" rid="ref72">Tenakwah and Watson, 2025</xref>; <xref ref-type="bibr" rid="ref75">Zirar et al., 2023</xref>). Even workers retaining their positions experience resource loss through &#x201C;skill hollowing,&#x201D; where AI handles challenging tasks, leaving humans with either mundane work or high-stress exception handling (<xref ref-type="bibr" rid="ref5">Ashok et al., 2022</xref>; <xref ref-type="bibr" rid="ref36">Hobfoll, 1989</xref>; <xref ref-type="bibr" rid="ref55">&#x00D6;zkiziltan and Hassel, 2021</xref>). The anticipatory nature of these threats (i.e., workers fearing future automation) creates chronic stress that depletes coping resources before actual displacement occurs (<xref ref-type="bibr" rid="ref34">Golgeci et al., 2025</xref>; <xref ref-type="bibr" rid="ref37">Hobfoll et al., 2018</xref>).</p>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>Empirical evidence: the state of knowledge</title>
<p>Recent systematic reviews reveal AI&#x2019;s profoundly ambivalent impact on the workplace. While AI-driven automation can boost efficiency and reduce monotonous work, it also increases job insecurity and the need for continuous reskilling (<xref ref-type="bibr" rid="ref19">Cramarenco et al., 2023</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>). The impact is uneven across sectors and regions, with non-Western and lower-skilled workers facing greater risks (<xref ref-type="bibr" rid="ref19">Cramarenco et al., 2023</xref>; <xref ref-type="bibr" rid="ref50">Moghayedi et al., 2024</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>; <xref ref-type="bibr" rid="ref75">Zirar et al., 2023</xref>).</p>
<p>The COVID-19 pandemic significantly altered the trajectories of AI adoption, particularly in the wake of accelerated digital transformation (<xref ref-type="bibr" rid="ref19">Cramarenco et al., 2023</xref>; <xref ref-type="bibr" rid="ref43">Khogali and Mekid, 2023</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>). Organizations implemented AI systems rapidly during the pandemic, often bypassing normal change management processes (<xref ref-type="bibr" rid="ref7">Bankins et al., 2024</xref>; <xref ref-type="bibr" rid="ref32">Garc&#x00ED;a-Madurga et al., 2024</xref>).</p>
<p>Organizational trust emerges consistently as a critical casualty of algorithmic management. AI-driven decision-making can erode organizational trust and psychological contracts, particularly when perceived as impersonal or leading to layoffs (<xref ref-type="bibr" rid="ref7">Bankins et al., 2024</xref>; <xref ref-type="bibr" rid="ref44">Kinowska and Sienkiewicz, 2023</xref>; <xref ref-type="bibr" rid="ref68">Soulami et al., 2024</xref>). Employees report feelings of betrayal and reduced reciprocity, especially when algorithmic management lacks transparency or fairness (<xref ref-type="bibr" rid="ref44">Kinowska and Sienkiewicz, 2023</xref>; <xref ref-type="bibr" rid="ref56">Pereira et al., 2023</xref>; <xref ref-type="bibr" rid="ref71">Taslim et al., 2025</xref>). The effects are more pronounced in gig work, call centers, and sectors with high algorithmic oversight (<xref ref-type="bibr" rid="ref44">Kinowska and Sienkiewicz, 2023</xref>; <xref ref-type="bibr" rid="ref74">Vrontis et al., 2022</xref>; <xref ref-type="bibr" rid="ref75">Zirar et al., 2023</xref>).</p>
</sec>
<sec id="sec5">
<label>2.3</label>
<title>Ethical dimensions and moral injury</title>
<p>The ethical implications of workplace AI extend beyond traditional concerns about bias and privacy to fundamental questions about human dignity and the nature of work itself. Ethical concerns (i.e., fairness, bias, privacy, and human dignity) are central to debates on AI in the workplace (<xref ref-type="bibr" rid="ref18">Cheng et al., 2022</xref>; <xref ref-type="bibr" rid="ref38">Hunkenschroer and Luetge, 2022</xref>; <xref ref-type="bibr" rid="ref46">Kordzadeh and Ghasemaghaei, 2022</xref>; <xref ref-type="bibr" rid="ref54">Oyekunle et al., 2024</xref>). Moral injury and alienation are reported when employees feel replaced or surveilled by AI (<xref ref-type="bibr" rid="ref21">Dang and Liu, 2025</xref>; <xref ref-type="bibr" rid="ref35">Gratch and Fast, 2022</xref>; <xref ref-type="bibr" rid="ref38">Hunkenschroer and Luetge, 2022</xref>; <xref ref-type="bibr" rid="ref54">Oyekunle et al., 2024</xref>).</p>
<p>Ethics and discrimination in artificial intelligence-enabled recruitment practices reveal systematic bias patterns (<xref ref-type="bibr" rid="ref17">Chen, 2023</xref>). Algorithmic bias in recruitment and performance management can perpetuate discrimination and erode perceptions of fairness (<xref ref-type="bibr" rid="ref17">Chen, 2023</xref>; <xref ref-type="bibr" rid="ref38">Hunkenschroer and Luetge, 2022</xref>; <xref ref-type="bibr" rid="ref46">Kordzadeh and Ghasemaghaei, 2022</xref>; <xref ref-type="bibr" rid="ref54">Oyekunle et al., 2024</xref>; <xref ref-type="bibr" rid="ref69">Starke et al., 2022</xref>).</p>
<p>Ethics of AI-Enabled Recruiting and Selection research identifies multiple categories of ethical harm from workplace AI (<xref ref-type="bibr" rid="ref38">Hunkenschroer and Luetge, 2022</xref>). Workers experiencing AI-driven changes report risks associated with dehumanization due to artificial intelligence use (<xref ref-type="bibr" rid="ref21">Dang and Liu, 2025</xref>). Algorithmic evaluation and AI-driven HR practices affect employees&#x2019; sense of competence, autonomy, and purpose, with creative professionals and knowledge workers particularly sensitive to perceived dehumanization (<xref ref-type="bibr" rid="ref7">Bankins et al., 2024</xref>; <xref ref-type="bibr" rid="ref18">Cheng et al., 2022</xref>; <xref ref-type="bibr" rid="ref31">Gagn&#x00E9; et al., 2022</xref>).</p>
</sec>
<sec id="sec6">
<label>2.4</label>
<title>Digital discourse as a nexus for collective interpretation</title>
<p>Digital platforms afford researchers unprecedented opportunities to observe authentic employee experiences, often unavailable to conventional organizational studies (<xref ref-type="bibr" rid="ref15">Brown et al., 2018</xref>). Traditional workplace research often encounters pervasive response bias because employees are reluctant to express negative opinions, fearing professional repercussions that could impact their employment status (<xref ref-type="bibr" rid="ref3">Amaya et al., 2021</xref>; <xref ref-type="bibr" rid="ref12">Boyd and Crawford, 2012</xref>). Platforms such as Reddit mitigate these limitations because their pseudonymous or anonymous nature facilitates candid discourse and low inhibition (<xref ref-type="bibr" rid="ref15">Brown et al., 2018</xref>; <xref ref-type="bibr" rid="ref41">Kahlow, 2024</xref>). This reliance on dissociative anonymity enables individuals to express intimate details they would otherwise withhold in contexts where they could be identified (<xref ref-type="bibr" rid="ref41">Kahlow, 2024</xref>; <xref ref-type="bibr" rid="ref67">Sit et al., 2024</xref>). Furthermore, community features, including voting and commentary, enable the collective validation and interpretation of shared experiences (<xref ref-type="bibr" rid="ref3">Amaya et al., 2021</xref>; <xref ref-type="bibr" rid="ref22">De Choudhury and De, 2014</xref>; <xref ref-type="bibr" rid="ref47">Leavitt and Robinson, 2017</xref>).</p>
<p>Research confirms the distinctive value of platforms like Reddit for investigating sensitive or stigmatizing issues, particularly within mental health discourse (<xref ref-type="bibr" rid="ref22">De Choudhury and De, 2014</xref>; <xref ref-type="bibr" rid="ref41">Kahlow, 2024</xref>). Studies demonstrate that Reddit&#x2019;s architecture, which combines the ability to employ varying degrees of anonymity (including &#x201C;throwaway&#x201D; accounts) with community curation via voting, generates exceptionally rich data about challenging topics (<xref ref-type="bibr" rid="ref22">De Choudhury and De, 2014</xref>). This environment encourages users toward deeper self-disclosure, enabling narratives related to mental illness, work, and personal relationships that would typically be too sensitive or risky in identifiable settings (<xref ref-type="bibr" rid="ref22">De Choudhury and De, 2014</xref>; <xref ref-type="bibr" rid="ref41">Kahlow, 2024</xref>; <xref ref-type="bibr" rid="ref67">Sit et al., 2024</xref>).</p>
<p>Nevertheless, the systematic analysis of massive digital discourse introduces inherent methodological difficulties (<xref ref-type="bibr" rid="ref12">Boyd and Crawford, 2012</xref>; <xref ref-type="bibr" rid="ref57">Proferes et al., 2021</xref>). Automated tools like sentiment analysis and Natural Language Processing (NLP) frequently struggle with the complexities inherent in social communication (<xref ref-type="bibr" rid="ref6">Balcio&#x011F;lu et al., 2025</xref>). The interpretation of context in digital environments is problematic, as nuances such as sarcasm, irony, cultural references, and typos can severely confound sophisticated algorithms seeking to interpret meaning (<xref ref-type="bibr" rid="ref6">Balcio&#x011F;lu et al., 2025</xref>). Researchers must actively address this complexity, recognizing that data interpretation is inherently subjective and that claims of objectivity are often misleading, particularly when information is removed from its original conversational context (<xref ref-type="bibr" rid="ref12">Boyd and Crawford, 2012</xref>). A sophisticated contextual analysis framework is necessary to isolate genuine sentiment from broader community trends and account for temporal biases, thereby improving the reliability of the findings (<xref ref-type="bibr" rid="ref6">Balcio&#x011F;lu et al., 2025</xref>).</p>
</sec>
</sec>
<sec sec-type="methods" id="sec7">
<label>3</label>
<title>Methods</title>
<sec id="sec8">
<label>3.1</label>
<title>Research design and epistemological framework</title>
<p>This study employs an explanatory sequential mixed-methods design (<xref ref-type="bibr" rid="ref20">Creswell and Clark, 2017</xref>) to examine AI&#x2019;s psychosocial workplace impacts through digital discourse analysis. This approach integrates computational text analytics for pattern identification with qualitative thematic analysis for meaning interpretation. The sequential design allows quantitative findings to inform qualitative investigation, while maintaining flexibility for emergent insights.</p>
<p>Our epistemological stance combines critical realism with social constructivism, and this dual perspective directly shaped our analytical approach at multiple levels. Critical realism proposes that reality exists independently of our perceptions but is only accessible through socially mediated interpretation (<xref ref-type="bibr" rid="ref9">Bhaskar, 1975</xref>). Social constructivism emphasizes that meaning emerges through collective sense-making processes (<xref ref-type="bibr" rid="ref8">Berger and Luckmann, 1966</xref>). These seemingly contradictory positions prove complementary when analyzing workplace AI discourse.</p>
<p>We operationalized this epistemological framework through specific analytical decisions. First, we treat certain elements as objectively real (the ontological dimension): Retrenchment at work, wage reductions, and organizational restructuring described by participants are taken as factual events that occurred in the material world. Our computational analysis of discourse patterns (sentiment distributions, topic frequencies, network structures) similarly treats the text corpus as an objective reality amenable to systematic measurement.</p>
<p>Second, we simultaneously treat the meaning and emotional significance of these events as socially constructed (the epistemological dimension). Workers collectively construct interpretations of AI&#x2019;s impact through digital discourse, negotiating whether displacement represents &#x201C;innovation,&#x201D; &#x201C;betrayal,&#x201D; &#x201C;inevitability,&#x201D; or &#x201C;injustice.&#x201D; Our qualitative thematic analysis examines this meaning-making process, recognizing that &#x201C;algorithmic anxiety&#x201D; is not a direct physiological response to technology but a culturally and linguistically mediated psychological experience. The same objective event (being replaced by AI) can be experienced and narrated differently depending on interpretive frameworks available within particular discourse communities.</p>
<p>Third, we recognize that digital discourse simultaneously reflects and constitutes reality (the dialectical relationship). Reddit comments both represent workers&#x2019; existing psychological states and actively construct those states through the act of articulation and community validation. When a worker writes &#x201C;I feel betrayed by my employer&#x201D; and receives upvotes and supportive comments, the psychological experience of betrayal is both expressed and intensified through the discursive process. This reflexive quality justifies our attention to discourse structure (how meaning is collectively constructed) alongside discourse content (what is being experienced).</p>
<p>This epistemological stance justified our methodological integration: computational methods capture patterns in the objective reality of discourse structure (what is observable and measurable), while qualitative interpretation examines how workers collectively construct meaning from those patterns (what significance they assign to events). The divergence between VADER and BERT sentiment analysis exemplifies this approach&#x2014;we treat both the surface linguistic features (objective) and the contextual meaning (constructed through linguistic conventions about irony and sarcasm) as simultaneously real and worthy of analysis.</p>
</sec>
<sec id="sec9">
<label>3.2</label>
<title>Data collection and sampling</title>
<p>We analyzed 1,454 comments from the Reddit thread &#x201C;<italic>Hey people who lost their jobs to AI, what happened?</italic>&#x201D; posted on <ext-link xlink:href="https://www.reddit.com/r/AskReddit/" ext-link-type="uri">r/AskReddit</ext-link> in 2025. This thread generated exceptional engagement (over 5,000 upvotes), providing one of the largest collections of firsthand accounts about AI displacement available for research. The r/AskReddit community, with 35 million members, represents diverse demographics and occupations, offering broader perspective than profession-specific forums.</p>
<p>Data collection was conducted through Reddit&#x2019;s official API, ensuring the complete capture of public comments while respecting the platform&#x2019;s terms of service. We excluded comments that were deleted, moderator posts, and bot-generated content. Comments ranged from brief responses (five words) to detailed narratives (over 2,000 words), with a median length of 73 words. The temporal concentration (most comments posted within 72&#x202F;h) captures a synchronous collective discussion rather than scattered individual posts.</p>
<p>The thread&#x2019;s framing <italic>(&#x201C;Hey people who lost their jobs to AI, what happened?&#x201D;)</italic> introduces systematic selection bias toward negative experiences of AI-driven workplace transformation. This framing explicitly solicits displacement narratives, attracting workers who experienced AI implementation as threatening, disruptive, or unjust, while likely excluding those whose AI experiences were neutral, positive, or genuinely augmenting. However, the discourse community engaging with these narratives extends beyond those directly displaced. The upvoting system and comment threads reveal participation from workers experiencing anticipatory anxiety about potential future displacement, sympathetic observers witnessing AI&#x2019;s impact on colleagues or industries, and knowledge workers more broadly recognizing themselves in these narratives. This broader engagement pattern suggests the discourse captures not only direct experiences of displacement but also collective sense-making about AI&#x2019;s implications across the knowledge work sector. The most highly upvoted comments (some receiving 10,000+ upvotes) indicate widespread resonance with displacement narratives, suggesting these experiences express concerns shared by workers not yet directly affected but recognizing their potential future in others&#x2019; present circumstances.</p>
<p>We contend this sampling characteristic shapes the scope of our findings while also providing methodological value aligned with our research objectives. Rather than seeking to measure the prevalence of different AI experiences across the working population, we aimed to understand the phenomenology of algorithmic anxiety&#x2014;both the lived experience of workers directly displaced and the anticipatory anxiety of those witnessing displacement and recognizing their vulnerability. The selection bias toward negative experiences provides access to rich, authentic narratives from those directly impacted, while the community validation through upvotes and supportive comments reveals how these narratives resonate with broader worker populations experiencing vicarious or anticipatory anxiety. This engagement pattern illuminates how algorithmic anxiety functions as both individual psychological response and collective social phenomenon.</p>
<p>The comments reveal diverse experiences within this negatively-affected population, including complete job elimination, partial role automation, forced early retirement, and pre-emptive career changes. Industries represented include creative services, data analysis, customer support, manufacturing, legal services, and education, suggesting the phenomenon crosses occupational boundaries. However, our findings characterize the experience and collective construction of algorithmic anxiety among workers directly or vicariously affected, rather than estimating the prevalence of such experiences in the general working population. Claims about how commonly workers experience AI negatively, what proportion experience algorithmic anxiety, or whether negative outcomes are inevitable would be unwarranted extrapolations from our data. The interpretive consequences of this sampling approach are discussed further in the Limitations section.</p>
</sec>
<sec id="sec10">
<label>3.3</label>
<title>Computational analysis pipeline</title>
<p>We implemented six complementary computational techniques to map discourse structure and emotional patterns. These methods were selected to capture different facets of emotional expression that single approaches might miss. VADER and BERT were paired deliberately to examine whether surface language aligns with contextual meaning in discourse about job displacement. VADER represents widely-used lexicon-based approaches optimized for social media (<xref ref-type="bibr" rid="ref39">Hutto and Gilbert, 2014</xref>), while BERT&#x2019;s transformer architecture captures contextual complexity and has demonstrated advantages on texts containing sarcasm (<xref ref-type="bibr" rid="ref61">Ribeiro et al., 2016</xref>; <xref ref-type="bibr" rid="ref64">Saha et al., 2022</xref>). The NRC Emotion Lexicon allows examination of discrete emotions beyond polarity (<xref ref-type="bibr" rid="ref51">Mohammad and Turney, 2013</xref>), while LDA identifies thematic structure without researcher preconception (<xref ref-type="bibr" rid="ref11">Blei, 2012</xref>). Network analysis reveals how concepts cluster in workers&#x2019; thinking. This multi-method approach provides complementary insights into the corpus.</p>
<p>Sentiment analysis comparison: We employed two distinct approaches to understand the complexity of emotional valence. VADER (Valence Aware Dictionary and sEntiment Reasoner), a rule-based tool optimized for social media, calculates sentiment through lexicon matching with adjustments for intensifiers, negations, and punctuation. Each comment received four scores: positive, negative, neutral, and compound (normalized aggregate). Classification thresholds followed standard conventions: compound scores&#x202F;&#x2265;&#x202F;0.05 indicated positive sentiment, scores&#x202F;&#x2264;&#x202F;&#x2212;0.05 indicated negative sentiment, and intermediate values indicated neutral sentiment.</p>
<p>For contextual analysis, we employed RoBERTa (Robustly Optimized BERT Pretraining Approach), fine-tuned on Twitter data. This transformer model uses attention mechanisms to consider word relationships across entire comments, better capturing sarcasm, irony, and contextual meaning. The model processed comments in 32-item batches with a maximum length of 512 tokens, outputting categorical predictions along with confidence scores.</p>
<p>The divergence between models (52% positive for VADER versus 51% negative for BERT) warrants explanation, as it reveals important differences in how these tools process emotionally complex text. Comparative studies indicate that lexicon-based methods like VADER and transformer-based approaches like BERT capture different dimensions of sentiment, with BERT demonstrating advantages on datasets containing sarcasm and contextual complexity (<xref ref-type="bibr" rid="ref61">Ribeiro et al., 2016</xref>; <xref ref-type="bibr" rid="ref64">Saha et al., 2022</xref>). VADER operates by matching words against pre-scored sentiment dictionaries, treating terms like &#x201C;free,&#x201D; &#x201C;opportunity,&#x201D; and &#x201C;great&#x201D; as inherently positive regardless of context (<xref ref-type="bibr" rid="ref39">Hutto and Gilbert, 2014</xref>). BERT&#x2019;s transformer architecture examines word relationships across entire sentences through attention mechanisms, allowing it to capture how surrounding context modifies apparent sentiment (<xref ref-type="bibr" rid="ref25">Devlin et al., 2019</xref>).</p>
<p>Manual inspection of the 659 comments where models disagreed most strongly revealed patterns suggesting VADER misclassified resigned acceptance and defensive humor as genuine positivity. Comments like &#x201C;Great news, everyone gets to retrain for jobs that do not exist yet!&#x201D; scored positive in VADER (detecting &#x201C;great&#x201D; and &#x201C;news&#x201D;) but negative in BERT (recognizing sarcasm through contextual cues). Similarly, &#x201C;At least I have more time to update my resume daily&#x201D; triggered positive VADER scores (detecting &#x201C;more time&#x201D;) while BERT identified underlying negativity through implied futility. These patterns echo findings from pandemic-related discourse, where VADER misclassified anxiety-laden posts based on surface markers while contextual models detected underlying distress (<xref ref-type="bibr" rid="ref64">Saha et al., 2022</xref>).</p>
<p>For this study, we treat BERT&#x2019;s contextual analysis as the more appropriate measure of underlying sentiment for our corpus, while recognizing VADER&#x2019;s results as potentially revealing how workers present their experiences in public forums. This interpretation assumes that ironic and resigned language reflects genuine negative emotion rather than neutral coping, an assumption we acknowledge may not hold universally. The discordance between methods informed our qualitative coding by directing attention to ironic expressions, resigned acceptance, and gallows humor as potentially significant features of the discourse rather than dismissing them as noise. However, this interpretation cannot be definitively validated with our current data. Alternative explanations remain plausible: workers might experience genuine emotional ambivalence rather than masked negativity, humor might represent effective coping that genuinely reduces distress rather than concealing it, or the divergence might partly reflect model-specific limitations rather than solely revealing emotional complexity. Validation would require complementary methods unavailable in our design (such as physiological stress measures, behavioral observations, or follow-up interviews with participants) to establish whether BERT&#x2019;s classifications better approximate participants&#x2019; actual emotional states than VADER&#x2019;s classifications.</p>
<p>Discrete emotion analysis: The NRC Emotion Lexicon mapped eight basic emotions (anger, fear, anticipation, trust, surprise, sadness, joy, disgust) plus positive/negative affect. NRC lexicon was selected over alternatives because its crowdsourced development provides coverage of the eight basic emotions identified in psychological research (<xref ref-type="bibr" rid="ref51">Mohammad and Turney, 2013</xref>), offering finer-grained emotional mapping than simple polarity measures. This allows examination of whether workers experience complex emotional mixtures (e.g., simultaneous trust and fear) rather than uniform negative or positive states. This word-association approach counts emotion-linked terms, providing granular emotional mapping beyond simple polarity. Results showed complex emotional co-occurrence: trust (13%) appeared alongside fear (6%), suggesting simultaneous faith in technology and personal anxiety.</p>
<p>Topic modeling for thematic discovery: Latent Dirichlet Allocation with eight topics optimally balanced interpretability with distinctiveness. The algorithm assumes documents contain mixtures of topics, with topics defined by word probability distributions. LDA was selected because its probabilistic approach allows comments to contain mixtures of topics, reflecting how workers may simultaneously discuss practical, emotional, and ethical dimensions rather than discrete separable themes (<xref ref-type="bibr" rid="ref11">Blei, 2012</xref>). The eight-topic solution was determined through coherence score optimization, balancing interpretability with distinctiveness. Hyperparameter tuning (<italic>&#x03B1;</italic>&#x202F;=&#x202F;0.1, <italic>&#x03B2;</italic>&#x202F;=&#x202F;0.01) encouraged sparse topic assignment. Coherence scores validated topic quality (C_v&#x202F;=&#x202F;0.52), indicating meaningful semantic clusters.</p>
<p>Keyword Significance Testing: TF-IDF (Term Frequency-Inverse Document Frequency) analysis identified statistically distinctive vocabulary. This technique weighs word importance by frequency within documents against rarity across the corpus. We retained the top 50 terms after removing stop words and applying frequency thresholds (minimum 5 occurrences, maximum 80% document frequency).</p>
<p>Semantic network construction: Co-occurrence networks visualized term relationships, with edges weighted by within-comment co-appearance frequency. This graph-based approach complements topic modeling by revealing which concepts cluster together in workers&#x2019; thinking, exposing cognitive associations that may not emerge as discrete topics. Network structure can indicate whether discourse is fragmented or integrated across different concerns. We applied modularity optimization (Louvain algorithm) for community detection, identifying cohesive term clusters. The resulting network showed three primary communities with modularity Q&#x202F;=&#x202F;0.42, indicating meaningful structural divisions.</p>
<p>Statistical validation: Inter-method reliability was assessed through convergent validity testing. Sentiment classifications showed fair agreement (Cohen&#x2019;s <italic>&#x03BA;</italic>&#x202F;=&#x202F;0.24), while topic assignments demonstrated moderate stability across multiple runs (average Jaccard similarity&#x202F;=&#x202F;0.68).</p>
</sec>
<sec id="sec11">
<label>3.4</label>
<title>Qualitative analysis protocol</title>
<p>Thematic analysis followed <xref ref-type="bibr" rid="ref13">Braun and Clarke's (2006)</xref> reflexive approach, emphasizing researcher interpretation rather than theme &#x201C;discovery.&#x201D; We utilized Atlas.ti for data management and coding.</p>
<p>Coding framework development: We created seven intentional coding questions bridging computational findings with theoretical frameworks:</p>
<list list-type="order">
<list-item>
<p>How do individuals emotionally respond to AI-driven change? (linking to sentiment/emotion findings)</p>
</list-item>
<list-item>
<p>How do commenters describe organizational fairness and trust? (connecting to psychological contract theory)</p>
</list-item>
<list-item>
<p>How has AI impacted professional identity and meaning? (relating to self-determination theory)</p>
</list-item>
<list-item>
<p>What technology-induced stresses do workers experience? (examining technostress dimensions)</p>
</list-item>
<list-item>
<p>How do individuals adapt to or resist AI adoption? (exploring agency and coping)</p>
</list-item>
<list-item>
<p>How do workers connect personal experiences to societal patterns? (investigating structural attributions)</p>
</list-item>
<list-item>
<p>What visions for human-AI futures emerge? (identifying prescriptive themes)</p>
</list-item>
</list>
<p>Iterative coding process: The initial coding remained close to the data, utilizing participants&#x2019; language where possible. We coded for semantic and latent meaning, capturing both explicit content and underlying assumptions. Second-cycle coding consolidated initial codes into candidate themes through a process of constant comparison. We sought patterns across the dataset while remaining attentive to divergent cases.</p>
<p>Theme development: Themes were constructed through iterative refinement, ensuring internal homogeneity and external heterogeneity. We consolidated the initial 10 themes into seven final themes by combining conceptually related patterns. For instance, &#x201C;AI as Corporate Justification&#x201D; merged with &#x201C;Shattered Psychological Contract&#x201D; as both addressed trust and betrayal. Each theme required substantial data support (minimum 20 comments) with clear conceptual boundaries.</p>
</sec>
<sec id="sec12">
<label>3.5</label>
<title>Integration strategy</title>
<p>Mixed-methods integration occurred at multiple points. Quantitative findings informed qualitative sampling. Comments were purposively selected to show sentiment disagreement between VADER and BERT for in-depth analysis. Topic models provided initial thematic categories, which were refined through qualitative coding. Network clusters guided attention to interconnected concepts during the interpretation process.</p>
<p>The integration philosophy followed complementarity logic; each method addresses the limitations of the others. Computational breadth complements qualitative depth; pattern identification supports meaning interpretation; statistical regularities contextualize individual narratives. This multi-level integration produces findings neither method could achieve independently.</p>
</sec>
<sec id="sec13">
<label>3.6</label>
<title>Ethical considerations</title>
<sec id="sec14">
<label>3.6.1</label>
<title>Studies involving human subjects</title>
<p>This study involved secondary analysis of publicly available Reddit comments. The researcher holds active ethical clearance from the University of Johannesburg&#x2019;s Department of Industrial Psychology and People Management Research Ethics Committee (IPPM-2022-618(D), valid until 12 June 2026) for the broader doctoral research program. However, this specific component analyzing publicly available Reddit data does not constitute human subjects research requiring formal ethics review under University of Johannesburg policies.</p>
<p>According to the University of Johannesburg&#x2019;s adopted guidelines (<xref ref-type="bibr" rid="ref24">Department of Health, Republic of South Africa, 2015</xref>), &#x201C;<italic>Research that relies exclusively on publicly available information or accessible through legislation or regulation usually need not undergo formal ethics review</italic>&#x201D; (Section 1.1.8, p. 9). The guidelines further specify that research involving observation of people in public spaces (including digital spaces) or secondary use of anonymous information is typically exempt from formal review when specific criteria are met: (1) no direct interaction with individuals or groups, (2) no staged intervention, (3) individuals have no reasonable expectation of privacy, (4) findings do not identify individuals or groups, and (5) no identifiable information is generated during the research process (Section 4.3.2, p. 34).</p>
<p>This research meets all exemption criteria specified in the University of Johannesburg&#x2019;s ethics framework:</p>
<p>Publicly available information: The data consists entirely of comments posted on Reddit&#x2019;s r/AskReddit forum, a public platform where content is freely accessible without authentication requirements. Reddit&#x2019;s terms of service and platform design make explicit that user contributions are publicly viewable and may be accessed by third parties (<xref ref-type="bibr" rid="ref60">Reddit, 2025</xref>).</p>
<p>No direct interaction or intervention: The research involved no recruitment, contact with, or intervention involving participants. All data was accessed retrospectively in compliance with platform terms of service. No interaction with individual users occurred.</p>
<p>No reasonable expectation of privacy: Reddit users posting in public forums operate under contextual norms of publicity. The platform architecture, cultural practices, and terms of service establish that r/AskReddit posts are intended for broad public consumption (<xref ref-type="bibr" rid="ref33">Gliniecka, 2023</xref>; <xref ref-type="bibr" rid="ref57">Proferes et al., 2021</xref>).</p>
<p>No identification of individuals: All usernames and personal identifiers were excluded from data collection, analysis, and reporting. Findings are presented in aggregate form or with numerical participant identifiers (e.g., Participant 1). Verbatim quotes are contextualized to prevent reverse-identification through search engines (<xref ref-type="bibr" rid="ref1">Adams, 2024</xref>; <xref ref-type="bibr" rid="ref59">Reagle, 2022</xref>).</p>
<p>No generation of identifiable information: The research process generated no new identifiable information about participants. Analysis focused on textual patterns, themes, and discourse structure rather than individual attribution.</p>
<p>While formal ethics review was not required under institutional policy, we nevertheless adhered to established ethical principles for social media research (<xref ref-type="bibr" rid="ref28">Fiesler et al., 2024</xref>; <xref ref-type="bibr" rid="ref49">Markham and Buchanan, 2012</xref>; <xref ref-type="bibr" rid="ref57">Proferes et al., 2021</xref>). We applied &#x201C;situated ethics&#x201D; recognizing that even public data about sensitive topics (job loss, mental health) warrants protective measures. Our approach prioritized participant protection despite the exemption status, implementing de-identification protocols and minimizing potential risks through careful data handling and presentation.</p>
</sec>
<sec id="sec15">
<label>3.6.2</label>
<title>Inclusion of identifiable human data</title>
<p>No potentially identifiable images or data are presented in this study. All participant references use numerical identifiers. Direct quotations are limited and presented without user attribution.</p>
</sec>
</sec>
</sec>
<sec id="sec16">
<label>4</label>
<title>Findings</title>
<sec id="sec17">
<label>4.1</label>
<title>Quantitative patterns: mapping the emotional and semantic landscape</title>
<p>The computational analysis revealed a complex emotional and thematic landscape characterized by profound ambivalence and interconnected concerns about work, identity, and human value in an algorithmic age.</p>
<p>Sentiment divergence and emotional complexity: The stark disagreement between sentiment analysis methods illuminates the emotional complexity of AI discourse. VADER&#x2019;s lexicon-based approach classified 52.2% of comments as positive, 32.5% as negative, and 15.3% as neutral. In striking contrast, the contextual BERT model identified 51.1% as negative, 37.0% as neutral, and only 11.9% as positive. This reversal (from majority positive to majority negative) represents more than methodological variance; it reveals how workers employ linguistic strategies to cope with distressing experiences.</p>
<p>Manual analysis of the 659 comments where models disagreed most strongly (VADER positive, BERT negative) uncovered consistent patterns of ironic positivity and resigned acceptance. Comments like &#x201C;Great news, I&#x2019;m free from that soul-crushing job thanks to our AI overlords&#x201D; exemplify this pattern; surface markers of positivity (&#x201C;great,&#x201D; &#x201C;free&#x201D;) mask deep negativity captured by contextual analysis. This linguistic strategy serves multiple functions: maintaining face while expressing distress, using humor to process trauma, and performing resilience while experiencing vulnerability.</p>
<p>Emotional ambivalence and co-occurrence: The NRC emotion analysis revealed striking emotional co-occurrence patterns. Trust words appeared in 13% of comments, but closer examination showed that these often expressed broken trust (&#x201C;trusted my employer,&#x201D; &#x201C;cannot trust companies anymore&#x201D;). Fear (6%) and sadness (5%) co-occurred in 67% of cases, suggesting compound negative emotional states. Anticipation (9%) appeared split between positive (excitement about new opportunities) and negative (dread about future automation) valences.</p>
<p>Anger (4%) concentrated in comments about corporate behavior rather than technology itself. Workers directed anger at &#x201C;greedy executives,&#x201D; &#x201C;shareholder capitalism,&#x201D; and &#x201C;consultants who have never done real work.&#x201D; This attribution pattern suggests workers blame human decisions about AI implementation rather than technology itself&#x2014;a critical distinction for intervention design.</p>
<p>Thematic architecture: Topic modeling revealed eight statistically distinct themes with clear interpretive meaning:</p>
<p>Topic 1 (12.3%): &#x201C;Adaptation and Learning&#x201D; - Featured terms like &#x201C;learn,&#x201D; &#x201C;new,&#x201D; &#x201C;skills,&#x201D; &#x201C;adapt,&#x201D; suggesting active responses to AI challenges.</p>
<p>Topic 2 (20.6%): &#x201C;Corporate Power and Workplace Politics&#x201D; - Dominated by &#x201C;company,&#x201D; &#x201C;management,&#x201D; &#x201C;shareholders,&#x201D; &#x201C;profits,&#x201D; indicating structural critiques.</p>
<p>Topic 3 (9.7%): &#x201C;Technical Realities&#x201D; - Included &#x201C;algorithm,&#x201D; &#x201C;data,&#x201D; &#x201C;model,&#x201D; &#x201C;error,&#x201D; showing technical literacy and system critique.</p>
<p>Topic 4 (11.2%): &#x201C;Emotional Processing&#x201D; - Centered on &#x201C;feel,&#x201D; &#x201C;scared,&#x201D; &#x201C;anxious,&#x201D; &#x201C;depressed,&#x201D; revealing psychological impacts.</p>
<p>Topic 5 (11.4%): &#x201C;Resistance and Critique&#x201D; - Featured &#x201C;refuse,&#x201D; &#x201C;fight,&#x201D; &#x201C;wrong,&#x201D; &#x201C;human,&#x201D; expressing active opposition.</p>
<p>Topic 6 (7.9%): &#x201C;Economic Impacts&#x201D; - Focused on &#x201C;salary,&#x201D; &#x201C;bills,&#x201D; &#x201C;unemployment,&#x201D; &#x201C;savings,&#x201D; addressing material consequences.</p>
<p>Topic 7 (17.9%): &#x201C;Job Loss and Replacement&#x201D; - Dominated by &#x201C;replaced,&#x201D; &#x201C;fired,&#x201D; &#x201C;automated,&#x201D; &#x201C;obsolete,&#x201D; capturing displacement experiences.</p>
<p>Topic 8 (10.8%): &#x201C;Future Uncertainty&#x201D; - Included &#x201C;future,&#x201D; &#x201C;career,&#x201D; &#x201C;years,&#x201D; &#x201C;survive,&#x201D; expressing temporal anxiety.</p>
<p>Keyword distinctiveness: TF-IDF analysis confirmed the centrality of the human-AI-work intersection. Beyond expected terms (&#x201C;AI,&#x201D; &#x201C;job,&#x201D; &#x201C;work&#x201D;), distinctive keywords revealed specific concerns. &#x201C;Copilot&#x201D; appeared 47 times, indicating the widespread impact of GitHub&#x2019;s coding assistant. &#x201C;Creative&#x201D; ranked surprisingly high (TF-IDF score: 0.73), challenging assumptions that AI primarily threatens routine work. &#x201C;Betrayed&#x201D; scored higher than &#x201C;unemployed,&#x201D; suggesting psychological impacts outweigh economic concerns for many workers.</p>
<p>Network structure and community formation: The co-occurrence network revealed three tightly interconnected communities with bridge terms facilitating cross-cluster communication:</p>
<p>Community 1 (Employment/Automation): Core terms included &#x201C;job,&#x201D; &#x201C;work,&#x201D; &#x201C;replaced,&#x201D; &#x201C;company,&#x201D; &#x201C;AI,&#x201D; forming the network&#x2019;s gravitational center. This cluster&#x2019;s centrality (average degree: 24.3) indicates employment concerns anchor all discourse.</p>
<p>Community 2 (Ethics/Emotion): Centered on &#x201C;human,&#x201D; &#x201C;feel,&#x201D; &#x201C;wrong,&#x201D; &#x201C;trust,&#x201D; &#x201C;fair,&#x201D; this cluster connected emotional and moral dimensions. &#x201C;Human&#x201D; served as the primary bridge term, appearing in 73% of edges between communities.</p>
<p>Community 3 (Technical/Systemic): Focused on &#x201C;system,&#x201D; &#x201C;algorithm,&#x201D; &#x201C;data,&#x201D; &#x201C;technology,&#x201D; &#x201C;error,&#x201D; representing technical literacy and systematic critique. This smaller cluster (18% of nodes) showed surprising sophistication in technical understanding.</p>
<p>The network&#x2019;s high clustering coefficient (0.67) indicates dense local connections within communities, while moderate average path length (2.4) shows efficient global communication across topics. This structure suggests integrated rather than fragmented discourse&#x2014;workers simultaneously process practical, emotional, and ethical dimensions.</p>
</sec>
<sec id="sec18">
<label>4.2</label>
<title>Qualitative themes: lived experiences of algorithmic disruption</title>
<p>Seven major themes emerged from thematic analysis, each representing distinct yet interconnected dimensions of algorithmic anxiety:</p>
<sec id="sec19">
<label>4.2.1</label>
<title>Theme 1: shattered trust and corporate betrayal</title>
<p>Participants described profound betrayal when organizations replaced human teams with AI. A data scientist (Participant 1, 10,861 upvotes) captured this violation: <italic>&#x201C;I used to be a data scientist (with 13&#x202F;years of experience). My boss wanted me to solve a problem which involved clustering sensor data by location. Because errors in latitude and longitude tend to be random, we&#x2019;ll have elliptical clouds of points, so I said we should use k-means. My boss picked up his laptop, turned it around, and said: &#x2018;But Copilot says that we should use DBSCAN.&#x2019; I researched DBSCAN and found that it would be very slow and do the wrong thing in a worse way. My boss did not agree. I was laid off a few weeks later, along with the rest of the data team.&#x201D;</italic></p>
<p>The Microsoft vendor engineer (Participant 12, 1,111 upvotes) described being forced to train their AI replacement: <italic>&#x201C;We are just expected to work as normal and keep &#x2018;training&#x2019; this AI until our last day at the end of the year. It&#x2019;s malding and insanity.&#x201D;</italic> This experience of training one&#x2019;s replacement while being strung along with false promises exemplified the breach of the psychological contract. As Participant 8 (1,641 upvotes) observed: <italic>&#x201C;It&#x2019;s like the people in charge of companies would really prefer not to have employees at all. Businesses boast about their &#x2018;revenue per employee&#x2019; metrics. American-style management does not want to have employees.&#x201D;</italic></p>
<p>A few more relevant comments:</p><list list-type="bullet">
<list-item>
<p><italic>It is every CEO&#x2019;s w&#x002A;t dream to replace humans with A. I.</italic></p>
</list-item>
<list-item>
<p><italic>Companies tell you it&#x2019;s &#x201C;AI,&#x201D; but it&#x2019;s just an excuse to cut staff.</italic></p>
</list-item>
<list-item>
<p><italic>A $20&#x202F;k license for software that&#x2019;s &#x2018;good enough&#x2019; is better than a team of people making great work product at $300&#x202F;k&#x202F;+&#x202F;fringe, so sayeth the Wall Street Gods of Old.</italic></p>
</list-item>
<list-item>
<p><italic>Like when computers and productivity software got popular, secretaries did not lose their jobs. Companies just stopped hiring secretaries.</italic></p>
</list-item>
<list-item>
<p><italic>Keep in mind, most people will not directly lose their jobs to AI. Companies will just hire fewer people and expect the current people to get more done.</italic></p>
</list-item>
</list>
</sec>
<sec id="sec20">
<label>4.2.2</label>
<title>Theme 2: identity and meaning erosion</title>
<p>Professional identity emerged as a casualty of AI integration, as workers found their core tasks redefined or devalued. For many creative professionals, AI tools were introduced as &#x201C;assistants&#x201D; but rapidly shifted the fundamental nature of their work from original creation to simple editing (curation and correction). A graphic designer (Participant 2, 5,565 upvotes) whose firm adopted AI image generation tools within a six-month period articulated this transformation: <italic>&#x201C;The job is becoming less about executing the first idea and more about curating, refining, and adding the crucial human touch (and catching AI&#x2019;s weird mistakes). It feels less like I lost my job and more like my job description was rewritten overnight.&#x201D;</italic> This participant described moving from conceptualizing and creating original designs to primarily reviewing and correcting AI-generated options, a shift that eliminated the creative process they had spent years developing.</p>
<p>Some responded through career pivots seeking meaning. These transitions typically involved leaving knowledge work entirely for fields perceived as more resistant to automation or offering clearer human value. Participant 24 (436 upvotes) stated: <italic>&#x201C;I&#x2019;m shifting to social work to hopefully have a more meaningful career.&#x201D;</italic> This participant had worked in payroll administration before their role was automated, and explicitly framed the career change as seeking work where &#x201C;helping real people&#x201D; provided intrinsic meaning that algorithmic efficiency could not replicate. The literary editor (Participant 3, 4,690 upvotes) captured the irony: <italic>&#x201C;So after spending 15&#x202F;years working for the greater good of sci-fi, I got outsourced to a goddamn robot. To be fair, I guess I probably should have seen that coming, given the genre.&#x201D;</italic> This participant&#x2019;s publisher had replaced human editorial review with AI text analysis tools that assessed manuscript marketability.</p>
<p>A few more relevant comments:</p>
<list list-type="bullet">
<list-item>
<p><italic>I was set to work on a children&#x2019;s book for the school I worked for. I told them straight away I would not sign up if there was AI (they used AI art for their last children&#x2019;s book, and apparently the kids hated it). They laughed and told me it was all going to be from my imagination. So I wrote it and started to put together some clip art, as they had asked me to make the book. Then I had a lupus flare-up. I was out for a whole week, and when I came back, they said the book was ready. However, when I looked at it, yes, it was my writing, but all the clip art had been replaced with the ugliest AI art I had ever seen. I faked ignorance and asked who illustrated it. Excitedly, my boss showed me the AI tool she used. I cried all the way home and for the next few hours. I was already a fiction author and if this new book with AI art got onto Amazon like they planned, it was going to ruin my reputation as a very anti-AI author. I texted and asked to have my name removed. One batch had already been sent out, and it was too late, but they felt bad and decided to give me a pen name for the next few batches. I parted ways soon after that.</italic></p>
</list-item>
<list-item>
<p><italic>Worked payroll, then got replaced, now I work food service again, and the existential angst about money and my career trajectory hits harder in my 30s than 20s. I&#x2019;m so lucky to have my fianc&#x00E9; be so supportive, at least. Been applying to payroll jobs ever since, gotten a few interviews, but no satisfactory offers</italic>.</p>
</list-item>
</list>
</sec>
<sec id="sec21">
<label>4.2.3</label>
<title>Theme 3: technostress and coerced adoption</title>
<p>Mandatory AI integration created intense strain. The Microsoft vendor engineer (Participant 12, 1,111 upvotes) described coercion: <italic>&#x201C;This AI became a mandatory metric where we could get fired for not using it. This AI was and is almost always wrong with technical information and always wrong on key details when assessing complex issues. Essentially it was completely useless if you have any semblance of competency in your role.&#x201D;</italic></p>
<p>The graphic designer (Participant 2, 5,565 upvotes) added: <italic>&#x201C;The pressure to constantly adapt is the real challenge.&#x201D;</italic> Participant 19 (692 upvotes) expressed FOMO-driven stress: <italic>&#x201C;I feel I am falling behind as most people are actively using AI in their creative careers which I do not because you need to buy credits to do anything, I do not wanna be chained.&#x201D;</italic></p>
<p>A few more relevant comments:</p>
<list list-type="bullet">
<list-item>
<p><italic>This bet on &#x201C;AI&#x201D; originating from the US-based companies is so weird to me. It seems to be so detrimental, yet most of the leadership seems to be so committed, it feels almost like a cult. As somewhat of a bystander, it feels like China&#x2019;s bet on electrification and exports of technology related to sustainable energy seems so much better, and yet, US folks seem to be doubling down on their initial stance.</italic></p>
</list-item>
<list-item>
<p><italic>All jobs are (indirectly) affected. I&#x2019;ve seen people say, &#x201C;Oh, well, I&#x2019;m a chef, and that will take much longer to automate.&#x201D; Everyone who&#x2019;s been laid off by AI will want these few remaining jobs now, so your competition is increasing exponentially. The odds of you becoming a chef, or retaining your chef job, will decrease drastically, even if the job itself remains manual labor for a while longer.</italic></p>
</list-item>
<list-item>
<p><italic>They just started introducing more AI things to &#x201C;save us time so we can focus on the important stuff,&#x201D; and they swore they were not trying to replace people; they just wanted to help us at work. Then they eliminated positions one by one. Once they had eliminated 2 jobs and moved the responsibility to my role (so the work of 3 positions), they started laying off those people.</italic></p>
</list-item>
</list>
</sec>
<sec id="sec22">
<label>4.2.4</label>
<title>Theme 4: devaluation of expertise</title>
<p>Experienced professionals described humiliation when algorithms overruled judgment. The literary editor (Participant 3, 4,690 upvotes) expressed: <italic>&#x201C;After spending 15 years working for the greater good of sci-fi, I got outsourced to a goddamn robot&#x2026; Just never thought it&#x2019;d surpass human reading/analysis THAT fast.&#x201D;</italic> Though Participant 10 (1,306 upvotes) countered: <italic>&#x201C;It has not surpassed human reading and analysis. Your company is just soulless and greedy.&#x201D;</italic></p>
<p>Participant 14 (901 upvotes) warned about systemic consequences: <italic>&#x201C;They&#x2019;ll have fired all their experts and replaced them with a computer who knows how to interview well but in practical terms is fresh out of college.&#x201D;</italic> A designer (Participant 9, 1,566 upvotes) shared: <italic>&#x201C;My team and I spent a few days working on a branding&#x2026; Then our boss AI-generated a (very crappy) mood-board + logo, and presented it to the client then shoved it in our faces like &#x2018;haha see should&#x2019;ve used AI from the start, the client loved it&#x2019;.&#x201D;</italic></p>
<p>A few more relevant comments:</p>
<list list-type="bullet">
<list-item>
<p><italic>I have been saying this since its inception. An increasingly common observation is that AI amplifies the Dunning-Kruger effect. It gives the layman a very convincing delusion of competency while actually being confidently and objectively wrong in both nuanced and obvious ways.</italic></p>
</list-item>
<list-item>
<p><italic>I teach at university and this describes my experience with my students using AI to a T. They&#x2019;ll have a very general idea of the material, which we want them to deepen by doing some sort of project or essay or the like, and they&#x2019;ll offload it to ChatGPT, polish the results a bit and get rid of the em dashes and then act super surprised when we tell them it&#x2019;s shitty work. Because like&#x2026; If you have a very general idea of what you are talking about, it sounds perfectly plausible.</italic></p>
</list-item>
<list-item>
<p><italic>I&#x2019;m an epidemiologist working for a local health department in a team building disease surveillance capacity. Basically, my team makes data cleaning and visualizations automated so we can spend time interpreting the output and detecting outbreaks and patterns earlier. We all have master&#x2019;s or PhDs in epidemiology/biostatistics. We are being pushed out in the middle of respiratory season at the end of the year, so the IT team can make oversimplified graphs that are not useful and use AI for the rest. It&#x2019;s absolutely horrifying that our community&#x2019;s health is in the hands of untrained IT and AI.</italic></p>
</list-item>
<list-item>
<p><italic>This seems to be the big appeal of AI for bosses. It allows people who do not know how to do things to</italic> look <italic>like they can do things. So yeah, your boss is like &#x201C;look at me, I&#x2019;m a graphic designer/data scientist/whatever&#x201D; when they have no idea what you actually do or how to evaluate the AI results.</italic></p>
</list-item>
<list-item>
<p><italic>We had one client do their original concepts with AI, which is fine for them to communicate what they need to a designer. But they got so attached to this initial concept, they did not want to pay a designer to recreate it so we could actually use it for print. They did not see the mistakes. They did not understand the basic concept that trying to put something that&#x2019;s 10&#x201D;x10&#x201D; on a flat screen is not going to be usable for what we need, printing and installing these graphics on a truck, a 3D object. Or logo creation, sure, you can use it for small print, but not large unless you have a vector file. AI cannot vectorize well yet, especially if they got gradients and effects all over it. People are starting to do everything and anything to not pay a designer, even if it takes up more of their time.</italic></p>
</list-item>
</list>
</sec>
<sec id="sec23">
<label>4.2.5</label>
<title>Theme 5: job insecurity and future anxiety</title>
<p>Existential uncertainty permeated discussions. Participant 47 (203 upvotes) worried: <italic>&#x201C;My teams in Uruguay and China were really good, but maybe even those guys are at risk of loss of work to an AI. Frightening.&#x201D;</italic> Participant 55 (151 upvotes) expressed paralysis: <italic>&#x201C;Every time I consider pivoting to a new career (I&#x2019;m unemployed), I have massive doubts it&#x2019;s even going to exist in this cluster fuck of a time.&#x201D;</italic></p>
<p>Participant 85 (90 upvotes) explained systemic drivers: <italic>&#x201C;CEOs are legally obligated to invest in AI as a business strategy, as it props their stock up today, and is marketed as a requirement for long-term success.&#x201D;</italic> Participant 74 (102 upvotes) predicted broader impacts: <italic>&#x201C;My hypothesis is that we are about to see a drop off in expertise and specialization.&#x201D;</italic></p>
<p>A few more relevant comments:</p>
<list list-type="bullet">
<list-item>
<p><italic>Until they make robots that can work in a kitchen, my job as a chef is safe. I give it 10&#x202F;years.</italic></p>
</list-item>
<list-item>
<p><italic>I&#x2019;m a teenager and just seeing how bad the AI stuff is getting is really scaring me and making me lose hope</italic>: <italic>(&#x2026; I wanted to be a scientist, but I&#x2019;m not sure if that&#x2019;ll even be an option for me)</italic></p>
</list-item>
<list-item>
<p><italic>I work a bunch of different positions in audio post production, and it&#x2019;s only a matter of time before a director or producer tells me that the talent has agreed to let AI clone/train their voice so I can do edits and pick-ups without having to re-record them. So far, it has not been a thing, but the clock is ticking. Ultimately, it means less money for me and less money for the talent, so I&#x2019;m holding off for as long as I can.</italic></p>
</list-item>
<list-item>
<p><italic>My uncle does voice acting. A lot of his work came from audiobooks, but that&#x2019;s drying up lately and being replaced by AI voice.</italic></p>
</list-item>
<list-item>
<p><italic>Graphic designer here. I have not been fully replaced yet, but the landscape has completely changed. Clients now expect me to use AI as a &#x2018;co-pilot&#x2019;, generating initial concepts, mood boards, and even rough copy in minutes, not hours. The job is becoming less about executing the first idea and more about curating, refining, and adding the crucial human touch (and catching AI&#x2019;s weird mistakes). It feels less like I lost my job and more like my job description was rewritten overnight. The pressure to constantly adapt is the real challenge.</italic></p>
</list-item>
<list-item>
<p><italic>I was a full-time visual artist. The commissions dried up when people started using ChatGPT to make all their images, flyers, posters, etc.</italic></p>
</list-item>
<list-item>
<p><italic>I&#x2019;m now an OF content creator&#x2026; after losing a marketing copywriter job.</italic></p>
</list-item>
</list>
</sec>
<sec id="sec24">
<label>4.2.6</label>
<title>Theme 6: cynical adaptation and quiet resistance</title>
<p>Strategic disengagement emerged as a coping mechanism. Participant 220 (18 upvotes) advised: <italic>&#x201C;Stop pushing to improve your output, just press the button on the robot. Stop innovating, just press the button on the robot. Stop trying to impress and showcase your ability, just press the button on the robot. Keep your creativity for yourself. You&#x2019;ll be paid or laid off either way.&#x201D;</italic></p>
<p>Others adapted pragmatically. Participant 698 (2 upvotes) shared: <italic>&#x201C;I realized AI is inevitable, so now I&#x2019;m in school as a tradesman looking to be a welder.&#x201D;</italic> Participant 694 (2 upvotes) described the new reality: <italic>&#x201C;customer-facing support roles have become a nightmare due to AI. customer-facing jobs that were once plentiful are few and far between, and absolutely misery.&#x201D;</italic></p>
</sec>
<sec id="sec25">
<label>4.2.7</label>
<title>Theme 7: human touch as enduring value</title>
<p>Despite pessimism, participants affirmed uniquely human qualities. A voice actor (Participant 4, 3,770 upvotes) described principled resistance: <italic>&#x201C;I&#x2019;ve worked on campaigns where all of the voice artists refused to sign their contract because there&#x2019;s a new clause in it - if they sign it, they are signing the rights of their voice over to AI&#x2026; They all refused; the client just recast all of them.&#x201D;</italic></p>
<p>Participant 694 (2 upvotes) noted customer preferences: <italic>&#x201C;customers still crave human interaction, especially for technical support or support scheduling.&#x201D;</italic> The graphic designer (Participant 2, 5,565 upvotes) emphasized &#x201C;adding the crucial human touch&#x201D; as remaining valuable, while acknowledging the transformed nature of their work.</p>
<p>Another relevant comment:</p>
<list list-type="bullet">
<list-item>
<p><italic>I have not lost my job, and I doubt I will, but I find the way people are using AI</italic> like <italic>my job to be extremely concerning. I&#x2019;m licensed on a national and state level as a mental health therapist. Right now, I do evaluations for mental health services and am not providing direct therapy services, but regardless, I do not think AI is capable of either of those jobs (at least, not yet). There is a certain level of empathy you have to have to do the job(s), and I&#x2019;m not sure a machine can fake it well enough. In theory, I do not think evaluations could be replaced by AI, assuming clients could type back answers, but most people I see are woefully technologically illiterate, if not outright Luddite/technophobic and refuse to interact with computers because they do not know how. I think even providing voice responses to a machine or an AI-avatar would be so off-putting. People bitch enough now about doing telehealth, which has become very widespread post-COVID. All that to say, the trend of &#x2018;replacing&#x2019; a therapist with AI is something that disappoints, frustrates, and outright scares me.</italic></p>
</list-item>
<list-item>
<p><italic>What I do not understand is how people can prefer AI VAs over actual artists. Take a look at any video with AI-slop voice over, and regardless of the content, that&#x2019;s what the comments will be about</italic>.</p>
</list-item>
</list>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="sec26">
<label>5</label>
<title>Discussion</title>
<sec id="sec27">
<label>5.1</label>
<title>Theoretical contributions and implications</title>
<p>Our findings extend existing theoretical frameworks while also revealing their limitations in explaining the impacts of AI in the workplace. The integration of multiple theories proves essential, as no single framework can capture the full complexity of algorithmic anxiety.</p>
</sec>
<sec id="sec28">
<label>5.2</label>
<title>Algorithmic anxiety: an integrative framework</title>
<p>While the term &#x201C;algorithmic anxiety&#x201D; appears in scholarship examining the paradoxical relationship between technological promises of control and the widespread fears algorithms unleash across domains including surveillance, identity, and the outsourcing of human decision-making to smart machines (<xref ref-type="bibr" rid="ref23">De Vries and Schinkel, 2019</xref>; <xref ref-type="bibr" rid="ref27">Elliott, 2024</xref>), its specific manifestation in employment relationships (where AI mediates decisions about livelihood, professional worth, and economic security) remains undertheorized. Existing constructs address discrete elements&#x2014;technostress captures technology-use strain (<xref ref-type="bibr" rid="ref70">Tarafdar et al., 2019</xref>), job insecurity addresses employment fears (<xref ref-type="bibr" rid="ref66">Shoss, 2017</xref>), and automation anxiety focuses on replacement concerns (<xref ref-type="bibr" rid="ref14">Brougham and Haar, 2018</xref>). However, our empirical analysis reveals that workers experience not discrete, separable anxieties but a compound psychological phenomenon integrating multiple dimensions simultaneously.</p>
<p>We position algorithmic anxiety as an umbrella construct encompassing seven interrelated dimensions identified in our thematic analysis. Shattered trust captures the experience of corporate betrayal when organizations frame AI as &#x201C;assistive&#x201D; while using it to eliminate positions or devalue expertise&#x2014;a psychological contract breach unique to being deceived about technology&#x2019;s true purpose. Identity erosion reflects the loss of professional self-concept when core competencies become automated, leaving workers to curate algorithmic outputs rather than exercise craft. Technostress and coerced adoption describe the strain of being forced to use and train systems that threaten one&#x2019;s role, creating the psychologically contradictory position of facilitating one&#x2019;s own potential obsolescence. Expertise devaluation captures the deflation experienced when skills developed over years become worthless overnight, not through personal failure but through algorithmic advancement.</p>
<p>Future anxiety extends beyond current job loss to encompass existential uncertainty even among currently employed workers who recognize their potential future in others&#x2019; present displacement. This anticipatory dimension distinguishes algorithmic anxiety from traditional job insecurity&#x2014;workers fear not just losing this job but the possibility that no human expertise retains lasting value. Cynical adaptation represents defensive coping through dark humor, resignation, and quiet resistance&#x2014;attempts to maintain psychological equilibrium while acknowledging limited agency. Finally, human value affirmation reflects efforts to assert inherent worth beyond productivity, countering the implicit message that human work has become redundant.</p>
<p>These seven dimensions constitute algorithmic anxiety as a compound phenomenon distinct from existing constructs in three ways. First, it integrates cognitive (uncertainty about the future), affective (betrayal, deflation), and behavioral (resistance, adaptation) responses within a single framework. Second, it captures both actualized distress (among displaced workers) and anticipatory distress (among those witnessing displacement), explaining why narratives of job loss receive widespread validation from workers not yet affected. Third, it addresses the unique circumstance of being evaluated, managed, and potentially replaced by systems one helped create, introducing dimensions of betrayal and complicity absent from traditional automation frameworks where workers and machines occupied clearly separate spheres.</p>
<p>Our contribution is not introducing a term but empirically characterizing algorithmic anxiety&#x2019;s constituent dimensions through workers&#x2019; own accounts. Future research should develop measurement instruments capturing these seven dimensions, examine whether they emerge as distinct factors or load onto higher-order constructs, and assess whether algorithmic anxiety predicts unique variance in outcomes beyond what established constructs explain individually.</p>
<p>Extending psychological contract theory: Traditional psychological contract theory assumes human agents on both sides of the employment relationship. Our findings reveal how AI fundamentally disrupts this assumption, introducing what we term &#x201C;algorithmic mediation&#x201D; of psychological contracts. Workers experience AI not as neutral tools but as quasi-agents making decisions previously reserved for human managers. This creates novel breach types: &#x201C;technological betrayal&#x201D; (when AI systems that workers helped build replace them), &#x201C;algorithmic abandonment&#x201D; (when human judgment is systematically devalued), and &#x201C;digital dehumanization&#x201D; (when workers become data points for algorithmic processing).</p>
<p>The temporal dimension proves critical. Traditional contract breaches occur at discrete moments, such as a broken promise or a layoff announcement. AI-mediated breaches unfold gradually through incremental automation, creating &#x201C;breach cascades&#x201D; where each small violation compounds the effects of previous ones. Workers describe a &#x201C;thousand cuts&#x201D; phenomenon where no single change seems breach-worthy, but cumulative impact devastates trust.</p>
<p>Reconceptualizing Technostress: Our findings suggest existing technostress frameworks inadequately capture AI-specific stressors. We propose &#x201C;algorithmic technostress&#x201D; as a distinct construct with unique characteristics. Unlike traditional IT stress, which focuses on tool usage, algorithmic stress involves existential dimensions, questioning human value, purpose, and the future viability of humanity. The stressor is not technology itself but technology&#x2019;s implications for human worth.</p>
<p>We identify novel stress mechanisms: &#x201C;performance anxiety loops&#x201D; (perpetual evaluation by opaque systems), &#x201C;competence inversion stress&#x201D; (when expertise becomes liability), and &#x201C;automation anticipation stress&#x201D; (chronic anxiety about future displacement). These mechanisms operate simultaneously, creating compound stress that exceeds the sum of its parts.</p>
<p>Self-determination theory in algorithmic contexts: AI systematically undermines all three basic psychological needs, but through mechanisms SDT does not fully anticipate. Autonomy loss occurs not just through external control but through &#x201C;algorithmic channeling,&#x201D; where AI shapes decision spaces so fundamentally that genuine choice becomes impossible. Competence threats extend beyond current performance to &#x201C;prospective competence anxiety,&#x201D; doubt about any skill&#x2019;s future relevance. Relatedness suffers through &#x201C;algorithmic intermediation,&#x201D; where human connections become filtered through AI systems.</p>
<p>Conservation of resources in accelerated change: COR theory helps explain cascading impacts, but AI&#x2019;s pace challenges the framework&#x2019;s assumptions about resource cycles. Traditional resource loss and gain occur over predictable timeframes. AI creates &#x201C;resource volatility&#x201D; where valuable skills become obsolete overnight, while new requirements emerge faster than acquisition possibilities. This creates perpetual resource-deficient states, where workers cannot build resources quickly enough to offset losses.</p>
</sec>
<sec id="sec29">
<label>5.3</label>
<title>Digital discourse as collective sense-making</title>
<p>Reddit discussions represent more than individual venting; they constitute collective sense-making about technological transformation. Through upvoting, commenting, and sharing experiences, workers collaboratively construct narratives about the meaning of AI and appropriate responses. This process serves multiple functions: emotional validation, sharing practical strategies, and ideological resistance to corporate AI narratives.</p>
<p>The platform&#x2019;s affordances (pseudonymity and community validation) create unique conditions for authentic disclosure and collective processing. Workers who share experiences too threatening for workplace expression receive validation from others with similar experiences and access accumulated wisdom about coping strategies. The discourse becomes a parallel institution to formal workplace structures, providing support and resistance that corporate environments deny.</p>
</sec>
</sec>
<sec id="sec30">
<label>6</label>
<title>Implications for practice and policy</title>
<p>The following implications derive from workers who experienced AI-driven workplace transformation negatively, within predominantly Western contexts, at a specific moment during AI&#x2019;s rapid evolution. Given our sampling approach and data limitations, these recommendations should not be interpreted as universally applicable across all cultural contexts, organizational types, or AI implementation scenarios. Rather, they identify risk factors, warning signs, and protective strategies relevant to preventing or mitigating the negative experiences documented in our findings. Organizations in non-Western contexts, those implementing AI successfully, or those operating in different cultural environments may require adaptations of these principles. These implications represent hypothesis-generating insights requiring validation through comparative research across diverse contexts and longitudinal studies tracking implementation outcomes over time. They are offered as initial guidance for practitioners navigating AI implementation, recognizing that successful approaches likely vary by institutional context, cultural norms, and temporal factors.</p>
<p>Our findings suggest fundamental reconsideration of AI implementation approaches. Current strategies prioritize technical optimization while treating human impacts as secondary &#x201C;change management&#x201D; concerns. This approach generates the resistance, cynicism, and disengagement that our data documents. While our findings derive from Western-centric contexts where workers experienced AI negatively, they suggest that alternative approaches centering human experience from inception may mitigate these risks.</p>
<p>Participatory AI governance: In contexts similar to those documented in our study&#x2014;Western, knowledge-work environments undergoing AI-driven transformation&#x2014;workers must participate meaningfully in AI adoption decisions, not through token consultation but genuine co-design. This includes representation in selection processes, implementation planning, and ongoing evaluation. Our data shows that workers possess a sophisticated understanding of AI&#x2019;s capabilities and limitations; excluding this knowledge impoverishes implementation and generates resentment. However, the form such participation takes may differ across cultural contexts, particularly in collectivist societies where representation mechanisms operate differently than in individualist Western settings.</p>
<p>Transparent communication: Organizations must honestly communicate AI&#x2019;s intended role and impact. Our findings reveal that workers particularly resent deception, being told AI will &#x201C;augment&#x201D; work while planning replacement. Transparency about automation plans, even when difficult, generates less betrayal than discovered deception.</p>
<p>Meaningful reskilling: Current &#x201C;reskilling&#x201D; initiatives often offer superficial training in using AI tools rather than developing AI-complementary capabilities. Workers need pathways to genuinely secure roles, not temporary reprieves before the next wave of automation. Effective programs require strategic design that addresses AI&#x2019;s rapid evolution. Siemens&#x2019; digital learning platform demonstrates key success factors: providing personalized learning paths based on individual skill gaps, offering microlearning modules that fit within work schedules, and creating clear connections between training and actual job requirements (<xref ref-type="bibr" rid="ref30">Freise et al., 2025</xref>). Critically, Siemens invested in infrastructure that made learning accessible during work hours rather than expecting employees to reskill in their personal time while facing job insecurity.</p>
<p>However, our findings suggest workers remain skeptical of employer-sponsored reskilling when organizations simultaneously automate positions, perceiving programs as symbolic gestures rather than genuine commitments. Reskilling alone cannot address structural displacement when entire occupational categories face automation. Organizations must honestly assess whether programs provide genuine security or merely delay inevitable displacement, and provide appropriate transition support accordingly.</p>
<p>Ethical frameworks: Organizations (particularly in Western contexts where our data was generated) need explicit ethical frameworks governing the role of AI in the workplace. These must address not just bias and privacy but human dignity, meaningful work, and fair transition support for displaced workers. Our findings suggest workers judge organizations more on how they implement AI than whether they implement it. The specific content of such frameworks will necessarily vary across cultural and institutional contexts, as notions of dignity, meaningful work, and fair treatment are culturally mediated.</p>
<sec id="sec31">
<label>6.1</label>
<title>Societal implications</title>
<p>This study documents patterns within Western digital discourse about AI-driven job displacement that suggest broader societal challenges, though the specific manifestation of these challenges may vary across cultural and economic contexts. Individual organizational efforts, however well-intentioned, cannot address systemic issues such as technological unemployment, meaning crises in automated economies, or the value of human beings in an age of artificial general intelligence.</p>
<p>Policy interventions in contexts similar to those represented in our data (Western liberal market economies) might include stronger worker protections during technological transitions, requirements for human oversight in algorithmic decision-making, public investment in human-centric sectors that are resistant to automation, and the evolution of the social safety net to acknowledge automation&#x2019;s impact. Some participants mentioned a universal basic income, although views remained divided on whether this addresses concerns about meaning and purpose beyond economic security. Policy responses in other institutional contexts (such as coordinated market economies with strong labor protections, or developing economies with limited social safety nets) would require different configurations balancing worker protection with economic development goals.</p>
<p>Our findings underscore the importance of proactive workforce preparation for AI integration. Research on psychological resilience in future workplaces demonstrates that organizations investing in protective factors (psychological safety, participatory decision-making, supportive leadership) before implementing AI can prevent the algorithmic anxiety our study documents (<xref ref-type="bibr" rid="ref2">Alitabar and Parsakia, 2025</xref>). Foresight methodologies involving workers in scenario planning may help them develop adaptive capacity before encountering displacement threats. This suggests treating workforce resilience as infrastructure requiring advance investment rather than post-crisis damage control.</p>
<p>Educational systems require fundamental reconception. Current approaches emphasize technical skills that are increasingly automated, while neglecting uniquely human capabilities such as ethical reasoning, emotional intelligence, and creative problem-solving. Career counselling must acknowledge radical uncertainty, preparing students not for specific careers but for the ability to adapt. These educational recommendations are framed within Western educational paradigms; their applicability to other educational systems and cultural contexts requires careful consideration of local institutional arrangements and cultural values regarding education, work, and human development.</p>
</sec>
</sec>
<sec id="sec32">
<label>7</label>
<title>Limitations and future directions</title>
<p>Several limitations constrain the generalizability and interpretation of our findings. Reddit&#x2019;s user base skews younger, more technologically literate, male, and Western-centric than the general working population (<xref ref-type="bibr" rid="ref57">Proferes et al., 2021</xref>). More critically, the thread prompt <italic>(&#x201C;Hey people who lost their jobs to AI, what happened?&#x201D;)</italic> creates selection bias toward negative experiences, attracting workers who experienced AI as threatening while excluding those with neutral or positive experiences. Community engagement through upvotes suggests the discourse resonates with workers experiencing anticipatory rather than actualized displacement, but we cannot verify the actual experiences of those engaging with the content. Pseudonymity prevents demographic verification or systematic analysis of participant characteristics.</p>
<p>These sampling characteristics have important interpretive consequences. Our thematic findings (shattered trust, identity erosion, technostress) describe psychological responses among workers negatively affected by AI, not universal reactions to workplace AI. The 51% negative sentiment in our corpus cannot be extrapolated to estimate how commonly workers experience AI negatively in general populations. Our theoretical contributions illuminate mechanisms operating among distressed workers rather than invariant laws applying to all AI contexts. Claims about the prevalence, frequency, or inevitability of algorithmic anxiety would be unwarranted given our sampling approach. Our findings characterize the phenomenology of negative AI impact, providing depth of understanding about this experience while requiring complementary research on representative samples to establish its prevalence.</p>
<p>Our cross-sectional design captures a temporal snapshot during the rapid evolution of AI. Longitudinal research could track how worker attitudes evolve as AI capabilities expand and societies adapt, specifically examining whether initial algorithmic anxiety diminishes with familiarization, whether predicted job losses materialize at anticipated rates, and how coping strategies shift over time. Panel studies following the same workers across multiple years of AI implementation would reveal whether psychological contract breaches prove temporary disruptions or permanent shifts in employment relationships.</p>
<p>Comparative studies across cultures with different labor protections, technological attitudes, and social contracts would illuminate contextual factors shaping AI&#x2019;s impact. Specific research questions merit investigation: Do workers in Nordic countries with strong social safety nets experience less algorithmic anxiety than those in liberal market economies? How do collectivist cultures (where job loss affects family honor and social standing) differ from individualist cultures in processing AI displacement? Do nations with codetermination rights (requiring worker consultation in technology decisions) show different implementation outcomes than those without such protections? Cross-national comparisons could identify which institutional features mitigate negative impacts and which cultural factors predict resistance versus acceptance.</p>
<p>Future research should examine positive cases, organizations successfully integrating AI while maintaining worker well-being and dignity. What differentiates these contexts? How do workers experience AI as genuinely augmenting rather than replacing? Understanding success conditions is essential for practical guidance.</p>
<p>Methodologically, our approach demonstrates mixed-methods value for complex sociotechnical phenomena. Future studies could extend this integration, perhaps combining digital discourse analysis with workplace ethnography and physiological stress measures. Real-time data collection during AI implementation can capture the evolution of worker responses, rather than relying on retrospective accounts.</p>
</sec>
<sec sec-type="conclusions" id="sec33">
<label>8</label>
<title>Conclusion</title>
<p>This study illuminates the human dimensions of workplace AI integration through analysis of digital discourse from those directly experiencing algorithmic disruption. Our mixed-methods approach reveals algorithmic anxiety as a complex syndrome encompassing not just job insecurity but fundamental threats to identity, meaning, and human value in increasingly automated workplaces.</p>
<p>The sentiment divergence between surface positivity and contextual negativity suggests workers employ sophisticated coping strategies (humor, irony, resignation) while experiencing genuine distress. The seven themes emerging from thematic analysis paint a picture of profound transformation where traditional psychological contracts shatter, professional identities erode, and workers struggle to maintain dignity and purpose as machines assume previously human roles.</p>
<p>Theoretical contributions include extending psychological contract theory to accommodate algorithmic mediation, identifying AI-specific technostress mechanisms, demonstrating systematic undermining of basic psychological needs, and revealing cascading resource loss in accelerated technological change. These frameworks require further development to fully capture AI&#x2019;s novel challenges to established organizational theories.</p>
<p>The digital discourse analyzed represents more than individual grievances; it constitutes collective sense-making about one of the most significant transformations in work history. Through Reddit&#x2019;s pseudonymous platform, workers create parallel institutions for processing experiences, sharing strategies, and constructing counter-narratives to corporate AI rhetoric. This grassroots response warrants attention from scholars and practitioners seeking to understand the true impact of AI.</p>
<p>Practical implications emphasize that sustainable AI integration requires fundamental reconsideration of implementation approaches. Technical optimization without human consideration generates the resistance and cynicism our data documents. Organizations must genuinely involve workers in AI governance, communicate transparently about automation plans, provide meaningful reskilling opportunities for secure roles, and establish ethical frameworks that protect human dignity.</p>
<p>The path forward requires recognizing AI integration as a fundamentally human challenge, not a technical problem. Success metrics must expand beyond efficiency gains to include worker well-being, organizational trust, and societal flourishing. This demands new forms of human-AI collaboration that preserve what makes work meaningful while leveraging AI&#x2019;s capabilities.</p>
<p>As we stand at this historical inflexion point, choices made about AI&#x2019;s workplace role will reverberate for generations. Our findings suggest current approaches often fail to account for human costs, generating unnecessary suffering while undermining potential benefits. Alternative paths exist, ones that center human dignity, preserve meaningful work, and create genuinely augmented rather than diminished human potential.</p>
<p>The workers whose voices animate this study offer both warning and wisdom. They warn of futures where humans become secondary to systems they created, where expertise becomes obsolete overnight, where meaning drains from work reduced to algorithmic supervision. But they also affirm enduring human qualities (creativity, empathy, ethical judgment, relationship) that no algorithm replicates.</p>
<p>The ultimate measure of our technological progress will not be the sophistication of artificial intelligence, but rather the wisdom in its integration with human life. This study contributes to that wisdom by amplifying voices from the front lines of automation, translating their experiences into theoretical insights and practical guidance. Their message deserves to be heard: preserve the human in human-AI collaboration, or risk losing not just jobs but the meaning, dignity, and purpose that make work fundamentally human.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec34">
<title>Data availability statement</title>
<p>The computational analysis code, topic modelling outputs, network analysis files, and statistical results are available from the corresponding author upon reasonable request. Researchers may access the restricted data under a restricted data use agreement that requires institutional ethics approval and adherence to the protective protocols described in Section 3.6.</p>
</sec>
<sec sec-type="author-contributions" id="sec35">
<title>Author contributions</title>
<p>AS: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Resources, Software, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. MS: Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="COI-statement" id="sec36">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec37">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was used in the creation of this manuscript. Grammarly and Microsoft 365 Editor were used for language editing (grammar, spelling and readability).</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec38">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Adams</surname><given-names>N. N.</given-names></name></person-group> (<year>2024</year>). <article-title>&#x2018;Scraping&#x2019; Reddit posts for academic research? Addressing some blurred lines of consent in growing internet-based research trend during the time of COVID-19</article-title>. <source>Int. J. Soc. Res. Methodol.</source> <volume>27</volume>, <fpage>47</fpage>&#x2013;<lpage>62</lpage>. doi: <pub-id pub-id-type="doi">10.1080/13645579.2022.2111816</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alitabar</surname><given-names>S. H. S.</given-names></name> <name><surname>Parsakia</surname><given-names>K.</given-names></name></person-group> (<year>2025</year>). <article-title>Psychological resilience in the workplace of the future: a qualitative scenario analysis</article-title>. <source>Foresight Health Governance</source> <volume>2</volume>, <fpage>32</fpage>&#x2013;<lpage>41</lpage>. Available at: <ext-link xlink:href="https://journalfhg.com/index.php/jfph/article/view/4" ext-link-type="uri">https://journalfhg.com/index.php/jfph/article/view/4</ext-link></mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Amaya</surname><given-names>A.</given-names></name> <name><surname>Bach</surname><given-names>R.</given-names></name> <name><surname>Keusch</surname><given-names>F.</given-names></name> <name><surname>Kreuter</surname><given-names>F.</given-names></name></person-group> (<year>2021</year>). <article-title>New data sources in social science research: things to know before working with Reddit data</article-title>. <source>Soc. Sci. Comput. Rev.</source> <volume>39</volume>, <fpage>943</fpage>&#x2013;<lpage>960</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0894439319893305</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Andalibi</surname><given-names>N.</given-names></name> <name><surname>Haimson</surname><given-names>O. L.</given-names></name> <name><surname>Choudhury</surname><given-names>M. D.</given-names></name> <name><surname>Forte</surname><given-names>A.</given-names></name></person-group> (<year>2018</year>). <article-title>Social support, reciprocity, and anonymity in responses to sexual abuse disclosures on social media</article-title>. <source>ACM Trans. Comput.-Hum. Interact.</source> <volume>25</volume>, <fpage>1</fpage>&#x2013;<lpage>35</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3234942</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ashok</surname><given-names>M.</given-names></name> <name><surname>Madan</surname><given-names>R.</given-names></name> <name><surname>Joha</surname><given-names>A.</given-names></name> <name><surname>Sivarajah</surname><given-names>U.</given-names></name></person-group> (<year>2022</year>). <article-title>Ethical framework for artificial intelligence and digital technologies</article-title>. <source>Int. J. Inf. Manag.</source> <volume>62</volume>:<fpage>102433</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijinfomgt.2021.102433</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Balcio&#x011F;lu</surname><given-names>Y. S.</given-names></name> <name><surname>&#x00C7;elik</surname><given-names>A. A.</given-names></name> <name><surname>Altinda&#x011F;</surname><given-names>E.</given-names></name></person-group> (<year>2025</year>). <article-title>Sentiment analysis of Reddit reviews on mobile gaming: insights from the gaming community</article-title>. <source>Int. J. Hum.-Comput. Interact.</source> <volume>41</volume>, <fpage>12697</fpage>&#x2013;<lpage>12709</lpage>. doi: <pub-id pub-id-type="doi">10.1080/10447318.2025.2464897</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bankins</surname><given-names>S.</given-names></name> <name><surname>Ocampo</surname><given-names>A. C.</given-names></name> <name><surname>Marrone</surname><given-names>M.</given-names></name> <name><surname>Restubog</surname><given-names>S. L. D.</given-names></name> <name><surname>Woo</surname><given-names>S. E.</given-names></name></person-group> (<year>2024</year>). <article-title>A multilevel review of artificial intelligence in organizations: implications for organizational behavior research and practice</article-title>. <source>J. Organ. Behav.</source> <volume>45</volume>, <fpage>159</fpage>&#x2013;<lpage>182</lpage>. doi: <pub-id pub-id-type="doi">10.1002/job.2735</pub-id></mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Berger</surname><given-names>P. L.</given-names></name> <name><surname>Luckmann</surname><given-names>T.</given-names></name></person-group> (<year>1966</year>). <source>The social construction of reality: A treatise in the sociology of knowledge</source>. ed. <person-group person-group-type="editor"><name><surname>Luckmann</surname><given-names>T.</given-names></name></person-group> (<publisher-loc>New York</publisher-loc>: <publisher-name>Anchor Books</publisher-name>). Available at: <ext-link xlink:href="https://philpapers.org/rec/BERTSC-2" ext-link-type="uri">https://philpapers.org/rec/BERTSC-2</ext-link></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Bhaskar</surname><given-names>R.</given-names></name></person-group> (<year>1975</year>). <source>A Realist Theory of Science</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>Routledge</publisher-name>. Available at: <ext-link xlink:href="https://philpapers.org/rec/BHAART-6" ext-link-type="uri">https://philpapers.org/rec/BHAART-6</ext-link></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="other"><collab id="coll1">Bitkom</collab>. <year>2020</year>. Companies are still struggling with artificial intelligence. Available online at: <ext-link xlink:href="https://www.bitkom.org/Presse/Presseinformation/Unternehmen-tun-sich-noch-schwer-mit-Kuenstlicher-Intelligenz" ext-link-type="uri">https://www.bitkom.org/Presse/Presseinformation/Unternehmen-tun-sich-noch-schwer-mit-Kuenstlicher-Intelligenz</ext-link> (Accessed January 01, 2017).</mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Blei</surname><given-names>D. M.</given-names></name></person-group> (<year>2012</year>). <article-title>Probabilistic topic models</article-title>. <source>Commun. ACM</source> <volume>55</volume>, <fpage>77</fpage>&#x2013;<lpage>84</lpage>. doi: <pub-id pub-id-type="doi">10.1145/2133806.2133826</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Boyd</surname><given-names>D.</given-names></name> <name><surname>Crawford</surname><given-names>K.</given-names></name></person-group> (<year>2012</year>). <article-title>Critical questions for big data: provocations for a cultural, technological, and scholarly phenomenon</article-title>. <source>Inf. Commun. Soc.</source> <volume>15</volume>, <fpage>662</fpage>&#x2013;<lpage>679</lpage>. doi: <pub-id pub-id-type="doi">10.1080/1369118X.2012.678878</pub-id></mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Braun</surname><given-names>V.</given-names></name> <name><surname>Clarke</surname><given-names>V.</given-names></name></person-group> (<year>2006</year>). <article-title>Using thematic analysis in psychology</article-title>. <source>Qual. Res. Psychol.</source> <volume>3</volume>, <fpage>77</fpage>&#x2013;<lpage>101</lpage>. doi: <pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Brougham</surname><given-names>D.</given-names></name> <name><surname>Haar</surname><given-names>J.</given-names></name></person-group> (<year>2018</year>). <article-title>Smart technology, artificial intelligence, robotics, and algorithms (STARA): employees&#x2019; perceptions of our future workplace</article-title>. <source>J. Manage. Organ.</source> <volume>24</volume>, <fpage>239</fpage>&#x2013;<lpage>257</lpage>. doi: <pub-id pub-id-type="doi">10.1017/jmo.2016.55</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Brown</surname><given-names>D. K.</given-names></name> <name><surname>Ng</surname><given-names>Y. M. M.</given-names></name> <name><surname>Riedl</surname><given-names>M. J.</given-names></name> <name><surname>Lacasa-Mas</surname><given-names>I.</given-names></name></person-group> (<year>2018</year>). <article-title>Reddit&#x2019;s veil of anonymity: predictors of engagement and participation in media environments with hostile reputations</article-title>. <source>Soc. Media Soc.</source> <volume>4</volume>:<fpage>2056305118810216</fpage>. doi: <pub-id pub-id-type="doi">10.1177/2056305118810216</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Calugan</surname><given-names>B.</given-names></name> <name><surname>Tanyag</surname><given-names>I.</given-names></name> <name><surname>Tanyag</surname><given-names>R.</given-names></name> <name><surname>Dawigi</surname><given-names>A.</given-names></name></person-group> (<year>2025</year>). <article-title>AI transformation in the workplace: a comprehensive review of trends and future directions</article-title>. <source>J. Interdisciplin. Perspect.</source> <volume>3</volume>, <fpage>335</fpage>&#x2013;<lpage>344</lpage>. doi: <pub-id pub-id-type="doi">10.69569/jip.2025.175</pub-id></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>Z.</given-names></name></person-group> (<year>2023</year>). <article-title>Ethics and discrimination in artificial intelligence-enabled recruitment practices</article-title>. <source>Humanit. Soc. Sci. Commun.</source> <volume>10</volume>:<fpage>567</fpage>. doi: <pub-id pub-id-type="doi">10.1057/s41599-023-02079-x</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cheng</surname><given-names>K.-T.</given-names></name> <name><surname>Chang</surname><given-names>K.</given-names></name> <name><surname>Tai</surname><given-names>H.-W.</given-names></name></person-group> (<year>2022</year>). <article-title>AI boosts performance but affects employee emotions</article-title>. <source>Inf. Resour. Manag. J.</source> <volume>35</volume>, <fpage>1</fpage>&#x2013;<lpage>18</lpage>. doi: <pub-id pub-id-type="doi">10.4018/irmj.314220</pub-id></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cramarenco</surname><given-names>R. E.</given-names></name> <name><surname>Burc&#x0103;-Voicu</surname><given-names>M. I.</given-names></name> <name><surname>Dabija</surname><given-names>D. C.</given-names></name></person-group> (<year>2023</year>). <article-title>The impact of artificial intelligence (AI) on employees&#x2019; skills and well-being in global labor markets: a systematic review</article-title>. <source>Oecon. Copernic.</source> <volume>14</volume>, <fpage>731</fpage>&#x2013;<lpage>767</lpage>. doi: <pub-id pub-id-type="doi">10.24136/oc.2023.022</pub-id></mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Creswell</surname><given-names>J. W.</given-names></name> <name><surname>Clark</surname><given-names>V. L. P.</given-names></name></person-group> (<year>2017</year>). <source>Designing and conducting mixed methods research</source>. <edition>Third</edition> Edn. <publisher-loc>Thousand Oaks, CA</publisher-loc>: <publisher-name>SAGE</publisher-name>.</mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dang</surname><given-names>J.</given-names></name> <name><surname>Liu</surname><given-names>L.</given-names></name></person-group> (<year>2025</year>). <article-title>Dehumanization risks associated with artificial intelligence use</article-title>. <source>Am. Psychol.</source> doi: <pub-id pub-id-type="doi">10.1037/amp0001542</pub-id></mixed-citation></ref>
<ref id="ref22"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>De Choudhury</surname><given-names>M.</given-names></name> <name><surname>De</surname><given-names>S.</given-names></name></person-group> (<year>2014</year>). <article-title>Mental health discourse on reddit: self-disclosure, social support, and anonymity</article-title>. <source>Proceedings of the international AAAI conference on web and social media</source>, <publisher-loc>Palo Alto, CA</publisher-loc>. <volume>8</volume>, <fpage>71</fpage>&#x2013;<lpage>80</lpage>.</mixed-citation></ref>
<ref id="ref23"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>De Vries</surname><given-names>P.</given-names></name> <name><surname>Schinkel</surname><given-names>W.</given-names></name></person-group> (<year>2019</year>). <article-title>Algorithmic anxiety: masks and camouflage in artistic imaginaries of facial recognition algorithms</article-title>. <source>Big Data Soc.</source> <volume>6</volume>:<fpage>2053951719851532</fpage>. doi: <pub-id pub-id-type="doi">10.1177/2053951719851532</pub-id></mixed-citation></ref>
<ref id="ref24"><mixed-citation publication-type="other"><collab id="coll2">Department of Health, Republic of South Africa</collab> (<year>2015</year>). Ethics in Health Research: principles, processes and structures. University of Johannesburg. Available online at: <ext-link xlink:href="https://www.uj.ac.za/wp-content/uploads/2023/08/nhrec-doh-2015-ethics-in-health-research-guidelines.pdf" ext-link-type="uri">https://www.uj.ac.za/wp-content/uploads/2023/08/nhrec-doh-2015-ethics-in-health-research-guidelines.pdf</ext-link> (Accessed January 19, 2026).</mixed-citation></ref>
<ref id="ref25"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Devlin</surname><given-names>J.</given-names></name> <name><surname>Chang</surname><given-names>M.-W.</given-names></name> <name><surname>Lee</surname><given-names>K.</given-names></name> <name><surname>Toutanova</surname><given-names>K.</given-names></name></person-group> (<year>2019</year>). &#x201C;<article-title>BERT: pre-training of deep bidirectional transformers for language understanding</article-title>&#x201D; in <source>Association for computational linguistics</source>, <publisher-loc>Minneapolis, Minnesota</publisher-loc>. <fpage>4171</fpage>&#x2013;<lpage>4186</lpage>.</mixed-citation></ref>
<ref id="ref26"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Duggan</surname><given-names>J.</given-names></name> <name><surname>Sherman</surname><given-names>U.</given-names></name> <name><surname>Carbery</surname><given-names>R.</given-names></name> <name><surname>McDonnell</surname><given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Boundaryless careers and algorithmic constraints in the gig economy</article-title>. <source>Int. J. Hum. Resour. Manag.</source> <volume>33</volume>, <fpage>4468</fpage>&#x2013;<lpage>4498</lpage>. doi: <pub-id pub-id-type="doi">10.1080/09585192.2021.1953565</pub-id></mixed-citation></ref>
<ref id="ref27"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Elliott</surname><given-names>A.</given-names></name></person-group> (<year>2024</year>). <source>Algorithms of anxiety: Fear in the digital age</source>. <publisher-loc>Cambridge, UK</publisher-loc>: <publisher-name>Polity</publisher-name>.</mixed-citation></ref>
<ref id="ref28"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Fiesler</surname><given-names>C.</given-names></name> <name><surname>Zimmer</surname><given-names>M.</given-names></name> <name><surname>Proferes</surname><given-names>N.</given-names></name> <name><surname>Gilbert</surname><given-names>S.</given-names></name> <name><surname>Jones</surname><given-names>N.</given-names></name></person-group> (<year>2024</year>). &#x201C;<article-title>Remember the human: a systematic review of ethical considerations in Reddit research</article-title>&#x201D; in <source>Proceedings of the ACM on human-computer interaction</source>, (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>GROUP</publisher-name>), <volume>8</volume>, <fpage>1</fpage>&#x2013;<lpage>33</lpage>.</mixed-citation></ref>
<ref id="ref29"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Frank</surname><given-names>M. R.</given-names></name> <name><surname>Autor</surname><given-names>D.</given-names></name> <name><surname>Bessen</surname><given-names>J. E.</given-names></name> <name><surname>Brynjolfsson</surname><given-names>E.</given-names></name> <name><surname>Cebrian</surname><given-names>M.</given-names></name> <name><surname>Deming</surname><given-names>D. J.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Toward understanding the impact of artificial intelligence on labor</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>116</volume>, <fpage>6531</fpage>&#x2013;<lpage>6539</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.1900949116</pub-id>, <pub-id pub-id-type="pmid">30910965</pub-id></mixed-citation></ref>
<ref id="ref30"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Freise</surname><given-names>L. R.</given-names></name> <name><surname>Ritz</surname><given-names>E.</given-names></name> <name><surname>Rietsche</surname><given-names>R.</given-names></name> <name><surname>Beitinger</surname><given-names>G.</given-names></name></person-group> (<year>2025</year>). <article-title>How Siemens empowered workforce re- and upskilling through digital learning</article-title>. <source>MIS Q. Exec.</source> <volume>24</volume>, <fpage>239</fpage>&#x2013;<lpage>254</lpage>. doi: <pub-id pub-id-type="doi">10.17705/2msqe.00118</pub-id></mixed-citation></ref>
<ref id="ref31"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gagn&#x00E9;</surname><given-names>M.</given-names></name> <name><surname>Parker</surname><given-names>S. K.</given-names></name> <name><surname>Griffin</surname><given-names>M. A.</given-names></name> <name><surname>Dunlop</surname><given-names>P. D.</given-names></name> <name><surname>Knight</surname><given-names>C.</given-names></name> <name><surname>Klonek</surname><given-names>F. E.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Understanding and shaping the future of work with self-determination theory</article-title>. <source>Nat. Rev. Psychol.</source> <volume>1</volume>, <fpage>378</fpage>&#x2013;<lpage>392</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s44159-022-00056-w</pub-id>, <pub-id pub-id-type="pmid">35574235</pub-id></mixed-citation></ref>
<ref id="ref32"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Garc&#x00ED;a-Madurga</surname><given-names>M.-&#x00C1;.</given-names></name> <name><surname>Gil-Lacruz</surname><given-names>A.-I.</given-names></name> <name><surname>Saz-Gil</surname><given-names>I.</given-names></name> <name><surname>Gil-Lacruz</surname><given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>The role of artificial intelligence in improving workplace well-being: a systematic review</article-title>. <source>Businesses</source> <volume>4</volume>, <fpage>389</fpage>&#x2013;<lpage>410</lpage>. doi: <pub-id pub-id-type="doi">10.3390/businesses4030024</pub-id></mixed-citation></ref>
<ref id="ref33"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gliniecka</surname><given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>The ethics of publicly available data research: a situated ethics framework for Reddit</article-title>. <source>Soc. Media Soc.</source> <volume>9</volume>:<fpage>20563051231192021</fpage>. doi: <pub-id pub-id-type="doi">10.1177/20563051231192021</pub-id></mixed-citation></ref>
<ref id="ref34"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Golgeci</surname><given-names>I.</given-names></name> <name><surname>Ritala</surname><given-names>P.</given-names></name> <name><surname>Arslan</surname><given-names>A.</given-names></name> <name><surname>McKenna</surname><given-names>B.</given-names></name> <name><surname>Ali</surname><given-names>I.</given-names></name></person-group> (<year>2025</year>). <article-title>Confronting and alleviating AI resistance in the workplace: an integrative review and a process framework</article-title>. <source>Hum. Resour. Manag. Rev.</source> <volume>35</volume>:<fpage>101075</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.hrmr.2024.101075</pub-id></mixed-citation></ref>
<ref id="ref35"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gratch</surname><given-names>J.</given-names></name> <name><surname>Fast</surname><given-names>N. J.</given-names></name></person-group> (<year>2022</year>). <article-title>The power to harm: AI assistants pave the way to unethical behavior</article-title>. <source>Curr. Opin. Psychol.</source> <volume>47</volume>:<fpage>101382</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.copsyc.2022.101382</pub-id>, <pub-id pub-id-type="pmid">35830764</pub-id></mixed-citation></ref>
<ref id="ref36"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hobfoll</surname><given-names>S. E.</given-names></name></person-group> (<year>1989</year>). <article-title>Conservation of resources: a new attempt at conceptualizing stress</article-title>. <source>Am. Psychol.</source> <volume>44</volume>, <fpage>513</fpage>&#x2013;<lpage>524</lpage>. doi: <pub-id pub-id-type="doi">10.1037/0003-066X.44.3.513</pub-id>, <pub-id pub-id-type="pmid">2648906</pub-id></mixed-citation></ref>
<ref id="ref37"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hobfoll</surname><given-names>S. E.</given-names></name> <name><surname>Halbesleben</surname><given-names>J.</given-names></name> <name><surname>Neveu</surname><given-names>J.-P.</given-names></name> <name><surname>Westman</surname><given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>Conservation of resources in the organizational context: the reality of resources and their consequences</article-title>. <source>Annu. Rev. Organ. Psychol. Organ. Behav.</source> <volume>5</volume>, <fpage>103</fpage>&#x2013;<lpage>128</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev-orgpsych-032117-104640</pub-id></mixed-citation></ref>
<ref id="ref38"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hunkenschroer</surname><given-names>A. L.</given-names></name> <name><surname>Luetge</surname><given-names>C.</given-names></name></person-group> (<year>2022</year>). <article-title>Ethics of AI-enabled recruiting and selection: a review and research agenda</article-title>. <source>J. Bus. Ethics</source> <volume>178</volume>, <fpage>977</fpage>&#x2013;<lpage>1007</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10551-022-05049-6</pub-id></mixed-citation></ref>
<ref id="ref39"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Hutto</surname><given-names>C.</given-names></name> <name><surname>Gilbert</surname><given-names>E.</given-names></name></person-group> (<year>2014</year>). <article-title>VADER: a parsimonious rule-based model for sentiment analysis of social media text</article-title>. <source>Proceedings of the international AAAI conference on web and social media</source>, <publisher-loc>Palo Alto, CA</publisher-loc>. <volume>8</volume>, <fpage>216</fpage>&#x2013;<lpage>225</lpage>.</mixed-citation></ref>
<ref id="ref40"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Johnson</surname><given-names>A.</given-names></name> <name><surname>Dey</surname><given-names>S.</given-names></name> <name><surname>Nguyen</surname><given-names>H.</given-names></name> <name><surname>Groth</surname><given-names>M.</given-names></name> <name><surname>Joyce</surname><given-names>S.</given-names></name> <name><surname>Tan</surname><given-names>L.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>A review and agenda for examining how technology-driven changes at work will impact workplace mental health and employee well-being</article-title>. <source>Aust. J. Manage.</source> <volume>45</volume>, <fpage>402</fpage>&#x2013;<lpage>424</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0312896220922292</pub-id></mixed-citation></ref>
<ref id="ref41"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kahlow</surname><given-names>J. A.</given-names></name></person-group> (<year>2024</year>). <article-title>Beyond the surface: Reddit&#x2019;s anonymity facilitates deeper disclosures than Facebook</article-title>. <source>Int. J. Soc. Media Online Commun.</source> <volume>16</volume>, <fpage>1</fpage>&#x2013;<lpage>8</lpage>. doi: <pub-id pub-id-type="doi">10.4018/IJSMOC.343629</pub-id></mixed-citation></ref>
<ref id="ref42"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Keegan</surname><given-names>A.</given-names></name> <name><surname>Meijerink</surname><given-names>J.</given-names></name></person-group> (<year>2025</year>). <article-title>Algorithmic Management in Organizations? From edge case to center stage</article-title>. <source>Annu. Rev. Organ. Psychol. Organ. Behav.</source> <volume>12</volume>, <fpage>395</fpage>&#x2013;<lpage>422</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev-orgpsych-110622-070928</pub-id></mixed-citation></ref>
<ref id="ref43"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khogali</surname><given-names>H. O.</given-names></name> <name><surname>Mekid</surname><given-names>S.</given-names></name></person-group> (<year>2023</year>). <article-title>The blended future of automation and AI: examining some long-term societal and ethical impact features</article-title>. <source>Technol. Soc.</source> <volume>73</volume>:<fpage>102232</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.techsoc.2023.102232</pub-id></mixed-citation></ref>
<ref id="ref44"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kinowska</surname><given-names>H.</given-names></name> <name><surname>Sienkiewicz</surname><given-names>&#x0141;. J.</given-names></name></person-group> (<year>2023</year>). <article-title>Influence of algorithmic management practices on workplace well-being &#x2013; evidence from European organisations</article-title>. <source>Inf. Technol. People</source> <volume>36</volume>, <fpage>21</fpage>&#x2013;<lpage>42</lpage>. doi: <pub-id pub-id-type="doi">10.1108/ITP-02-2022-0079</pub-id></mixed-citation></ref>
<ref id="ref45"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>K&#x00F6;chling</surname><given-names>A.</given-names></name> <name><surname>Wehner</surname><given-names>M. C.</given-names></name></person-group> (<year>2020</year>). <article-title>Discriminated by an algorithm: a systematic review of discrimination and fairness by algorithmic decision-making in the context of HR recruitment and HR development</article-title>. <source>Bus. Res.</source> <volume>13</volume>, <fpage>795</fpage>&#x2013;<lpage>848</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s40685-020-00134-w</pub-id></mixed-citation></ref>
<ref id="ref46"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kordzadeh</surname><given-names>N.</given-names></name> <name><surname>Ghasemaghaei</surname><given-names>M.</given-names></name></person-group> (<year>2022</year>). <article-title>Algorithmic bias: review, synthesis, and future research directions</article-title>. <source>Eur. J. Inf. Syst.</source> <volume>31</volume>, <fpage>388</fpage>&#x2013;<lpage>409</lpage>. doi: <pub-id pub-id-type="doi">10.1080/0960085X.2021.1927212</pub-id></mixed-citation></ref>
<ref id="ref47"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Leavitt</surname><given-names>A.</given-names></name> <name><surname>Robinson</surname><given-names>J. J.</given-names></name></person-group> (<year>2017</year>). &#x201C;<article-title>Upvote my news: the practices of peer information aggregation for breaking news on reddit.com</article-title>&#x201D; in <source>Proceedings of the ACM on human-computer interaction, 1</source>, (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>CSCW</publisher-name>) <fpage>1</fpage>&#x2013;<lpage>18</lpage>.</mixed-citation></ref>
<ref id="ref48"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Leicht-Deobald</surname><given-names>U.</given-names></name> <name><surname>Busch</surname><given-names>T.</given-names></name> <name><surname>Schank</surname><given-names>C.</given-names></name> <name><surname>Weibel</surname><given-names>A.</given-names></name> <name><surname>Schafheitle</surname><given-names>S.</given-names></name> <name><surname>Wildhaber</surname><given-names>I.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>The challenges of algorithm-based HR decision-making for personal integrity</article-title>. <source>J. Bus. Ethics</source> <volume>160</volume>, <fpage>377</fpage>&#x2013;<lpage>392</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10551-019-04204-w</pub-id>, <pub-id pub-id-type="pmid">31814653</pub-id></mixed-citation></ref>
<ref id="ref49"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Markham</surname><given-names>A.</given-names></name> <name><surname>Buchanan</surname><given-names>E.</given-names></name></person-group> <collab id="col101">with feedback from the AOIR Ethics Working Committee</collab>. (<year>2012</year>). <source>Ethical decision-making and internet research: Recommendations from the AoIR Ethics Working Committee (Version 2.0)</source>. Available online at: <ext-link xlink:href="https://www.aoir.org/reports/ethics2.pdf" ext-link-type="uri">https://www.aoir.org/reports/ethics2.pdf</ext-link></mixed-citation></ref>
<ref id="ref50"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Moghayedi</surname><given-names>A.</given-names></name> <name><surname>Michell</surname><given-names>K.</given-names></name> <name><surname>Awuzie</surname><given-names>B.</given-names></name> <name><surname>Adama</surname><given-names>U. J.</given-names></name></person-group> (<year>2024</year>). <article-title>A comprehensive analysis of the implications of artificial intelligence adoption on employee social well-being in South African facility management organizations</article-title>. <source>J. Corp. Real Estate</source> <volume>26</volume>, <fpage>237</fpage>&#x2013;<lpage>261</lpage>. doi: <pub-id pub-id-type="doi">10.1108/JCRE-09-2023-0041</pub-id></mixed-citation></ref>
<ref id="ref51"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Mohammad</surname><given-names>S. M.</given-names></name> <name><surname>Turney</surname><given-names>P. D.</given-names></name></person-group> (<year>2013</year>). <article-title>Crowdsourcing a word-emotion association lexicon</article-title>. [Epub ahead of print]. doi: <pub-id pub-id-type="doi">10.48550/arXiv.1308.6297</pub-id></mixed-citation></ref>
<ref id="ref52"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Murire</surname><given-names>O. T.</given-names></name></person-group> (<year>2024</year>). <article-title>Artificial intelligence and its role in shaping organizational work practices and culture</article-title>. <source>Admin. Sci.</source> <volume>14</volume>:<fpage>316</fpage>. doi: <pub-id pub-id-type="doi">10.3390/admsci14120316</pub-id></mixed-citation></ref>
<ref id="ref53"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nukhu</surname><given-names>R.</given-names></name> <name><surname>Singh</surname><given-names>S.</given-names></name> <name><surname>Chittiprolu</surname><given-names>V.</given-names></name> <name><surname>Ali</surname><given-names>A.</given-names></name></person-group> (<year>2025</year>). <article-title>Do users anthropomorphize AI-based virtual influencers? Unraveling Reddit user perceptions via text mining</article-title>. <source>Int. J. Hum. Comput. Interact.</source> <volume>41</volume>, <fpage>8975</fpage>&#x2013;<lpage>8988</lpage>. doi: <pub-id pub-id-type="doi">10.1080/10447318.2024.2416017</pub-id></mixed-citation></ref>
<ref id="ref54"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oyekunle</surname><given-names>D.</given-names></name> <name><surname>Boohene</surname><given-names>D.</given-names></name> <name><surname>Preston</surname><given-names>D.</given-names></name></person-group> (<year>2024</year>). <article-title>Ethical considerations in AI-powered work environments: a literature review and theoretical framework for ensuring human dignity and fairness</article-title>. <source>Int. J. Sci. Res. Manag.</source> <volume>12</volume>, <fpage>6166</fpage>&#x2013;<lpage>6178</lpage>. doi: <pub-id pub-id-type="doi">10.18535/ijsrm/v12i03.em18</pub-id></mixed-citation></ref>
<ref id="ref55"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>&#x00D6;zkiziltan</surname><given-names>D.</given-names></name> <name><surname>Hassel</surname><given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>Artificial intelligence at work: an overview of the literature</article-title>. <source>SSRN Electron. J.</source> doi: <pub-id pub-id-type="doi">10.2139/ssrn.3796746</pub-id></mixed-citation></ref>
<ref id="ref56"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pereira</surname><given-names>V.</given-names></name> <name><surname>Hadjielias</surname><given-names>E.</given-names></name> <name><surname>Christofi</surname><given-names>M.</given-names></name> <name><surname>Vrontis</surname><given-names>D.</given-names></name></person-group> (<year>2023</year>). <article-title>A systematic literature review on the impact of artificial intelligence on workplace outcomes: a multi-process perspective</article-title>. <source>Hum. Resour. Manag. Rev.</source> <volume>33</volume>:<fpage>100857</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.hrmr.2021.100857</pub-id></mixed-citation></ref>
<ref id="ref57"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Proferes</surname><given-names>N.</given-names></name> <name><surname>Jones</surname><given-names>N.</given-names></name> <name><surname>Gilbert</surname><given-names>S.</given-names></name> <name><surname>Fiesler</surname><given-names>C.</given-names></name> <name><surname>Zimmer</surname><given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Studying Reddit: a systematic overview of disciplines, approaches, methods, and ethics</article-title>. <source>Soc. Media Soc.</source> <volume>7</volume>:<fpage>20563051211019004</fpage>. doi: <pub-id pub-id-type="doi">10.1177/20563051211019004</pub-id></mixed-citation></ref>
<ref id="ref58"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ragu-Nathan</surname><given-names>T. S.</given-names></name> <name><surname>Tarafdar</surname><given-names>M.</given-names></name> <name><surname>Ragu-Nathan</surname><given-names>B. S.</given-names></name> <name><surname>Tu</surname><given-names>Q.</given-names></name></person-group> (<year>2008</year>). <article-title>The consequences of technostress for end users in organizations: conceptual development and empirical validation</article-title>. <source>Inf. Syst. Res.</source> <volume>19</volume>, <fpage>417</fpage>&#x2013;<lpage>433</lpage>. doi: <pub-id pub-id-type="doi">10.1287/isre.1070.0165</pub-id></mixed-citation></ref>
<ref id="ref59"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Reagle</surname><given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>Disguising Reddit sources and the efficacy of ethical research</article-title>. <source>Ethics Inf. Technol.</source> <volume>24</volume>:<fpage>41</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s10676-022-09663-w</pub-id>, <pub-id pub-id-type="pmid">36105629</pub-id></mixed-citation></ref>
<ref id="ref60"><mixed-citation publication-type="other"><collab id="coll3">Reddit</collab> (<year>2025</year>). User Agreement. Available online at: <ext-link xlink:href="https://redditinc.com/policies/user-agreement-june-28-2025" ext-link-type="uri">https://redditinc.com/policies/user-agreement-june-28-2025</ext-link> (Accessed January 17, 2026).</mixed-citation></ref>
<ref id="ref61"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ribeiro</surname><given-names>F. N.</given-names></name> <name><surname>Ara&#x00FA;jo</surname><given-names>M.</given-names></name> <name><surname>Gon&#x00E7;alves</surname><given-names>P.</given-names></name> <name><surname>Andr&#x00E9; Gon&#x00E7;alves</surname><given-names>M.</given-names></name> <name><surname>Benevenuto</surname><given-names>F.</given-names></name></person-group> (<year>2016</year>). <article-title>SentiBench&#x2014;a benchmark comparison of state-of-the-practice sentiment analysis methods</article-title>. <source>EPJ Data Sci.</source> <volume>5</volume>:<fpage>23</fpage>. doi: <pub-id pub-id-type="doi">10.1140/epjds/s13688-016-0085-1</pub-id></mixed-citation></ref>
<ref id="ref62"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Rousseau</surname><given-names>D.</given-names></name></person-group> (<year>1995</year>). <source>Psychological contracts in organizations: Understanding written and unwritten agreements</source>. <publisher-loc>Thousand Oaks, CA</publisher-loc>: <publisher-name>SAGE Publications, Inc</publisher-name>.</mixed-citation></ref>
<ref id="ref63"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ryan</surname><given-names>R. M.</given-names></name> <name><surname>Deci</surname><given-names>E. L.</given-names></name></person-group> (<year>2000</year>). <article-title>Self-determination theory and the facilitation of intrinsic motivation, social development, and well-being</article-title>. <source>Am. Psychol.</source> <volume>55</volume>, <fpage>68</fpage>&#x2013;<lpage>78</lpage>. doi: <pub-id pub-id-type="doi">10.1037/0003-066X.55.1.68</pub-id>, <pub-id pub-id-type="pmid">11392867</pub-id></mixed-citation></ref>
<ref id="ref64"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Saha</surname><given-names>S.</given-names></name> <name><surname>Basu</surname><given-names>S.</given-names></name> <name><surname>Pandit</surname><given-names>D.</given-names></name></person-group> (<year>2022</year>). <article-title>Identifying factors influencing perceived quality of life (QoL) of Indian elderly: case study of Kolkata, India</article-title>. <source>Soc. Indic. Res.</source> <volume>160</volume>, <fpage>867</fpage>&#x2013;<lpage>907</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11205-020-02493-7</pub-id></mixed-citation></ref>
<ref id="ref65"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Segkouli</surname><given-names>S.</given-names></name> <name><surname>Giakoumis</surname><given-names>D.</given-names></name> <name><surname>Votis</surname><given-names>K.</given-names></name> <name><surname>Triantafyllidis</surname><given-names>A.</given-names></name> <name><surname>Paliokas</surname><given-names>I.</given-names></name> <name><surname>Tzovaras</surname><given-names>D.</given-names></name></person-group> (<year>2023</year>). <article-title>Smart workplaces for older adults: coping &#x2018;ethically&#x2019; with technology pervasiveness</article-title>. <source>Univ. Access Inf. Soc.</source> <volume>22</volume>, <fpage>37</fpage>&#x2013;<lpage>49</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10209-021-00829-9</pub-id>, <pub-id pub-id-type="pmid">34305502</pub-id></mixed-citation></ref>
<ref id="ref66"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shoss</surname><given-names>M. K.</given-names></name></person-group> (<year>2017</year>). <article-title>Job insecurity: an integrative review and agenda for future research</article-title>. <source>J. Manage.</source> <volume>43</volume>, <fpage>1911</fpage>&#x2013;<lpage>1939</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0149206317691574</pub-id></mixed-citation></ref>
<ref id="ref67"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sit</surname><given-names>M.</given-names></name> <name><surname>Elliott</surname><given-names>S. A.</given-names></name> <name><surname>Wright</surname><given-names>K. S.</given-names></name> <name><surname>Scott</surname><given-names>S. D.</given-names></name> <name><surname>Hartling</surname><given-names>L.</given-names></name></person-group> (<year>2024</year>). <article-title>Youth mental health help-seeking information needs and experiences: a thematic analysis of Reddit posts</article-title>. <source>Youth Soc.</source> <volume>56</volume>, <fpage>24</fpage>&#x2013;<lpage>41</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0044118X221129642</pub-id></mixed-citation></ref>
<ref id="ref68"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Soulami</surname><given-names>M.</given-names></name> <name><surname>Benchekroun</surname><given-names>S.</given-names></name> <name><surname>Galiulina</surname><given-names>A.</given-names></name></person-group> (<year>2024</year>). <article-title>Exploring how AI adoption in the workplace affects employees: a bibliometric and systematic review</article-title>. <source>Front. Artif. Intell.</source> <volume>7</volume>:<fpage>1473872</fpage>. doi: <pub-id pub-id-type="doi">10.3389/frai.2024.1473872</pub-id>, <pub-id pub-id-type="pmid">39610851</pub-id></mixed-citation></ref>
<ref id="ref69"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Starke</surname><given-names>C.</given-names></name> <name><surname>Baleis</surname><given-names>J.</given-names></name> <name><surname>Keller</surname><given-names>B.</given-names></name> <name><surname>Marcinkowski</surname><given-names>F.</given-names></name></person-group> (<year>2022</year>). <article-title>Fairness perceptions of algorithmic decision-making: a systematic review of the empirical literature</article-title>. <source>Big Data Soc.</source> <volume>9</volume>:<fpage>20539517221115189</fpage>. doi: <pub-id pub-id-type="doi">10.1177/20539517221115189</pub-id></mixed-citation></ref>
<ref id="ref70"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tarafdar</surname><given-names>M.</given-names></name> <name><surname>Cooper</surname><given-names>C. L.</given-names></name> <name><surname>Stich</surname><given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>The technostress trifecta - techno eustress, techno distress and design: theoretical directions and an agenda for research</article-title>. <source>Inf. Syst. J.</source> <volume>29</volume>, <fpage>6</fpage>&#x2013;<lpage>42</lpage>. doi: <pub-id pub-id-type="doi">10.1111/isj.12169</pub-id></mixed-citation></ref>
<ref id="ref71"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Taslim</surname><given-names>W. S.</given-names></name> <name><surname>Rosnani</surname><given-names>T.</given-names></name> <name><surname>Fauzan</surname><given-names>R.</given-names></name></person-group> (<year>2025</year>). <article-title>Employee involvement in AI-driven HR decision-making: a systematic review</article-title>. <source>SA J. Hum. Resour. Manage.</source> <volume>23</volume>:<fpage>a2856</fpage>. doi: <pub-id pub-id-type="doi">10.4102/sajhrm.v23i0.2856</pub-id></mixed-citation></ref>
<ref id="ref72"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tenakwah</surname><given-names>E. S.</given-names></name> <name><surname>Watson</surname><given-names>C.</given-names></name></person-group> (<year>2025</year>). <article-title>Embracing the AI/automation age: preparing your workforce for humans and machines working together</article-title>. <source>Strat. Leadersh.</source> <volume>53</volume>, <fpage>32</fpage>&#x2013;<lpage>48</lpage>. doi: <pub-id pub-id-type="doi">10.1108/SL-05-2024-0040</pub-id></mixed-citation></ref>
<ref id="ref73"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tomprou</surname><given-names>M.</given-names></name> <name><surname>Lee</surname><given-names>M. K.</given-names></name></person-group> (<year>2022</year>). <article-title>Employment relationships in algorithmic management: a psychological contract perspective</article-title>. <source>Comput. Hum. Behav.</source> <volume>126</volume>:<fpage>106997</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2021.106997</pub-id></mixed-citation></ref>
<ref id="ref74"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vrontis</surname><given-names>D.</given-names></name> <name><surname>Christofi</surname><given-names>M.</given-names></name> <name><surname>Pereira</surname><given-names>V.</given-names></name> <name><surname>Tarba</surname><given-names>S.</given-names></name> <name><surname>Makrides</surname><given-names>A.</given-names></name> <name><surname>Trichina</surname><given-names>E.</given-names></name></person-group> (<year>2022</year>). <article-title>Artificial intelligence, robotics, advanced technologies and human resource management: a systematic review</article-title>. <source>Int. J. Hum. Resour. Manage.</source> <volume>33</volume>, <fpage>1237</fpage>&#x2013;<lpage>1266</lpage>. doi: <pub-id pub-id-type="doi">10.1080/09585192.2020.1871398</pub-id></mixed-citation></ref>
<ref id="ref75"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zirar</surname><given-names>A.</given-names></name> <name><surname>Ali</surname><given-names>S. I.</given-names></name> <name><surname>Islam</surname><given-names>N.</given-names></name></person-group> (<year>2023</year>). <article-title>Worker and workplace artificial intelligence (AI) coexistence: emerging themes and research agenda</article-title>. <source>Technovation</source> <volume>124</volume>:<fpage>102747</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.technovation.2023.102747</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/740869/overview">Xi Chen</ext-link>, Yunnan University, China</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/809229/overview">Morteza Taheri</ext-link>, University of Tehran, Iran</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3138607/overview">Ling Huang</ext-link>, Yunnan University, China</p>
</fn>
</fn-group>
</back>
</article>