<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Educ.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Education</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Educ.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2504-284X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/feduc.2026.1760626</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Artificial intelligence as a catalyst for transformative assessment: designing teacher literacy at the crossroads of ethics, pedagogy, and human relationships</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes"><name><surname>Vinci</surname> <given-names>Viviana</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref><xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1566136"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author"><name><surname>Agrati</surname> <given-names>Laura Sara</given-names></name><xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1286633"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author"><name><surname>Berardi</surname> <given-names>Pierangelo</given-names></name><xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3123225"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author"><name><surname>Beri</surname> <given-names>Arianna</given-names></name><xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3038352"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Humanities, Literature, Cultural Heritage, Education Sciences, University of Foggia</institution>, <city>Foggia</city>, <country country="it">Italy</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Education and Sports Sciences, Telematic University Pegaso</institution>, <city>Naples</city>, <country country="it">Italy</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Education, Psychology, Communication Sciences, University of Bari</institution>, <city>Bari</city>, <country country="it">Italy</country></aff>
<aff id="aff4"><label>4</label><institution>Department of Humanities and Social Sciences, University of Bergamo</institution>, <city>Bergamo</city>, <country country="it">Italy</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Viviana Vinci, <email xlink:href="mailto:viviana.vinci@unifg.it">viviana.vinci@unifg.it</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-17">
<day>17</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>11</volume>
<elocation-id>1760626</elocation-id>
<history>
<date date-type="received">
<day>18</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>21</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>22</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Vinci, Agrati, Berardi and Beri.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Vinci, Agrati, Berardi and Beri</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-17">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>The rapid proliferation of Generative Artificial Intelligence (GenAI) in educational settings necessitates a redefinition of assessment practices, raising crucial questions regarding the relationship between automation and pedagogical ethics. While AI offers opportunities for efficiency and personalization, there persists a risk of a technocratic drift that threatens the relational dimension of teaching. This study explores the perceptions of trainee teachers regarding the integration of AI in assessment, investigating the transformation of practices (RQ1) and the competencies required for an &#x201C;AI-literate&#x201D; teacher profile (RQ2). Following a convergent mixed-methods design, a survey was conducted with a sample of 662 teachers enrolled in qualifying courses (60 ECTS) at the University of Foggia and the University of Bergamo. Data collected via a CAWI survey were analyzed using Spearman&#x2019;s correlation and thematic analysis supported by text mining. The data reveal a &#x201C;conscious resistance&#x201D;: despite possessing a good level of technical comfort, pedagogical trust in AI remains low (<italic>M</italic>&#x202F;=&#x202F;4.83/10). Inferential analysis highlights a training paradox: trust correlates only weakly with the technical training received (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.36) but far more strongly with the perception of concrete support (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.57). Qualitatively, concerns regarding dehumanization and a demand for a principle of subsidiarity emerge. The study suggests that technical competence alone is insufficient for AI adoption. A Critical AI Literacy that integrates ethics and pedagogy is required, restoring decision-making control to the teacher to ensure fair and humanizing assessment.</p>
</abstract>
<kwd-group>
<kwd>AI literacy</kwd>
<kwd>artificial intelligence in education</kwd>
<kwd>ethics</kwd>
<kwd>formative assessment</kwd>
<kwd>teacher agency</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="4"/>
<table-count count="3"/>
<equation-count count="0"/>
<ref-count count="26"/>
<page-count count="10"/>
<word-count count="6838"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Digital Education</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>The pervasive and rapid diffusion of Artificial Intelligence (AI), with particular reference to Generative AI (GenAI) systems, is radically redefining the foundational paradigms of education. This suggests the need for a critical revision of how learning is conceived, delivered, and, above all, assessed (<xref ref-type="bibr" rid="ref20">OECD, 2023</xref>; <xref ref-type="bibr" rid="ref23">UNESCO, 2023</xref>). In this continuously evolving scenario, assessment emerges as a strategic yet extremely delicate arena, raising questions that transcend the mere technical dimension to affect the epistemological and ethical spheres of the educational act (<xref ref-type="bibr" rid="ref15">Luckin et al., 2022</xref>; <xref ref-type="bibr" rid="ref10">Holmes and Tuomi, 2022</xref>). In this study, a distinction is drawn between AI Literacy and traditional Digital Literacy. As observed by <xref ref-type="bibr" rid="ref19">Ng et al. (2021)</xref>, the concept of literacy is broadening to address the complexity of algorithmic environments; whereas digital literacy primarily concerns the operational proficiency required to utilize technological tools, AI Literacy constitutes a more intricate, multidimensional construct. Grounded in the frameworks proposed by <xref ref-type="bibr" rid="ref14">Long and Magerko (2020)</xref> and expanded by <xref ref-type="bibr" rid="ref15">Luckin et al. (2022)</xref> and <xref ref-type="bibr" rid="ref26">Williams (2024)</xref>, teacher AI Literacy is defined here as the intersection of conceptual understanding, the ability to comprehend the underlying logic of machine learning algorithms, and critical agency, which entails evaluating ethical implications such as bias and data privacy. Additionally, this competence encompasses pedagogical governance, understood as the strategic ability to discern when to delegate tasks to AI, thereby supporting the transition from passive users to critical evaluators of the technology. The introduction of algorithms capable of generating complex content should not be viewed merely as a simple functional substitution of traditional tools; rather, it may act as a catalyst prompting a rethinking of assessment practices through a more equitable, inclusive, and personalized lens, while averting the risk of a technocratic drift that would reduce formative judgment to a computational automatism.</p>
<p>However, the reception of these technologies within the school system is neither linear nor devoid of conflict. As highlighted by recent investigations into initial teacher training, a marked polarization is evident, oscillating between enthusiasm and resistance (<xref ref-type="bibr" rid="ref22">Selwyn, 2016</xref>; <xref ref-type="bibr" rid="ref4">Buchholz et al., 2020</xref>). While a fascination with innovation and the potential for personalization emerges on one side, on the other, the fear of professional &#x201C;deskilling&#x201D; is consolidating: the perceived risk that automation may erode fundamental pedagogical competencies, delegating the responsibility of judgment to algorithmic &#x201C;black boxes&#x201D; and reducing teaching to mere output management. This tension highlights how AI is perceived not merely as a tool, but as an agent that interrogates the very identity of the teacher.</p>
<p>Regarding opportunities, the literature highlights significant cross-cutting benefits. For teachers, GenAI represents a powerful resource for automating low-value-added and time-consuming tasks, such as the correction of structured tests or the preliminary analysis of learning data, thereby ensuring greater consistency in grading and freeing up cognitive resources to be reinvested in the educational relationship (<xref ref-type="bibr" rid="ref8">Chan and Hu, 2023</xref>; <xref ref-type="bibr" rid="ref13">Liu, 2024</xref>). In parallel, AI supports instructional design by offering creative insights and materials aligned with learning objectives. For students, the added value lies in access to adaptive and on-demand learning: intelligent tutoring systems can simplify complex concepts and offer immediate feedback, fostering personalized pathways that respect individual cognitive rhythms (<xref ref-type="bibr" rid="ref11">Kasneci et al., 2023</xref>; <xref ref-type="bibr" rid="ref24">Walter, 2024</xref>).</p>
<p>Nevertheless, this technological integration raises profound critical issues that threaten the validity of the assessment process itself. One of the most urgent concerns is academic integrity, which is severely tested by the ease with which students can use chatbots to draft assignments, giving rise to sophisticated forms of plagiarism that are often difficult to detect (<xref ref-type="bibr" rid="ref9">Gruenhagen et al., 2024</xref>; <xref ref-type="bibr" rid="ref12">Lim et al., 2023</xref>). Even more insidious is the risk linked to the potential decline of cognitive skills: an over-reliance on GenAI for problem-solving could hinder the development of critical thinking and creativity, leading to a form of &#x201C;cognitive laziness&#x201D; wherein the student delegates deep processing to the machine (<xref ref-type="bibr" rid="ref5">&#x00C7;ela et al., 2024</xref>; <xref ref-type="bibr" rid="ref27">Zhai et al., 2024</xref>).</p>
<p>In this balance between risks and benefits, a determining factor comes into play: &#x201C;familiarity&#x201D; with the tool. Specific studies in the context of Teacher Education (<xref ref-type="bibr" rid="ref7">Chai et al., 2016</xref>; <xref ref-type="bibr" rid="ref6">Celik, 2023</xref>) suggest that the critical acceptance of AI does not depend solely on its perceived utility, but is strongly correlated with the teacher&#x2019;s degree of technical and pedagogical familiarity. Resistance is often not ideological but the result of limited exposure that generates anxiety and uncertainty; conversely, greater familiarity allows the technology to be demystified and consciously integrated into teaching practices.</p>
<p>In light of these challenges, the need to move beyond standardized assessment methods in favor of forms of authentic assessment, based on processes that AI cannot entirely simulate, becomes increasingly relevant (<xref ref-type="bibr" rid="ref21">Salinas-Navarro et al., 2024</xref>). This shift advocates for a deep redefinition of teachers&#x2019; assessment literacy (<xref ref-type="bibr" rid="ref1">Bearman et al., 2020</xref>). Possessing traditional assessment competencies may no longer be sufficient; it appears crucial to train an &#x201C;AI-literate&#x201D; teacher profile, endowed with solid critical and ethical awareness. Consequently, the integration of AI in education calls for a paradigm shift from simple automation to a Human-in-the-loop (HITL) approach (<xref ref-type="bibr" rid="ref16">Memarian and Doleck, 2024</xref>; <xref ref-type="bibr" rid="ref17">Mosqueira-Rey et al., 2023</xref>). However, to fully preserve teacher agency, we argue for the adoption of the more advanced AI-in-the-loop (AI<sup>2</sup>L) perspective recently proposed by <xref ref-type="bibr" rid="ref18">Natarajan et al. (2025)</xref>. In this framework, the human expert is not merely a supervisor &#x2018;in the loop&#x2019; of the algorithm, but the active owner of the educational process, while the AI is positioned &#x2018;in the loop&#x2019; of the teacher as a subservient support tool. Adopting an AI<sup>2</sup>L perspective mitigates ethical risks such as bias and opacity, ensuring that assessment remains a pedagogical act guided by human intentionality rather than an automated statistical output. Consistent with this scenario, the present research aims to explore the relationship between Artificial Intelligence (AI) and educational assessment through a critical and transformative lens, placing particular attention on the changes emerging in the professionalism of trainee teachers. Specifically, the investigation is guided by two main research questions:</p>
<list list-type="bullet">
<list-item>
<p>RQ1: How do trainee teachers perceive the transformation of assessment practices mediated by AI, and what tensions emerge between automation and pedagogical responsibility?</p>
</list-item>
<list-item>
<p>RQ2: What specific training dimensions are central to outlining an &#x201C;AI-literate&#x201D; teacher profile capable of integrating AI into assessment without compromising the educational dimension?</p>
</list-item>
</list>
</sec>
<sec sec-type="materials|methods" id="sec2">
<label>2</label>
<title>Materials and methods</title>
<sec id="sec3">
<label>2.1</label>
<title>Research design and participants</title>
<p>The study employs a convergent parallel mixed-methods design, aimed at investigating the perceptions and competencies of trainee teachers regarding the integration of AI in assessment. Data collection was conducted between April and May 2025 via digital administration.</p>
<p>The convenience sample consists of <italic>N</italic>&#x202F;=&#x202F;662 teachers enrolled in qualifying courses (60 ECTS) at the University of Foggia (<italic>n</italic>&#x202F;=&#x202F;523; 79.0%) and the University of Bergamo (<italic>n</italic>&#x202F;=&#x202F;139; 21.0%).</p>
<p>The demographic composition (<xref ref-type="table" rid="tab1">Table 1</xref>) highlights a prevalence of female participants (<italic>n</italic>&#x202F;=&#x202F;424; 64.0%) compared to the male component (<italic>n</italic>&#x202F;=&#x202F;234; 35.3%). The distribution by age group reveals a heterogeneous population: although the modal group is represented by the 25&#x2013;29 age range (<italic>n</italic>&#x202F;=&#x202F;176; 26.6%), a significant presence of trainee teachers in the over-45 range (<italic>n</italic>&#x202F;=&#x202F;162; 24.5%) is observed, delineating a multigenerational training context.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Sociodemographic and professional characteristics of the sample (<italic>N</italic>&#x202F;=&#x202F;662).</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th>Characteristics</th>
<th align="center" valign="top">Number of participants (<italic>n</italic>. 662)</th>
<th align="center" valign="top">Percentage of participants (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top" colspan="3">Gender</td>
</tr>
<tr>
<td align="left" valign="top">Male</td>
<td align="center" valign="top">234</td>
<td align="center" valign="top">35.3</td>
</tr>
<tr>
<td align="left" valign="top">Female</td>
<td align="center" valign="top">424</td>
<td align="center" valign="top">64.0</td>
</tr>
<tr>
<td align="left" valign="top">I prefer not to answer</td>
<td align="center" valign="top">2</td>
<td align="center" valign="top">0.3</td>
</tr>
<tr>
<td align="left" valign="top">Non-binary</td>
<td align="center" valign="top">2</td>
<td align="center" valign="top">0.3</td>
</tr>
<tr>
<td align="left" valign="top" colspan="3">Age</td>
</tr>
<tr>
<td align="left" valign="top">Under 24&#x202F;years</td>
<td align="center" valign="top">39</td>
<td align="center" valign="top">5.9</td>
</tr>
<tr>
<td align="left" valign="top">From 25 to 29&#x202F;years</td>
<td align="center" valign="top">176</td>
<td align="center" valign="top">26.6</td>
</tr>
<tr>
<td align="left" valign="top">From 30 to 34&#x202F;years</td>
<td align="center" valign="top">122</td>
<td align="center" valign="top">18.4</td>
</tr>
<tr>
<td align="left" valign="top">From 35 to 39&#x202F;years</td>
<td align="center" valign="top">97</td>
<td align="center" valign="top">14.6</td>
</tr>
<tr>
<td align="left" valign="top">From 40 to 44&#x202F;years</td>
<td align="center" valign="top">66</td>
<td align="center" valign="top">10.0</td>
</tr>
<tr>
<td align="left" valign="top">Over 45&#x202F;years</td>
<td align="center" valign="top">162</td>
<td align="center" valign="top">24.5</td>
</tr>
<tr>
<td align="left" valign="top" colspan="3">Education</td>
</tr>
<tr>
<td align="left" valign="top">Master&#x2019;s degree</td>
<td align="center" valign="top">464</td>
<td align="center" valign="top">70.0</td>
</tr>
<tr>
<td align="left" valign="top">Postgraduate degree</td>
<td align="center" valign="top">30</td>
<td align="center" valign="top">4.5</td>
</tr>
<tr>
<td align="left" valign="top">PhD</td>
<td align="center" valign="top">19</td>
<td align="center" valign="top">2.9</td>
</tr>
<tr>
<td align="left" valign="top">Other</td>
<td align="center" valign="top">149</td>
<td align="center" valign="top">22.5</td>
</tr>
<tr>
<td align="left" valign="top">Missing</td>
<td align="center" valign="top">189</td>
<td align="center" valign="top">28.6</td>
</tr>
<tr>
<td align="left" valign="top" colspan="3">Years of Teaching</td>
</tr>
<tr>
<td align="left" valign="top">0&#x2013;5</td>
<td align="center" valign="top">338</td>
<td align="center" valign="top">51.0</td>
</tr>
<tr>
<td align="left" valign="top">6&#x2013;10</td>
<td align="center" valign="top">116</td>
<td align="center" valign="top">17.6</td>
</tr>
<tr>
<td align="left" valign="top">11&#x2013;20</td>
<td align="center" valign="top">19</td>
<td align="center" valign="top">2.8</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In terms of academic background, the majority of participants hold a Master&#x2019;s Degree (70.0%), followed by postgraduate qualifications or research doctorates (7.4% overall). Regarding professional experience, the sample is composed primarily of teachers with fewer than 5&#x202F;years of service (51.0%), followed by those with between 6 and 10&#x202F;years of experience (17.6%).</p>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>Data collection instrument</title>
<p>The data were collected between April and May 2025 using a Computer-Assisted Web Interview (CAWI) methodology. The survey was self-administered via a secure online platform (Microsoft Forms), and the link was distributed to participants through the institutional Learning Management Systems (LMS) of the involved universities. Participation was voluntary and anonymous, with an estimated completion time of approximately 20&#x202F;min. The instrument consisted of 57 items organized into four logical sections, which were subsequently operationalized into five analytical dimensions (Trust, Competence, Relational Impact, Training, Perceived Utility) for the analysis:</p>
<list list-type="order">
<list-item>
<p>Socio-Professional Profile and Motivation (Items 1&#x2013;11): This section collected demographic data (gender, age), educational background, and professional status (years of teaching, school grade). It also investigated the participants&#x2019; intrinsic motivation for training and their priority expectations regarding professional competence acquisition.</p>
</list-item>
<list-item>
<p>Pedagogical and Assessment Background (Items 12&#x2013;25): This section explored the participants&#x2019; existing &#x201C;analog&#x201D; practices before introducing AI. Items queried their perceived self-efficacy in instructional design, the weight attributed to assessment tools, and their preferences regarding traditional (&#x201C;paper-and-pencil&#x201D;) versus software-mediated assessment formats.</p>
</list-item>
<list-item>
<p>AI Familiarity and Perceived Utility (Items 26&#x2013;38): This section measured the &#x201C;digital baseline&#x201D; of the trainees. It included self-assessment scales (1&#x2013;10) on comfort with digital technologies and specific AI tools (e.g., ChatGPT). It also investigated the perceived utility of Generative AI for specific tasks, such as students&#x2019; self-correction of written work and learning errors.</p>
</list-item>
<list-item>
<p>Ethical Agency and Future Scenarios (Items 39&#x2013;57): The core of the survey investigated the tension between automation and human control. Through agreement scales and open-ended questions, participants were asked to define which teaching activities could be delegated to AI and which should remain exclusively human to preserve educational effectiveness. Specific items addressed the risks of dehumanization, the &#x201C;trust&#x201D; in AI-supported assessment, and the need for critical control over algorithmic decisions.</p>
</list-item>
</list>
<p>To ensure maximum measurement sensitivity, the response scales were adapted to the nature of the constructs. Specifically, Trust, Technical Comfort, Training Adequacy, and Relational Risks were measured using a 10-point estimation scale to capture subtle variations in sentiment and avoid the flattening effect of shorter scales. Conversely, specific constructs related to Perceived Utility (e.g., for writing correction) and traditional pedagogical background utilized a standard 5-point Likert scale. <xref ref-type="table" rid="tab2">Table 2</xref> provides a detailed overview of the key variables and the measurement scales used in the present study.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Overview of the key variables analyzed.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Variable label (<xref ref-type="fig" rid="fig1">Figure 1</xref>)</th>
<th align="left" valign="top">Construct measured</th>
<th align="left" valign="top">Related item (translated)</th>
<th align="left" valign="top">Scale range</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Trust</td>
<td align="left" valign="middle">Trust in assessment</td>
<td align="left" valign="middle"><italic>What is your level of trust in the use of AI to support educational assessment?</italic> (Item 48)</td>
<td align="left" valign="middle">1 (very low)&#x2014;10 (very high)</td>
</tr>
<tr>
<td align="left" valign="middle">Support</td>
<td align="left" valign="middle">Pedagogical support</td>
<td align="left" valign="middle"><italic>I believe that Artificial Intelligence can support assessment.</italic> (Item 43)</td>
<td align="left" valign="middle">1 (totally disagree)&#x2014;10 (totally agree)</td>
</tr>
<tr>
<td align="left" valign="middle">Comfort</td>
<td align="left" valign="middle">Technical comfort</td>
<td align="left" valign="middle"><italic>How comfortable do you feel using Artificial Intelligence tools?</italic> (Item 34)</td>
<td align="left" valign="middle">1 (not at all comfortable)&#x2014;10 (totally comfortable)</td>
</tr>
<tr>
<td align="left" valign="middle">Training</td>
<td align="left" valign="middle">Training adequacy</td>
<td align="left" valign="middle"><italic>How much do you think your current training develops skills in the ethical management of collaboration between teachers and AI?</italic> (Item 52)</td>
<td align="left" valign="middle">1 (not at all effective)&#x2014;10 (very effective)</td>
</tr>
<tr>
<td align="left" valign="middle">Usefulness</td>
<td align="left" valign="middle">Perceived utility</td>
<td align="left" valign="middle"><italic>How useful do you consider GenAI for students&#x2019; self-correction tasks?</italic> (Item 26)</td>
<td align="left" valign="middle">1 (not at all useful)&#x2014;5 (very useful)</td>
</tr>
<tr>
<td align="left" valign="middle">Usage</td>
<td align="left" valign="middle">Frequency of use</td>
<td align="left" valign="middle"><italic>Have you ever utilized AI tools in the educational context?</italic> (Item 32)</td>
<td align="left" valign="middle">Binary/Freq. (yes&#x2014;no)</td>
</tr>
<tr>
<td align="left" valign="middle">Agency</td>
<td align="left" valign="middle">Critical agency</td>
<td align="left" valign="middle"><italic>How prepared do you feel to critically decide which aspects of teaching can be entrusted to an AI?</italic> (Item 51)</td>
<td align="left" valign="middle">1 (not at all prepared)&#x2014;10 (very prepared)</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Spearman correlation matrix (<italic>N</italic>&#x202F;=&#x202F;662) illustrating the relationships between key variables influencing AI adoption in assessment. The variables are defined as follows: Usefulness (perceived utility of AI for self-correction tasks), Comfort (technical ease of use), Training (perceived adequacy of specific training received), Support (perception of AI as a valid pedagogical support), Trust (confidence in AI-based assessment reliability), and Usage (reported frequency of AI use).</p>
</caption>
<graphic xlink:href="feduc-11-1760626-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Correlation matrix displaying relationships among six variables: Usefulness, Comfort, Training, Support, Trust, and Usage. Values range from 0.20 to 0.57, shaded in blue, indicating positive correlations. A color scale on the right shows correlation strength from negative one to one.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec5">
<label>2.3</label>
<title>Data analysis</title>
<p>The analysis was conducted entirely in the R statistical computing environment (via the RStudio interface) and was structured into distinct procedures for the two types of data. Regarding the quantitative component, descriptive statistics (frequencies, mean, and standard deviation) were initially calculated to characterize the sample profile. Subsequently, to address the research questions, Spearman&#x2019;s bivariate correlation analysis (<italic>&#x03C1;</italic>) was applied using the <italic>psych</italic> and <italic>tidyverse</italic> packages; this methodological choice was dictated by the ordinal nature of the variables (measured on Likert scales) and the non-normality of the data distribution, thereby ensuring greater robustness compared to parametric coefficients. On the qualitative front, open-ended responses were subjected to Inductive Thematic Analysis following the model by <xref ref-type="bibr" rid="ref2">Braun and Clarke (2006)</xref>. To ensure trustworthiness and reduce the subjectivity of the inductive coding process, a methodological triangulation strategy was adopted. The themes that emerged from the manual qualitative analysis were cross-validated against the results of the automated text mining analysis performed in R. Specifically, the high-frequency lexical clusters identified by the software (using the <italic>tidytext</italic> and <italic>tm</italic> packages) consistently matched the semantic categories manually identified by the researchers. This convergence between human interpretation and algorithmic extraction confirms the robustness of the coding frame and mitigates the risk of researcher bias.</p>
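<p>As an illustration of the workflow described above, the following minimal R sketch reproduces the two analytical steps. It is a sketch under stated assumptions: the file name and the column labels (e.g., <italic>usefulness</italic>, <italic>trust</italic>, <italic>open_response</italic>) are hypothetical placeholders and do not correspond to the actual variable names of the survey dataset.</p>
<preformat>
# Minimal sketch of the quantitative and text-mining steps described above.
# Assumed names (survey_data.csv, usefulness, comfort, training, support,
# trust, usage, open_response) are illustrative, not the real dataset labels.
library(tidyverse)   # readr/dplyr for data handling
library(psych)       # corr.test() for Spearman's rho

survey &lt;- read_csv("survey_data.csv")   # hypothetical export of the CAWI data

# Spearman correlation matrix (ordinal, non-normally distributed variables)
key_vars &lt;- survey %&gt;%
  select(usefulness, comfort, training, support, trust, usage)
rho &lt;- corr.test(key_vars, method = "spearman", adjust = "none")
print(round(rho$r, 2))   # correlation coefficients (cf. Figure 1)
print(rho$p)             # significance levels

# Lexical frequencies for the open-ended responses (cf. Figures 2-4)
library(tidytext)
word_freq &lt;- survey %&gt;%
  select(open_response) %&gt;%
  unnest_tokens(word, open_response) %&gt;%
  anti_join(get_stopwords(language = "it"), by = "word") %&gt;%
  count(word, sort = TRUE)
head(word_freq, 15)
</preformat>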
</sec>
</sec>
<sec sec-type="results" id="sec6">
<label>3</label>
<title>Results</title>
<sec id="sec7">
<label>3.1</label>
<title>Transformation of assessment and technological ambivalence</title>
<p>Regarding the first research question, aimed at investigating the transformation of assessment in the interaction with AI, the data outline a scenario characterized by a deep ambivalence between technical competence and pedagogical trust. Descriptive analysis highlights that, despite a moderate level of reported comfort in the general use of intelligent tools (<italic>M</italic>&#x202F;=&#x202F;6.04; SD&#x202F;=&#x202F;2.52), trust in delegating assessment support to these systems suffers a significant decline, settling on mean values below the sufficiency threshold (<italic>M</italic>&#x202F;=&#x202F;4.83; SD&#x202F;=&#x202F;2.21). Even more critical are the data regarding the perceived utility of AI for self-correction, which records a minimal mean (<italic>M</italic>&#x202F;=&#x202F;2.57 on a 5-point scale), suggesting widespread skepticism toward the current didactic efficacy of such tools. Sample polarization is further confirmed by the distribution of responses on AI&#x2019;s capacity to support assessment (<italic>M</italic>&#x202F;=&#x202F;5.18), where the modal frequency coincides with the scale&#x2019;s minimum value (1), indicating a distinct area of resistance. However, inferential analysis, conducted via Spearman&#x2019;s correlation coefficient, clarifies that trust is not independent of competence: indeed, significant positive associations emerge between the level of trust and the perception of the support role (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.56), as well as between trust itself and the degree of technological comfort (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.54). The qualitative analysis makes it possible to decode the underlying reasons for these numerical trends, revealing four semantic areas that explain the tension between acceptance and rejection:</p>
<p>Instrumental Efficiency (Acceptance): On the pragmatic axis, a view oriented towards optimization consolidates. Teachers recognize AI as a valuable resource for time management, the automation of repetitive correction tasks, and the enhancement of feedback speed.</p>
<p>Relational Depersonalization (Rejection): Conversely, on the emotional axis, a pervasive fear of &#x201C;dehumanization&#x201D; emerges. Participant narratives describe the risk of a drift towards an &#x201C;arid,&#x201D; &#x201C;sterile,&#x201D; and &#x201C;distant&#x201D; assessment, deprived of the empathetic component deemed essential for education.</p>
<p>Epistemic Opacity (Distrust): A third area concerns the lack of transparency. The recurring references to the inability to explain the algorithm&#x2019;s judgment (&#x201C;I do not know how it decides&#x201D;) fuel the distrust recorded in the quantitative data, linking resistance to the &#x201C;Black Box&#x201D; phenomenon.</p>
<p>Human Irreducibility (Defense): Finally, the tension resolves in the claim of a &#x201C;human reserve.&#x201D; Assessment is described as a hermeneutic act requiring context and sensitivity, qualities that participants define as exclusively human and non-delegable to a machine.</p>
<p>This dialectic resolves into an explicit demand for a principle of pedagogical subsidiarity, according to which AI is legitimized exclusively as an ancillary support tool, without ever being able to replace professional judgment and the educational centrality of the teacher (see <xref ref-type="table" rid="tab3">Table 3</xref>).</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Descriptive statistics of the main dimensions.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Analytical dimension</th>
<th align="left" valign="top">Reference item</th>
<th align="center" valign="top">Scale range</th>
<th align="center" valign="top">Mean (M)</th>
<th align="center" valign="top">Std. Dev. (SD)</th>
<th align="left" valign="top">Interpretation of result</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Technical comfort</td>
<td align="left" valign="middle">Item 34</td>
<td align="char" valign="middle" char="&#x2013;">1&#x2013;10</td>
<td align="char" valign="middle" char=".">6.04</td>
<td align="char" valign="middle" char=".">2.52</td>
<td align="left" valign="middle">Moderate/high technical familiarity</td>
</tr>
<tr>
<td align="left" valign="middle">Critical agency</td>
<td align="left" valign="middle">Item 51</td>
<td align="char" valign="middle" char="&#x2013;">1&#x2013;10</td>
<td align="char" valign="middle" char=".">5.39</td>
<td align="char" valign="middle" char=".">2.45</td>
<td align="left" valign="middle">Sufficient perceived capability</td>
</tr>
<tr>
<td align="left" valign="middle">Training adequacy</td>
<td align="left" valign="middle">Item 52</td>
<td align="char" valign="middle" char="&#x2013;">1&#x2013;10</td>
<td align="char" valign="middle" char=".">4.29</td>
<td align="char" valign="middle" char=".">2.63</td>
<td align="left" valign="middle">Insufficient preparation</td>
</tr>
<tr>
<td align="left" valign="middle">Trust in AI</td>
<td align="left" valign="middle">Item 48</td>
<td align="char" valign="middle" char="&#x2013;">1&#x2013;10</td>
<td align="char" valign="middle" char=".">4.83</td>
<td align="char" valign="middle" char=".">2.21</td>
<td align="left" valign="middle">Low trust (resistance area)</td>
</tr>
<tr>
<td align="left" valign="middle">Perceived utility</td>
<td align="left" valign="middle">Item 26</td>
<td align="char" valign="middle" char="&#x2013;">1&#x2013;5</td>
<td align="char" valign="middle" char=".">2.57</td>
<td align="char" valign="middle" char=".">1.10</td>
<td align="left" valign="middle">Low utility for self-correction</td>
</tr>
<tr>
<td align="left" valign="middle">Pedagogical support</td>
<td align="left" valign="middle">Item 43</td>
<td align="char" valign="middle" char="&#x2013;">1&#x2013;10</td>
<td align="char" valign="middle" char=".">5.18</td>
<td align="char" valign="middle" char=".">2.80</td>
<td align="left" valign="middle">Ambivalent/uncertain</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec8">
<label>3.2</label>
<title>The &#x201C;AI-literate&#x201D; teacher profile: between ethics and technique</title>
<p>Regarding the second research question, aimed at delineating the central training dimensions for a teacher capable of integrating AI without educational compromises, the descriptive data primarily indicate a significant gap in perceived preparation. The adequacy of the training received is evaluated as insufficient, with a mean of 4.29 (SD&#x202F;=&#x202F;2.63) and a modal frequency situated at the scale&#x2019;s minimum value (1), pointing to a widespread perception of structural deficiency in current training pathways. Nevertheless, participants express moderate confidence in their capacities for critical discernment (<italic>M</italic>&#x202F;=&#x202F;5.23) and ethical management (<italic>M</italic>&#x202F;=&#x202F;5.13), as well as in the competence to mediate the use of AI while preserving the educational relationship (<italic>M</italic>&#x202F;=&#x202F;5.39). The inferential analysis, conducted on a sample of 662 subjects using Spearman&#x2019;s correlation, offers a crucial interpretive key for understanding these self-perceptions. A network of statistically significant relationships (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001) emerges, delineating a complex framework of technological acceptance. A finding of particular relevance concerns the role of training: while the training received correlates positively with the sense of technical comfort in using the tool (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.46), it shows a decidedly weaker association with trust in assessment (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.36) and a marginal association with the perception of AI as an effective didactic support (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.20). Conversely, trust in AI as an assessment tool appears to be driven much more strongly by the perception of concrete pedagogical support (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.57) and by the actual use of the tool (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.54), as well as by its perceived pragmatic utility (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.48) (see <xref ref-type="fig" rid="fig1">Figure 1</xref>).</p>
<p>These results suggest the existence of a &#x201C;training paradox&#x201D;: current pathways appear effective in fostering technical confidence (&#x201C;knowing how to use the tool&#x201D;), yet prove less impactful in constructing an integrated pedagogical vision (&#x201C;knowing how to assess with the tool&#x201D;). Consequently, teachers&#x2019; trust appears to stem not so much from theoretical instruction as from direct and pragmatic experience. This highlights the necessity to reorient training toward the development of a Critical AI Literacy that anchors technical competence in ethical and decision-making responsibility.</p>
</sec>
<sec id="sec9">
<label>3.3</label>
<title>Qualitative analysis: the phenomenology of resistance and training needs</title>
<p>The qualitative inquiry, supported by lexical evidence emerging from text mining, provides a vivid snapshot not only of what teachers think but of how they position themselves professionally vis-&#x00E0;-vis technology. The analysis of the textual corpus allows training needs to be organized into three interconnected dimensions: operational uncertainty, ethical urgency, and the defense of pedagogical humanism.</p>
<p>First and foremost, the most immediate finding is the pervasiveness of doubt. The recurrence of dubitative linguistic forms, specifically the lemma &#x201C;Uncertainty,&#x201D; signals a state of operational paralysis (<xref ref-type="fig" rid="fig2">Figure 2</xref>). This term, appearing alongside the explicit demand for &#x201C;Training&#x201D; and technical &#x201C;Knowledge,&#x201D; suggests that the issue is not merely a lack of tools, but a difficulty of pedagogical imagination: many respondents struggle to concretely envision how AI can be integrated into their routine without disrupting it. This void of &#x201C;vision&#x201D; implies that current training may have been too theoretical, leaving teachers devoid of role models or practical examples of sustainable integration.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Lexical frequency of training needs. The bar chart highlights the most recurring terms regarding teachers&#x2019; professional development needs, emphasizing operational uncertainty alongside the demand for training.</p>
</caption>
<graphic xlink:href="feduc-11-1760626-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart titled "Training Needs" displaying categories with corresponding values. Training (86) and AI (82) are highest, followed by Knowledge (51), and others like Uncertainty (45), Competencies (43), with Assessment and Courses lowest at 25. Bars decrease in length sequentially.</alt-text>
</graphic>
</fig>
<p>Once the initial bewilderment is overcome, the demand for training crystallizes in a precise direction: the demand for meaning prevails over the demand for tools. The prominence of terms such as &#x201C;Critical Thinking&#x201D; and &#x201C;Ethics,&#x201D; contrasted with the high frequency of &#x201C;Risk&#x201D; and &#x201C;Loss&#x201D; (<xref ref-type="fig" rid="fig3">Figure 3</xref>), indicates that teachers are not asking to become computer technicians, but are claiming a deontological compass. The dominant concern is not the mechanics of the algorithm, but the management of its implications: data privacy, the recognition of cognitive biases, and, above all, the ability to discern when the use of technology is an added value and when it represents a loss of educational context.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Semantic mapping of perceived risks. Frequency analysis of terms associated with the dangers of AI in assessment, highlighting ethical concerns over technical ones.</p>
</caption>
<graphic xlink:href="feduc-11-1760626-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart titled "Risks Ethics" showing various risks with associated values. The highest values are for Risk (150) and Assessment (151), while the lowest are Losing and Context, both at 20. Other categories range from 21 to 57.</alt-text>
</graphic>
</fig>
<p>This evidence supports the hypothesis that Critical AI Literacy is not an academic luxury, but a pragmatic need for those who must manage educational responsibility in the classroom.</p>
<p>Finally, the semantic analysis of instructional strategies reveals a clear defensive line of demarcation. The strong recurrence of the term &#x201C;Relationship,&#x201D; flanked by &#x201C;Human&#x201D; and &#x201C;Educational&#x201D; values, unveils the teachers&#x2019; adaptation strategy (<xref ref-type="fig" rid="fig4">Figure 4</xref>): AI is welcomed if it acts as a &#x201C;prosthesis&#x201D; to enhance attention to the individual (e.g., by creating differentiated materials), but is rejected when it attempts to substitute for the relationship. In the free narratives, assessment is described as a hermeneutic act, an interpretation of student growth that requires empathy and context. Consequently, the &#x201C;educational centrality&#x201D; invoked by participants translates into the determination to guard the emotional space of learning, delegating procedural execution to the machine while firmly retaining the monopoly on value judgment and educational care.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>The defense of pedagogical humanism. Analysis of non-delegable tasks, showing the dominance of relational and human-centric terms.</p>
</caption>
<graphic xlink:href="feduc-11-1760626-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart titled "Humanism" showing values for various categories. "Assessment" has the highest value at 122, followed by "Relationship" at 96. "Human" and "Activity" each have 74, "Educational" has 62, followed by "Protection" at 57. "Didactics" and "Design" both have 46, "Classroom" is 43, "Reason" is 39, "Rapport" is 35, "Personal" is 33, "Students" is 30, and "Management" is 28.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec sec-type="discussion" id="sec10">
<label>4</label>
<title>Discussion</title>
<p>The body of collected evidence places a crucial challenge for teacher training in the AI era at the center of the academic debate: the necessity to redefine assessment not merely as a docimological procedure, but as a reflective educational practice capable of integrating the computational power of algorithmic tools without yielding to either professional abdication or the technicization of formative processes. The research results confirm that AI, far from representing a simple instrumental opportunity, acts as a radical transformative device that deeply interrogates the very identity of the teacher and their functions of ethical and pedagogical guarantee. Primarily, the data reveal a profound tension between the perception of AI as a tool for operational efficiency and the pervasive fear of a dehumanization of the assessment gesture.</p>
<p>This ambivalence mirrors the dimension of &#x201C;Symbolic Representation&#x201D; identified in our qualitative analysis: while teachers recognize AI&#x2019;s potential in automating repetitive tasks, quantitative data record drastically low trust (<italic>M</italic> =&#x202F;4.83 on a 1&#x2013;10 scale) and marked negative polarization. Such a discrepancy suggests that participants are not expressing a Luddite judgment, but are actively evaluating the risks of a delegation that could compromise the relational nature of teaching. The demand for a &#x201C;principle of subsidiarity,&#x201D; according to which AI must act exclusively as ancillary support, finds statistical validation in the positive correlation between pragmatic utility and trust (<italic>&#x03C1;</italic> =&#x202F;0.48); this indicates that AI is accepted only if confined to the role of a &#x201C;prosthesis&#x201D; for efficiency. This hesitation also reflects a latent ethical concern regarding the epistemic opacity of these systems. As noted by <xref ref-type="bibr" rid="ref25">Widder et al. (2024)</xref>, current AI models often function as &#x2018;black boxes&#x2019; where knowing the underlying code or weights does not guarantee interpretability of the outputs. Since these systems lack semantic transparency regarding their decision-making logic, teachers appear reluctant to validate assessment outcomes they cannot fully trace or explain to students. Responding to the need to situate these findings within the specific national context, the data allow us to reinterpret the participants&#x2019; apparent &#x201C;prudence&#x201D; not as cultural resistance, but as a rational consequence of a systemic training gap. Contrary to the narrative of teachers fearing professional replacement, our analysis shows that only a minority (<italic>N</italic> =&#x202F;195; 29.5%) agrees that AI might replace their skills (<italic>M</italic> =&#x202F;4.57). The determining factor for their caution is instead found in the lack of preparation: a striking 64.2% (<italic>N</italic> =&#x202F;425) of participants explicitly state that their training did not adequately prepare them to use AI tools (<italic>M</italic> =&#x202F;3.67, SD&#x202F;=&#x202F;2.55), and only 29.9% (<italic>N</italic> =&#x202F;198) feel fully capable of critically deciding which tasks to delegate. This evidence suggests that the Italian educational context, currently in a transitional phase of digital reform, has generated a demand for innovation without providing the necessary &#x201C;AI Literacy&#x201D;. Consequently, the low trust observed should be read as a demand for competence and ethical governance rather than a refusal of technology. Specifically, the data suggest that teachers are implicitly calling for an AI-in-the-loop (AI<sup>2</sup>L) configuration (<xref ref-type="bibr" rid="ref18">Natarajan et al., 2025</xref>): they do not reject the efficiency of the tool (as shown by the correlation with Utility), but they express a need for training that empowers them to maintain control over the learning loop. Their hesitation reflects a resistance to models where the human is passive, aligning instead with the Machine Teaching paradigm (<xref ref-type="bibr" rid="ref17">Mosqueira-Rey et al., 2023</xref>), where the teacher&#x2019;s role is to explicitly guide and correct the AI&#x2019;s logic. A second element of crucial epistemological significance emerges from the analysis of the determinants of competence. 
The data refute the assumption that formal training alone automatically generates technology adoption. Indeed, inferential analysis highlighted that while the training received correlates with the sense of technical comfort (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.46), it shows only a moderate association with trust in the tool (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.36). Conversely, the most significant statistical evidence supporting integration is the strong positive correlation (<italic>&#x03C1;</italic>&#x202F;=&#x202F;0.57) found between the perception of AI as concrete pedagogical support and the level of trust. This result dismantles the technocentric narrative: professional security does not derive from abstract instruction, but from the pragmatic validation of the tool&#x2019;s utility in a real-world context (&#x201C;Utility&#x201D; vs. &#x201C;Trust,&#x201D; <italic>&#x03C1;</italic>&#x202F;=&#x202F;0.48). Thus, the profile of a teacher who becomes &#x201C;AI-literate&#x201D; emerges not when they learn to use the software, but when they recognize its subsidiary value for their own didactic action, exercising reflective control over assessment practices.</p>
<p>This profile calls for a substantial revision of current initial and in-service training models. The data indicate an urgent need to move beyond technocentric instructional approaches in favor of an Ethical AI Literacy. As evidenced by the discrepancy between comfort and trust, teachers are not merely asking for technical manuals, but are claiming a deontological compass. Training programs should explicitly address the ethical dimensions of automation: data privacy, the recognition of algorithmic bias (which can reinforce social inequalities), and the moral implications of delegating evaluation. Teachers need to be trained not just to <italic>operate</italic> the machine, but to exercise epistemic responsibility: validating the fairness of the AI&#x2019;s output before it impacts the student&#x2019;s learning path. In this direction, the study&#x2019;s results not only enrich theoretical reflection but also provide operational guidelines for training policies: advanced digital competence must necessarily hybridize with ethics and pedagogy to generate a professional culture capable of governing innovation.</p>
<p>We can affirm that assessment today constitutes the privileged locus for observing and orienting the impact of AI in educational settings. It becomes an interpretive lens and a transformative lever for exploring new balances between automation and reflexivity. However, as suggested by the discrepancy between technical comfort and pedagogical trust, this transformation cannot be sustained by mere instrumental training. It is within this liminal space that the challenge of constructing a new culture of teaching plays out: a culture that must respond to the empirical demand for &#x2018;AI-in-the-loop&#x2019; governance, inhabiting the complexity of the algorithmic era not just with technical skill, but with the critical and ethical spirit claimed by the teachers themselves.</p>
<sec id="sec11">
<label>4.1</label>
<title>Implications for policy and practice</title>
<p>The evidence collected in this study suggests that the observed resistance is not a barrier to be dismantled, but a meaningful signal to be heeded, translating into specific implications for policy and practice. Consequently, we propose a strategic realignment of teacher training and school governance based on the &#x201C;Training Paradox&#x201D; and the demand for ethical control that emerged from the data.</p>
<p>Regarding curriculum design, the discrepancy between high technical comfort and low trust indicates that professional development programs must urgently shift their focus from mere operational interaction (&#x201C;prompt engineering&#x201D;) to a rigorous &#x201C;Output Validation Literacy.&#x201D; Teachers need to be trained not as passive users but as critical evaluators capable of detecting hallucinations, logic errors, and algorithmic biases in AI-generated content. Crucially, this literacy must be grounded in a deep ethical understanding of the tool; to mitigate the distrust fueled by &#x201C;epistemic opacity,&#x201D; training modules should demystify the &#x201C;Black Box&#x201D; nature of Large Language Models, clarifying their probabilistic rather than logical functioning. Only by understanding the mechanical limits of the machine can teachers exercise true epistemic responsibility without fearing professional abdication.</p>
<p>At the institutional level, these insights call for the standardization of &#x201C;AI-in-the-loop&#x201D; (AI<sup>2</sup>L) workflows. School policies should explicitly establish that while AI may assist in preliminary screening or feedback generation, the final assessment and its semantic explanation must remain the exclusive prerogative of the human teacher, thereby formalizing the &#x201C;Principle of Subsidiarity&#x201D; demanded by participants. In operational terms, we recommend adopting &#x201C;Triangulated Assessment&#x201D; practices, where AI is utilized not as a grader but as an adversarial critic or a &#x201C;second opinion&#x201D; to refine the teacher&#x2019;s own judgment. This approach ensures that the integration of AI enhances rather than replaces the relational and hermeneutic value of educational assessment.</p>
</sec>
<sec id="sec12">
<label>4.2</label>
<title>Limitations of the study</title>
<p>Despite the significance of the findings and the sample size, the present investigation entails certain limitations that warrant a cautious interpretation of the results. First, the non-probabilistic nature of the sampling strategy and the geographical concentration of participants in two specific universities (Foggia and Bergamo) preclude the statistical generalizability of the data to the entire Italian teaching population, although they offer a representative cross-section of the current state of initial teacher training. Second, the sample composition, characterized by a prevalence of novice teachers or those in the qualification phase with less than 5&#x202F;years of experience, may not fully reflect the professional stances, resistances, or consolidated pedagogical competencies of tenured teachers with greater seniority, who might approach technological innovation through different dynamics. A further limitation lies in the data collection methodology based on self-reported measures. While effective for investigating perceptions and beliefs, this approach may be subject to social desirability bias, particularly regarding sensitive topics such as ethics and digital competence. Consequently, the extent to which declared attitudes translate into actual instructional practice in the classroom remains to be verified through future observational or experimental studies. Finally, the rapid evolution of Generative AI tools necessitates viewing these results as a snapshot of a specific historical-technological moment, one inevitably destined to evolve as teachers&#x2019; familiarity with these new devices progresses. Additionally, it must be noted that the correlational nature of the study allows us to identify significant associations between variables (e.g., Training and Trust) but does not establish direct causal links. Therefore, the interpretative models proposed (such as the Training Paradox) should be understood as probabilistic frameworks to be further verified through experimental designs.</p>
</sec>
<sec id="sec13">
<label>4.3</label>
<title>Future research directions</title>
<p>In light of the emerging evidence and the outlined limitations, several promising avenues for future research arise. First, it would be beneficial to extend the investigation through longitudinal studies monitoring the evolution of the perceptions and assessment practices of the same subjects during their professional induction, verifying whether field experience modifies trust in, or resistance toward, AI. Second, it appears crucial to complement the survey of perceptions with direct observational or experimental analysis of classroom practices, investigating how teachers actually integrate AI into assessment design and formative feedback. Finally, a promising line of inquiry concerns the comparison between STEM and Humanities teachers, to verify whether the epistemological nature of the subject taught influences the propensity to delegate or retain specific assessment functions. Such in-depth investigations could provide the empirical foundations for constructing an <italic>AI Assessment Literacy</italic> framework that is increasingly refined and grounded in educational reality.</p>
</sec>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec14">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material; further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="ethics-statement" id="sec15">
<title>Ethics statement</title>
<p>Ethical approval was not required for the study involving humans in accordance with the local legislation and institutional requirements. Written informed consent to participate in this study was not required from the participants or the participants&#x2019; legal guardians/next of kin, in accordance with the national legislation and the institutional requirements.</p>
</sec>
<sec sec-type="author-contributions" id="sec16">
<title>Author contributions</title>
<p>VV: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. LA: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. PB: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. AB: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="COI-statement" id="sec17">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec18">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec19">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bearman</surname> <given-names>M.</given-names></name> <name><surname>Dawson</surname> <given-names>P.</given-names></name> <name><surname>Ajjawi</surname> <given-names>R.</given-names></name> <name><surname>Tai</surname> <given-names>J.</given-names></name> <name><surname>Boud</surname> <given-names>D.</given-names></name></person-group> (<year>2020</year>). <article-title>Assessment in a digital world: challenges and recommendations</article-title>. <source>Assess. Eval. High. Educ.</source> <volume>45</volume>, <fpage>1228</fpage>&#x2013;<lpage>1241</lpage>. doi: <pub-id pub-id-type="doi">10.1080/02602938.2020.1772354</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Braun</surname> <given-names>V.</given-names></name> <name><surname>Clarke</surname> <given-names>V.</given-names></name></person-group> (<year>2006</year>). <article-title>Using thematic analysis in psychology</article-title>. <source>Qual. Res. Psychol.</source> <volume>3</volume>, <fpage>77</fpage>&#x2013;<lpage>101</lpage>. doi: <pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Buchholz</surname> <given-names>B. A.</given-names></name> <name><surname>DeHart</surname> <given-names>J.</given-names></name> <name><surname>Moorman</surname> <given-names>G.</given-names></name></person-group> (<year>2020</year>). <article-title>Digital citizenship during a global pandemic: moving beyond digital literacy</article-title>. <source>J. Adolesc. Adult. Lit.</source> <volume>64</volume>, <fpage>11</fpage>&#x2013;<lpage>17</lpage>. doi: <pub-id pub-id-type="doi">10.1002/jaal.1076</pub-id>, <pub-id pub-id-type="pmid">32834710</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>&#x00C7;ela</surname> <given-names>E.</given-names></name> <name><surname>Fonkam</surname> <given-names>M.</given-names></name> <name><surname>Potluri</surname> <given-names>R. M.</given-names></name></person-group> (<year>2024</year>). <article-title>Risks of AI-assisted learning on student critical thinking</article-title>. <source>Int. J. Risk Conting. Manag.</source> <volume>12</volume>, <fpage>1</fpage>&#x2013;<lpage>19</lpage>. doi: <pub-id pub-id-type="doi">10.4018/IJRCM.350185</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Celik</surname> <given-names>I.</given-names></name></person-group> (<year>2023</year>). <article-title>Towards intelligent-TPACK: an empirical study on teachers&#x2019; professional knowledge to ethically integrate artificial intelligence (AI)-based tools into education</article-title>. <source>Comput. Hum. Behav.</source> <volume>138</volume>:<fpage>107468</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2022.107468</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chai</surname> <given-names>C. S.</given-names></name> <name><surname>Wong</surname> <given-names>L. H.</given-names></name> <name><surname>King</surname> <given-names>R. B.</given-names></name></person-group> (<year>2016</year>). <article-title>Surveying and modeling students&#x2019; motivation and learning strategies for mobile-assisted seamless Chinese language learning</article-title>. <source>Educ. Technol. Soc.</source> <volume>19</volume>, <fpage>170</fpage>&#x2013;<lpage>180</lpage>.</mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chan</surname> <given-names>C. K. Y.</given-names></name> <name><surname>Hu</surname> <given-names>W.</given-names></name></person-group> (<year>2023</year>). <article-title>Students&#x2019; voices on generative AI: perceptions, benefits, and challenges in higher education</article-title>. <source>Int. J. Educ. Technol. High. Educ.</source> <volume>20</volume>:<fpage>43</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s41239-023-00411-8</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gruenhagen</surname> <given-names>J. H.</given-names></name> <name><surname>Sinclair</surname> <given-names>P. M.</given-names></name> <name><surname>Carroll</surname> <given-names>J.-A.</given-names></name> <name><surname>Baker</surname> <given-names>P. R. A.</given-names></name> <name><surname>Wilson</surname> <given-names>A.</given-names></name> <name><surname>Demant</surname> <given-names>D.</given-names></name></person-group> (<year>2024</year>). <article-title>The rapid rise of generative AI and its implications for academic integrity: students&#x2019; perceptions and use of chatbots for assistance with assessments</article-title>. <source>Comput. Educ. Artif. Intell.</source> <volume>7</volume>:<fpage>100273</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.caeai.2024.100273</pub-id></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Holmes</surname> <given-names>W.</given-names></name> <name><surname>Tuomi</surname> <given-names>I.</given-names></name></person-group> (<year>2022</year>). <article-title>State of the art and practice in AI in education</article-title>. <source>Eur. J. Educ.</source> <volume>57</volume>, <fpage>542</fpage>&#x2013;<lpage>570</lpage>. doi: <pub-id pub-id-type="doi">10.1111/ejed.12533</pub-id></mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kasneci</surname> <given-names>E.</given-names></name> <name><surname>Sessler</surname> <given-names>K.</given-names></name> <name><surname>K&#x00FC;chemann</surname> <given-names>S.</given-names></name> <name><surname>Bannert</surname> <given-names>M.</given-names></name> <name><surname>Dementieva</surname> <given-names>D.</given-names></name> <name><surname>Fischer</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>ChatGPT for good? On opportunities and challenges of large language models for education</article-title>. <source>Learn. Individ. Differ.</source> <volume>103</volume>:<fpage>102274</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.lindif.2023.102274</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lim</surname> <given-names>W. M.</given-names></name> <name><surname>Gunasekara</surname> <given-names>A.</given-names></name> <name><surname>Pallant</surname> <given-names>J. L.</given-names></name> <name><surname>Pallant</surname> <given-names>J. I.</given-names></name> <name><surname>Pechenkina</surname> <given-names>E.</given-names></name></person-group> (<year>2023</year>). <article-title>Generative AI and the future of education: Ragnar&#x00F6;k or reformation? A paradoxical perspective from management educators</article-title>. <source>Int. J. Manag. Educ.</source> <volume>21</volume>:<fpage>100790</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijme.2023.100790</pub-id></mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>X.</given-names></name></person-group> (<year>2024</year>). <article-title>Navigating uncharted waters: teachers&#x2019; perceptions of and reactions to AI-induced challenges to assessment</article-title>. <source>Asia Pac. Educ. Res.</source> <volume>34</volume>, <fpage>711</fpage>&#x2013;<lpage>722</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s40299-024-00890-x</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Long</surname> <given-names>D.</given-names></name> <name><surname>Magerko</surname> <given-names>B</given-names></name></person-group>. (<year>2020</year>). <article-title>What is AI literacy? Competencies and design considerations</article-title>. <conf-name>Proceedings of the 2020 CHI conference on human factors in computing systems</conf-name>, <fpage>1</fpage>&#x2013;<lpage>16</lpage>.</mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Luckin</surname> <given-names>R.</given-names></name> <name><surname>Cukurova</surname> <given-names>M.</given-names></name> <name><surname>Kent</surname> <given-names>C.</given-names></name> <name><surname>du Boulay</surname> <given-names>B.</given-names></name></person-group> (<year>2022</year>). <article-title>Empowering educators to be AI-ready</article-title>. <source>Comput. Educ. Artif. Intell.</source> <volume>3</volume>:<fpage>100076</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.caeai.2022.100076</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Memarian</surname> <given-names>B.</given-names></name> <name><surname>Doleck</surname> <given-names>T.</given-names></name></person-group> (<year>2024</year>). <article-title>Human-in-the-loop in artificial intelligence in education: a review and entity-relationship (ER) analysis</article-title>. <source>Comput. Hum. Behav. Artif. Hum.</source> <volume>2</volume>:<fpage>100053</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chbah.2024.100053</pub-id></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mosqueira-Rey</surname> <given-names>E.</given-names></name> <name><surname>Hern&#x00E1;ndez-Pereira</surname> <given-names>E.</given-names></name> <name><surname>Alonso-R&#x00ED;os</surname> <given-names>D.</given-names></name> <name><surname>P&#x00E9;rez-S&#x00E1;nchez</surname> <given-names>R. C.</given-names></name> <name><surname>Moret-Bonillo</surname> <given-names>V.</given-names></name></person-group> (<year>2023</year>). <article-title>Human-in-the-loop machine learning: a state of the art</article-title>. <source>Artif. Intell. Rev.</source> <volume>56</volume>, <fpage>3005</fpage>&#x2013;<lpage>3054</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10462-022-10246-w</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Natarajan</surname> <given-names>S.</given-names></name> <name><surname>Mathur</surname> <given-names>S.</given-names></name> <name><surname>Sidheekh</surname> <given-names>S.</given-names></name> <name><surname>Stammer</surname> <given-names>W.</given-names></name> <name><surname>Kersting</surname> <given-names>K.</given-names></name></person-group> (<year>2025</year>). <article-title>Human-in-the-loop or AI-in-the-loop? Automate or collaborate?</article-title> <conf-name>Proceedings of the AAAI conference on artificial intelligence</conf-name>, <volume>39</volume>, <fpage>28594</fpage>&#x2013;<lpage>28600</lpage>.</mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ng</surname> <given-names>D. T. K.</given-names></name> <name><surname>Leung</surname> <given-names>J. K. L.</given-names></name> <name><surname>Chu</surname> <given-names>S. K. W.</given-names></name> <name><surname>Qiao</surname> <given-names>M. S.</given-names></name></person-group> (<year>2021</year>). <article-title>Conceptualizing AI literacy: an exploratory review</article-title>. <source>Comput. Educ. Artif. Intell.</source> <volume>2</volume>:<fpage>100041</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.caeai.2021.100041</pub-id></mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="book"><person-group person-group-type="author"><collab id="coll1">OECD</collab></person-group> (<year>2023</year>). <source>AI and the future of skills, volume 2: methods for evaluating AI capabilities</source>. <publisher-loc>Paris</publisher-loc>: <publisher-name>OECD Publishing</publisher-name>.</mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Salinas-Navarro</surname> <given-names>D.</given-names></name> <name><surname>Vilalta-Perdomo</surname> <given-names>E.</given-names></name> <name><surname>Michel-Villarreal</surname> <given-names>R.</given-names></name> <name><surname>Montesinos</surname> <given-names>L.</given-names></name></person-group> (<year>2024</year>). <article-title>Designing experiential learning activities with Generative Artificial Intelligence tools for authentic assessment</article-title>. <source>Interact. Technol. Smart Educ.</source> <volume>21</volume>, <fpage>708</fpage>&#x2013;<lpage>734</lpage>. doi: <pub-id pub-id-type="doi">10.1108/ITSE-12-2023-0236</pub-id></mixed-citation></ref>
<ref id="ref22"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Selwyn</surname> <given-names>N.</given-names></name></person-group> (<year>2016</year>). <source>Is technology good for education?</source> <publisher-loc>Cambridge, UK</publisher-loc>: <publisher-name>Polity Press</publisher-name>.</mixed-citation></ref>
<ref id="ref23"><mixed-citation publication-type="book"><person-group person-group-type="author"><collab id="coll2">UNESCO</collab></person-group> (<year>2023</year>) in <source>Guidance for generative AI in education and research</source>. eds. <person-group person-group-type="editor"><name><surname>Miao</surname> <given-names>F.</given-names></name> <name><surname>Holmes</surname> <given-names>W.</given-names></name></person-group> (<publisher-loc>Paris</publisher-loc>: <publisher-name>UNESCO</publisher-name>).</mixed-citation></ref>
<ref id="ref24"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Walter</surname> <given-names>Y.</given-names></name></person-group> (<year>2024</year>). <article-title>Embracing the future of artificial intelligence in the classroom: the relevance of AI literacy, prompt engineering, and critical thinking in modern education</article-title>. <source>Int. J. Educ. Technol. High. Educ.</source> <volume>21</volume>:<fpage>15</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s41239-024-00448-3</pub-id></mixed-citation></ref>
<ref id="ref25"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Widder</surname> <given-names>D. G.</given-names></name> <name><surname>Whittaker</surname> <given-names>M.</given-names></name> <name><surname>West</surname> <given-names>S. M.</given-names></name></person-group> (<year>2024</year>). <article-title>Why &#x2018;open&#x2019; AI systems are actually closed, and why this matters</article-title>. <source>Nature</source> <volume>635</volume>, <fpage>827</fpage>&#x2013;<lpage>833</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41586-024-08141-1</pub-id>, <pub-id pub-id-type="pmid">39604616</pub-id></mixed-citation></ref>
<ref id="ref26"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Williams</surname> <given-names>R.</given-names></name></person-group> (<year>2024</year>). Impact.AI: democratizing AI through K-12 artificial intelligence education. (doctoral dissertation). Massachusetts Institute of Technology, MIT theses. Available online at: <ext-link xlink:href="https://dspace.mit.edu/handle/1721.1/153676" ext-link-type="uri">https://dspace.mit.edu/handle/1721.1/153676</ext-link> (Accessed September 23, 2025).</mixed-citation></ref>
<ref id="ref27"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhai</surname> <given-names>C.</given-names></name> <name><surname>Wibowo</surname> <given-names>S.</given-names></name> <name><surname>Li</surname> <given-names>L. D.</given-names></name></person-group> (<year>2024</year>). <article-title>The effects of overreliance on AI dialogue systems on students&#x2019; cognitive abilities: a systematic review</article-title>. <source>Smart Learn. Environ.</source> <volume>11</volume>:<fpage>28</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s40561-024-00316-7</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1959966/overview">Tom Prickett</ext-link>, Northumbria University, United Kingdom</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2564181/overview">Ryan Thomas Williams</ext-link>, Teesside University, United Kingdom</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3322871/overview">Marie Devlin</ext-link>, Newcastle University, United Kingdom</p>
</fn>
</fn-group>
</back>
</article>