<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="review-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Educ.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Education</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Educ.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2504-284X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/feduc.2026.1778339</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Conceptual Analysis</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Teachers&#x2019; conceptions of AI for inclusive mathematics learning: a conceptual analysis</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>G&#x00F3;mez Ni&#x00F1;o</surname>
<given-names>Juli&#x00E1;n Ricardo</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>&#x00C1;rias Delgado</surname>
<given-names>Liliana Patricia</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Chiappe</surname>
<given-names>Andr&#x00E9;s</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3147372"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>S&#x00E1;ez-Delgado</surname>
<given-names>Fabiola</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1148518"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Education Faculty, Universidad de La Sabana</institution>, <city>Ch&#x00ED;a</city>, <country country="CO">Colombia</country></aff>
<aff id="aff2"><label>2</label><institution>Education Faculty, Universidad Cat&#x00F3;lica de la Sant&#x00ED;sima Concepci&#x00F3;n</institution>, <city>Concepci&#x00F3;n</city>, <country country="CL">Chile</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Andr&#x00E9;s Chiappe, <email xlink:href="mailto:andres.chiappe@unisabana.edu.co">andres.chiappe@unisabana.edu.co</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-19">
<day>19</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>11</volume>
<elocation-id>1778339</elocation-id>
<history>
<date date-type="received">
<day>30</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>03</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>09</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 G&#x00F3;mez Ni&#x00F1;o, &#x00C1;rias Delgado, Chiappe and S&#x00E1;ez-Delgado.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>G&#x00F3;mez Ni&#x00F1;o, &#x00C1;rias Delgado, Chiappe and S&#x00E1;ez-Delgado</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-19">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Artificial intelligence is increasingly entering mathematics classrooms through tools for feedback, personalization, assessment support, and instructional decision-making; however, its potential to contribute to inclusion depends less on technical availability than on how teachers conceptualize what AI is, what it can legitimately do, and what risks it introduces. The discussion is framed primarily around K&#x2013;12 mathematics classrooms (primary and secondary) and targets both pre-service preparation and in-service professional development for mathematics teachers. This Conceptual Analysis clarifies the construct of teachers&#x2019; conceptions of AI for inclusive mathematics learning and argues that such conceptions shape not only adoption decisions but also the quality and equity of classroom use. In a first approach, the paper delineates analytically distinct dimensions of conceptions, including beliefs about AI capabilities and limits, professional role and identity, ethical governance concerns, and perceived institutional conditions. Then, it outlines a minimal typology showing how different configurations of these dimensions can generate predictable inclusion-related barriers, such as over-automation of pedagogical judgement, opacity in feedback and decision processes, and unequal exposure to risk in vulnerable contexts. Later, it synthesizes governance-linked conditions for informed engagement, making explicit the safeguards under which AI-supported practices are more compatible with inclusion and the criteria under which cautious non-adoption is professionally warranted. The analysis culminates in actionable implications for teacher education, foregrounding the capabilities required for accountable mediation of AI-supported practice in mathematics classrooms, including boundary-setting, bias-awareness, transparency, and context-sensitive orchestration.</p>
</abstract>
<kwd-group>
<kwd>AI and pedagogy</kwd>
<kwd>artificial intelligence in education</kwd>
<kwd>inclusive mathematics education</kwd>
<kwd>teacher beliefs</kwd>
<kwd>teacher professional identity</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>Universidad de La Sabana</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100010628</institution-id>
</institution-wrap>
</funding-source>
<award-id rid="sp1">Project EDUPHD-20-2022</award-id>
</award-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by Universidad de La Sabana, Project EDUPHD-20-2022.</funding-statement>
</funding-group>
<counts>
<fig-count count="1"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="40"/>
<page-count count="9"/>
<word-count count="7854"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Teacher Education</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>In the context of a society increasingly defined by the integration of advanced technologies, artificial intelligence (AI) has emerged as a transformative agent in multiple spheres of human life. From automating routine tasks to optimizing complex processes in fields such as medicine, industry, and commerce, AI is not only redefining the limits of what is possible but also posing ethical, technical, and social challenges. This multifaceted impact has generated substantial debate regarding its long-term implications for human development and the structure of social institutions (<xref ref-type="bibr" rid="ref35">Selwyn et al., 2017</xref>; <xref ref-type="bibr" rid="ref26">Mohammed and &#x2018;Nell&#x2019; Watson, 2019</xref>). Particularly, the education sector has experienced unprecedented disruption due to the incorporation of AI-based technologies, which challenge established paradigms in teaching, assessment, and curriculum design.</p>
<p>In education, AI implementation has sparked significant crises. These range from questions about the validity and reliability of automated assessment systems to concerns about increasing technological dependency in teaching. AI-based platforms, such as intelligent tutoring systems and adaptive learning tools, promise to personalize instruction to meet individual student needs. However, their implementation in real-world educational settings has revealed a series of limitations. For instance, these technologies face challenges in their ability to understand diverse cultural, linguistic, and pedagogical contexts, making them susceptible to reproducing existing inequalities (<xref ref-type="bibr" rid="ref14">Kabudi et al., 2021</xref>; <xref ref-type="bibr" rid="ref12">Holmes et al., 2022</xref>). Additionally, the emergence of these technologies has exposed significant deficiencies in the technical and pedagogical preparation of teachers, who often lack the competencies needed to effectively integrate these tools into their daily practices (<xref ref-type="bibr" rid="ref40">Zawacki-Richter et al., 2019</xref>). These tensions have unfolded alongside a rapidly expanding body of scholarship on AI and education. To contextualize the timeliness of the present conceptual analysis within this broader landscape, <xref ref-type="fig" rid="fig1">Figure 1</xref> is included as background, illustrating the growth in the number of Scopus-indexed publications on AI and education over time. Importantly, <xref ref-type="fig" rid="fig1">Figure 1</xref> should be read as a descriptive indicator of indexed academic attention in one of the most widely used global databases; it does not constitute a bibliometric analysis, does not assess the quality of the literature, and is not used here to make causal claims about what drives publication growth.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Research on AI and education published in Scopus peer-reviewed journals.</p>
</caption>
<graphic xlink:href="feduc-11-1778339-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph showing the dramatic rise in publications about artificial intelligence and education from 2000 to 2024, with a stable trend until 2018 followed by a steep increase after 2020.</alt-text>
</graphic>
</fig>
<p>Although AI is being introduced across educational sectors, this conceptual analysis is intentionally framed around K&#x2013;12 mathematics teaching and learning, where compulsory schooling, high-stakes assessment, and long-standing stratification can make technology-mediated decisions particularly consequential for inclusion. Accordingly, the argument is written primarily for mathematics teachers and teacher educators designing in-service professional development; however, the dimensions and governance conditions articulated here are also intended to inform pre-service programs that aim to cultivate early habits of accountable AI mediation.</p>
<p>A critical aspect emerging from contemporary research is that many of the crises associated with AI use in education stem not only from technological limitations but also from how teachers conceptualize its legitimacy, utility, and risks in everyday practice (<xref ref-type="bibr" rid="ref31">Pedro et al., 2019</xref>; <xref ref-type="bibr" rid="ref36">Slade et al., 2024</xref>). Importantly, these conceptions should not be reduced to &#x201C;resistance&#x201D; understood as a deficit; rather, they often reflect a spectrum of professional stances ranging from cautious hesitation grounded in duty-of-care concerns to conditional acceptance shaped by knowledge, experience, and institutional support. This spectrum is particularly consequential in mathematics teaching, where entrenched expectations about pedagogy, assessment, and teacher authority intersect with high-stakes decisions that can either reproduce or disrupt inequity.</p>
<p>These conceptions, which range from fears of replacement to conditional acceptance shaped by limited knowledge, are a key determinant of when and how AI is judged pedagogically legitimate and ethically permissible in classroom practice. This is particularly salient in mathematics teaching, where deeply rooted expectations about pedagogy and assessment intersect with high-stakes decisions, so teachers&#x2019; conceptions structure the boundaries, safeguards, and use cases under which AI may be responsibly adopted or cautiously declined.</p>
<p>The need to transform mathematics teaching is a widely highlighted point in contemporary educational literature. In a world increasingly mediated by digital technologies, programming, and data analysis, the development of mathematical and computational thinking has emerged as a fundamental skill for current and future generations. These skills are not only crucial for professional performance in a constantly transforming labor market but also for fostering critical and analytical thinking, enabling students to face the complex challenges of contemporary society (<xref ref-type="bibr" rid="ref29">Opesemowo and Ndlovu, 2024</xref>; <xref ref-type="bibr" rid="ref30">Orhani, 2024</xref>). In a labor environment defined by automation and dependence on AI-based technologies, the ability to understand and manipulate data, design algorithms, and apply mathematical principles is essential to ensure individuals&#x2019; competitiveness and adaptability.</p>
<p>On the other hand, mathematics teaching faces unique challenges related to equity and inclusion. Historically, this discipline has been perceived as a high-stakes academic filter that segregates students based on perceived ability, thereby perpetuating structural inequalities in access to educational and employment opportunities. In this context, AI has the potential to redefine mathematics teaching by providing tools that personalize instruction and adapt pedagogical strategies to the specific needs of each student. These technologies could, in theory, broaden access to mathematical learning and contribute to reducing long-standing barriers related to gender, social class, and disability only when safeguards such as privacy protections, contestability, bias monitoring (including group-sensitive checks), and realistic teacher-mediated oversight are in place, and when institutional capacity and workload conditions make such mediation feasible (<xref ref-type="bibr" rid="ref12">Holmes et al., 2022</xref>).</p>
<p>This &#x201C;gatekeeping&#x201D; function makes mathematics a high-stakes subject in a distinctive way. First, mathematics commonly mediates stratification through placement, streaming, and credentialing decisions that shape who gains access to advanced pathways and, later, to educational and labor opportunities. Also, assessment is not merely an evaluative add-on in mathematics; rather, it is a central mechanism through which progression is regulated, because judgements about competence are frequently operationalized through tests, grading, and task performance that determine subsequent opportunities. Furthermore, mathematics learning is strongly cumulative: misconceptions and conceptual gaps can compound over time, so the timing, quality, and interpretability of feedback becomes pivotal for whether learners can re-enter productive trajectories or become locked into deficit-labelled pathways.</p>
<p>These discipline-specific dynamics matter because the AI use-cases increasingly discussed in education, automated or AI-supported feedback, adaptive task sequencing and personalization, assessment support, and analytics-informed instructional decisions, intersect directly with the mechanisms through which mathematics can either reproduce or disrupt inequity. Consequently, any argument about AI for inclusive mathematics learning must treat adoption and classroom use as governance-sensitive and outcome-contingent, rather than as intrinsically beneficial.</p>
<p>However, for these tools to fulfill their transformative promise, it is essential that teachers adopt a critical and constructive perspective regarding their use. This requires a profound change in traditional conceptions about mathematics teaching, recognizing that AI is not a replacement for teaching but a complement that can enrich pedagogical practices. According to <xref ref-type="bibr" rid="ref17">Knox et al. (2019)</xref>, only by adopting an open and flexible attitude towards technology can teachers contribute to transforming the classroom into an inclusive space aspiring to equity and accessibility for all students.</p>
<p>The creation of inclusive learning environments in mathematics is not only a matter of equity but also an indispensable pedagogical strategy for preparing students for future challenges. As emphasized by <xref ref-type="bibr" rid="ref13">Humburg et al. (2024)</xref>, inclusive learning involves designing environments that recognize and value student diversity, offering meaningful opportunities for all, regardless of their physical, cognitive, or socioeconomic conditions. AI can play a fundamental role in this process by analyzing large volumes of educational data, identifying learning patterns, and adapting content and pedagogical strategies accordingly.</p>
<p>Moreover, incorporating AI into mathematics teaching has direct implications for developing computational thinking, a skill increasingly in demand in fields such as engineering, data science, and computer science. As the labor market becomes more digitalized, students must be equipped not only with fundamental mathematical skills but also with the ability to solve complex problems, design algorithms, and work with advanced technologies. Recent research highlights that this knowledge is essential not only for technical careers but also for a wide range of disciplines that now depend on digital tools and data analysis (<xref ref-type="bibr" rid="ref20">Luckin et al., 2024</xref>).</p>
<p>In this sense, addressing mathematics teaching from an inclusive and technology-mediated perspective not only responds to labor market demands but also meets the need to foster critical and participatory citizenship. Students who develop mathematical and computational thinking are better prepared to face labor challenges and contribute to designing innovative solutions to address the most pressing social and environmental problems of our time.</p>
<p>Considering these issues, this article offers a conceptual analysis of teachers&#x2019; conceptions of AI for inclusive mathematics learning. Rather than issuing a generic call for change, we make three specific contributions. First, we clarify what &#x201C;teachers&#x2019; conceptions of AI&#x201D; entails by distinguishing analytically separable dimensions (e.g., beliefs about AI capabilities and limits, professional role and identity, ethical governance concerns, and perceived institutional conditions). Second, we use these dimensions to outline a minimal typology that explains how different configurations of conceptions can generate distinct inclusion-related barriers regarding mathematics education. Finally, we synthesize governance-linked conditions for informed engagement with AI, making explicit not only what constructive adoption can look like, but also when cautious non-adoption is professionally justified. Building on these outputs, we then derive actionable implications for teacher education focused on the capabilities required to mediate AI-supported practices in mathematics classrooms.</p>
<p>To develop these three contributions in a coherent and transparent way, the manuscript is organized around four complementary theoretical lenses, each used for a specific explanatory purpose in the argument. In doing so, we make explicit how the proposed dimensions, typology, and governance conditions are conceptually grounded and how they connect across sections. Ajzen&#x2019;s Theory of Planned Behavior is used to explain how teachers&#x2019; beliefs and perceived risks can translate into intentions and decisions regarding AI adoption (<xref ref-type="bibr" rid="ref33">Sanusi et al., 2024</xref>). Constructivist theory is used to frame the pedagogical plausibility of AI-supported personalization and feedback, focusing on how support should connect to learners&#x2019; prior knowledge and conceptual development in mathematics (<xref ref-type="bibr" rid="ref27">Moundridou et al., 2024</xref>). Social representations theory is used to interpret how collective narratives, norms, and institutional cultures shape teachers&#x2019; conceptions and shared stances towards AI, particularly in relation to inclusion and equity (<xref ref-type="bibr" rid="ref21">Mangi&#x00F2; et al., 2025</xref>). Finally, TPACK is used as a capability-oriented lens to clarify what teachers need to know and be able to do to mediate AI-supported practices responsibly in mathematics classrooms (<xref ref-type="bibr" rid="ref5">Cao et al., 2026</xref>). Together, these lenses support an explicit pathway in the paper: teachers&#x2019; conceptions shape adoption intentions and the conditions under which AI is used; adoption decisions then materialize as classroom practices (e.g., feedback, personalization, assessment support) that, depending on governance and teacher mediation, can either enable or constrain inclusion outcomes.</p>
<sec id="sec2">
<label>1.1</label>
<title>Conceptual construction behind this paper</title>
<p>This manuscript is positioned as a conceptual analysis and accordingly, it does not report original empirical data; instead, it aims to clarify constructs, specify relationships among key ideas, and articulate practice-facing conditions under which AI can be argued to support (or undermine) inclusion in mathematics learning. The methodological work was organized as an iterative sequence of argument construction and conceptual development, carried out in five interlinked steps.</p>
<p>First, we conducted problem framing and boundary setting to define the scope of the analysis (K&#x2013;12 mathematics; implications for both pre-service and in-service teacher education) and to identify the core tension motivating the argument: the expanding presence of AI in educational practice alongside the risk that uncritical adoption may amplify inequities. This step established the analytic aim of the paper: moving from a generic call for change to specifying what should change, why, and under what conditions.</p>
<p>Besides the above, we performed concept clarification to delimit what &#x201C;teachers&#x2019; conceptions of AI&#x201D; entails in relation to inclusive mathematics learning. This involved identifying the minimal set of analytically separable dimensions required to make the construct explanatory rather than merely descriptive (e.g., beliefs about AI capabilities and limits; professional role and identity; ethical and governance concerns; and perceived institutional conditions). The outcome of this step is a clarified construct that can support consistent reasoning throughout the manuscript.</p>
<p>Also, we developed an argumentative synthesis that links the clarified construct to classroom-level phenomena. In this move, we traced how different configurations of teachers&#x2019; conceptions plausibly shape adoption intentions and, crucially, the ways AI is enacted in practice (e.g., feedback, personalization, assessment support, and instructional decision-making). This synthesis is intentionally conditional: rather than assuming that AI use is inherently beneficial, it specifies how effects on inclusion depend on contextual constraints, governance safeguards, and teachers&#x2019; mediating actions.</p>
<p>In the fourth step, building on the previous ones, we articulated a minimal typology as a conceptual device for explaining variation. The typology was derived by systematically considering how distinct combinations of the identified dimensions can generate different inclusion-relevant barriers (for example, over-automation of pedagogical judgement, opacity in feedback and decision processes, and uneven distribution of risk in vulnerable contexts). The typology is &#x201C;minimal&#x201D; in the sense that it aims to be sufficiently parsimonious to remain usable for diagnosis and professional learning design, while still capturing consequential differences in stance and practice.</p>
<p>Finally, we derived governance-linked conditions for informed engagement with AI. These conditions were formulated as practice-facing criteria that make explicit both the safeguards under which AI-supported practices are more compatible with inclusion and the circumstances under which cautious non-adoption is professionally warranted. Across iterations, the argument was refined through internal coherence checks, ensuring that claims remained aligned with the scope of the paper, that each section advanced the same conceptual pathway, and that normative recommendations (what teachers and teacher education should do) remained grounded in the conceptual distinctions articulated earlier.</p>
</sec>
</sec>
<sec id="sec3">
<label>2</label>
<title>Transforming teachers&#x2019; conceptions: the first step toward AI adoption</title>
<p>In this paper, &#x201C;teachers&#x2019; conceptions of AI&#x201D; is treated as a practice-relevant construct that goes beyond general attitudes towards technology. We define it as the structured set of beliefs, expectations, and normative judgements (<xref ref-type="bibr" rid="ref24">Mirete et al., 2020</xref>) through which teachers make sense of what AI is, what it can legitimately do in mathematics learning, and what risks and responsibilities its use entails. For analytic clarity, we organize the construct into four interrelated dimensions: beliefs about AI capabilities and limits; professional role and identity in relation to AI-supported practice; ethical and governance judgements (e.g., bias, transparency, privacy, accountability); and perceived institutional conditions (e.g., infrastructure, policy, support) that enable or constrain responsible use. This definition is used throughout the manuscript to connect conceptions to adoption intentions, enacted classroom practices, and inclusion outcomes.</p>
<p>Regarding the above, teachers&#x2019; hesitation or resistance towards artificial intelligence can be understood as an expression of deeply ingrained conceptions of teaching and learning and of the professional responsibilities attached to those conceptions. These conceptions, defined as beliefs that evolve over time according to <xref ref-type="bibr" rid="ref6">D&#x2019;Amore and Pinilla (2004)</xref>, shape what teachers consider pedagogically legitimate and ethically defensible in mathematics classrooms. In this sense, concerns about professional displacement and the dehumanization of educational processes can limit willingness to adopt AI, particularly when AI systems are positioned to take on functions such as curriculum planning and automated assessment (<xref ref-type="bibr" rid="ref34">Saputra et al., 2023</xref>). At the same time, skepticism is not always a simple lack of openness: it may also reflect uncertainty produced by limited opportunities for situated evaluation, unclear governance safeguards, and uneven institutional conditions for responsible use. This is where <xref ref-type="bibr" rid="ref1">Ajzen&#x2019;s Theory of Planned Behavior (TPB) (2020)</xref> becomes useful, because it explains how such belief configurations shape intentions and decisions regarding AI engagement, which later materialize as classroom practices with direct inclusion implications. In the logic of this paper, TPB helps specify how conceptions translate into adoption intentions and decisions, which later become visible as classroom practices with inclusion implications.</p>
<p>Transforming these beliefs requires a comprehensive approach that combines technical training with pedagogical reflection. <xref ref-type="bibr" rid="ref15">Kazimzade et al. (2019)</xref> highlight that by re-conceptualizing AI as a conditional ally rather than a categorical threat, teachers can move from blanket rejection or uncritical enthusiasm towards informed professional judgement about when, how, and under what safeguards AI may be used to support learning. This involves creating continuous professional development spaces where teachers reflect on their professional identity in a digitized environment, develop critical skills to integrate technology ethically and effectively, and establish criteria for when cautious non-adoption is the most responsible stance. This shift is fundamental to facilitating inclusive mathematics learning environments aligned with the demands of the future workforce and students&#x2019; needs.</p>
<p>To move from construct clarification to explanatory usefulness, we propose a minimal typology that captures how different configurations of these four dimensions can shape AI adoption decisions and, more importantly, the ways AI is enacted in mathematics classrooms. The typology is intentionally heuristic rather than exhaustive: it aims to support diagnosis and professional learning design, not to label teachers. As shown in <xref ref-type="table" rid="tab1">Table 1</xref>, each profile is described in terms of its characteristic configuration across the four dimensions, its likely classroom enactments, the inclusion-related risks or barriers it may generate, and the teacher-education moves most likely to strengthen accountable mediation.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Minimal typology of teachers&#x2019; conceptions of AI for inclusive mathematics learning.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Typology profile</th>
<th align="left" valign="top">AI capabilities &#x0026; limits</th>
<th align="left" valign="top">Professional role &#x0026; identity</th>
<th align="left" valign="top">Primary inclusion risk (if any)</th>
<th align="left" valign="top">Core teacher-education priority</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Accountable mediators</td>
<td align="left" valign="middle">Balanced (benefits conditional)</td>
<td align="left" valign="middle">Teacher remains accountable decision-maker</td>
<td align="left" valign="middle">Mainly systemic gaps (policy/time) rather than conception-driven</td>
<td align="left" valign="middle">Advanced mediation routines; contestability; transparency practices</td>
</tr>
<tr>
<td align="left" valign="middle">Enthusiastic delegators</td>
<td align="left" valign="middle">Inflated (AI over-trusted)</td>
<td align="left" valign="middle">Boundaries blurred (AI as decision authority)</td>
<td align="left" valign="middle">Over-automation; opacity; disproportionate harm for vulnerable learners</td>
<td align="left" valign="middle">Boundary-setting; human-in-the-loop rules; verification protocols</td>
</tr>
<tr>
<td align="left" valign="middle">Guarded pragmatists</td>
<td align="left" valign="middle">Moderate/conditional</td>
<td align="left" valign="middle">Agency retained</td>
<td align="left" valign="middle">Underuse; uneven access benefits across contexts</td>
<td align="left" valign="middle">Minimal viable safe-use patterns; low-burden governance; institutional navigation</td>
</tr>
<tr>
<td align="left" valign="middle">Protective skeptics <italic>(cautious non-adopters)</italic></td>
<td align="left" valign="middle">Skeptical/uncertain</td>
<td align="left" valign="middle">Strong duty-of-care</td>
<td align="left" valign="middle">Missed support opportunities; risk of exclusion if adoption is mandated without safeguards</td>
<td align="left" valign="middle">Criteria for justified non-adoption; risk literacy; incremental safeguarded trials</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>These profiles should be read as starting points for professional reflection rather than fixed categories. Teachers and institutions may shift across profiles over time as governance arrangements evolve, as capabilities develop, and as evidence about tool performance in specific contexts becomes available.</p>
<p>Taken together, these considerations suggest that conceptions are not only abstract beliefs but practical determinants of whether, how, and under what safeguards teachers choose to adopt AI. The next section therefore shifts from adoption intentions to the pedagogical locus where adoption becomes practice: AI-supported personalization and feedback in mathematics learning.</p>
</sec>
<sec id="sec4">
<label>3</label>
<title>AI and mathematics: a pathway to personalized learning</title>
<p>Mathematics teaching faces the challenge of addressing students&#x2019; diverse needs and learning styles, a task historically complicated by uniform pedagogical approaches. Artificial intelligence can offer a potentially promising route to personalized learning through real-time data analysis, provided that its outputs remain pedagogically interpretable, governance safeguards are in place, and teachers retain accountability for high-stakes judgements. Tools such as intelligent tutoring systems and adaptive platforms adjust content to the skills, interests, and paces of individual students, ensuring that all can progress effectively (<xref ref-type="bibr" rid="ref12">Holmes et al., 2022</xref>; <xref ref-type="bibr" rid="ref16">Khine, 2024</xref>).</p>
<p>It is noteworthy that personalization is considered inclusion-supportive only when it remains transparent and contestable for teachers and learners, does not automate high-stakes judgements, and is implemented with safeguards for privacy and bias (<xref ref-type="bibr" rid="ref38">UNESCO, 2021</xref>; <xref ref-type="bibr" rid="ref12">Holmes et al., 2022</xref>; <xref ref-type="bibr" rid="ref18">Komljenovic, 2022</xref>).</p>
<p>AI-based personalized learning is grounded in constructivist theory, which holds that knowledge is actively constructed from prior experiences and individual contexts (<xref ref-type="bibr" rid="ref8">Grubaugh et al., 2023</xref>). In mathematics, this capability to identify and address conceptual gaps has a transformative impact. According to <xref ref-type="bibr" rid="ref39">Walkington and Bernacki (2019)</xref>, AI can provide immediate feedback and design adaptive strategies that foster interactive and contextualized learning. This is particularly important in a discipline where concepts build progressively, and any gaps can hinder advancement.</p>
<p>Additionally, AI can contribute to democratizing access to mathematics learning by supporting differentiated pathways for learners who have been historically underserved by homogeneous methodologies. However, these benefits are outcome-contingent and should be claimed only insofar as implementation is paired with explicit safeguards, such as privacy-protective data practices, transparency and contestability of recommendations, and group-sensitive monitoring of performance and error burden, while preserving realistic teacher-mediated oversight and accountability for pedagogical judgement. In this sense, and in line with National Council of Teachers of Mathematics priorities for inclusive mathematics education (<xref ref-type="bibr" rid="ref19">Leinwand et al., 2014</xref>), AI-supported personalization may help mitigate&#x2014;rather than inadvertently reproduce&#x2014;historical barriers related to gender, social class, and learner diversity when these governance and capacity conditions are actually met.</p>
<p>At this point, it is important to distinguish between two broad clusters of AI use-cases in mathematics education, because they differ markedly in ethical risk and in the teacher conceptions that become most consequential. The first cluster concerns tutoring and learning support, including AI-supported feedback, adaptive task sequencing, and low-stakes personalization intended to help learners re-enter productive conceptual trajectories. The second cluster concerns assessment- and administration-adjacent uses, including automated or AI-supported grading, placement recommendations, predictive analytics, and decision-support processes that can shape progression opportunities. Given mathematics&#x2019; high-stakes gatekeeping dynamics, the second cluster typically carries higher risks of opaque gatekeeping and proxy discrimination, and therefore demands stricter governance conditions and clearer human-accountability boundaries than learning-support uses (<xref ref-type="bibr" rid="ref22">McConvey et al., 2023</xref>).</p>
<p>Accordingly, any benefits claimed for AI in inclusive mathematics learning should be read as outcome-contingent rather than likely by default. Predictable failure modes include bias amplification and proxy discrimination, uneven access to devices and connectivity, language and cultural mismatch in feedback and task framing, opacity that makes recommendations difficult to contest, and the uneven distribution of error burden in already vulnerable contexts (<xref ref-type="bibr" rid="ref23">Mergen et al., 2025</xref>). For this reason, inclusion-supportive adoption depends on governance and design conditions such as transparency and contestability of outputs, data minimization and privacy protection, local monitoring of tool performance across learner groups, and teacher capacity to verify, override, and contextualize AI support rather than delegate judgement to it.</p>
</sec>
<sec id="sec5">
<label>4</label>
<title>Educational inclusion: the promise of AI in mathematics</title>
<p>In this paper, &#x201C;inclusion&#x201D; is treated as more than access or participation; it refers to learners&#x2019; meaningful opportunities to engage in mathematical activity, to receive intelligible support that enables conceptual development, and to progress without being disproportionately exposed to harm or exclusionary decision-making. Complementarily, &#x201C;equity&#x201D; concerns whether resources, support, and learning opportunities actively reduce barriers that have historically constrained particular learners in mathematics classrooms. In operational terms, our inclusion claims center learners who are more likely to be underserved or excluded in technology-mediated mathematics learning, including students with disabilities and other accessibility needs, learners facing socioeconomic disadvantage, and students whose linguistic, cultural, or contextual realities are often poorly represented in data-driven systems (<xref ref-type="bibr" rid="ref14">Kabudi et al., 2021</xref>; <xref ref-type="bibr" rid="ref12">Holmes et al., 2022</xref>; <xref ref-type="bibr" rid="ref7">Flores-Vivar and Garc&#x00ED;a-Pe&#x00F1;alvo, 2023</xref>).</p>
<p>Equity in mathematics education has historically been a challenge due to its perception as an inaccessible discipline for certain groups of students. This perception has contributed to perpetuating structural inequalities related to gender, social class, and cognitive abilities. However, artificial intelligence offers a unique opportunity to transform this reality by providing tools that personalize teaching and promote inclusive learning. According to <xref ref-type="bibr" rid="ref12">Holmes et al. (2022)</xref>, AI can analyze large volumes of educational data to identify specific needs and adapt pedagogical strategies accordingly, thereby potentially promoting equity in the classroom when adoption is accompanied by safeguards for bias, privacy, and contestability of recommendations.</p>
<p>However, AI-enabled personalization should not be treated as inclusion-supportive by default. In a high-stakes discipline such as mathematics, where feedback, task sequencing, and assessment can shape placement and future opportunities, personalization can introduce trade-offs that matter directly for inclusion. These include intensified data capture and surveillance-like monitoring, opaque recommendation processes that are difficult to contest, deficit-labelled profiling, and automated gatekeeping when outputs implicitly steer some learners towards narrower tasks or lower expectations (<xref ref-type="bibr" rid="ref12">Holmes et al., 2022</xref>; <xref ref-type="bibr" rid="ref18">Komljenovic, 2022</xref>; <xref ref-type="bibr" rid="ref4">bin Abdullah and bin Yusuf, 2023</xref>). For this reason, the manuscript treats inclusion outcomes as contingent on governance safeguards and teacher mediation, rather than as an intrinsic property of AI tools.</p>
<p>The creation of inclusive environments is not only an ethical imperative but also a pedagogical necessity. The theory of social representations (<xref ref-type="bibr" rid="ref11">H&#x00F6;ijer, 2011</xref>) highlights that attitudes toward technology are influenced by social and cultural environments. Therefore, it is essential for educational communities to foster a culture of collaboration where teachers can share experiences, resolve concerns, and collectively explore AI&#x2019;s transformative potential in mathematics teaching.</p>
<p>Implementing AI for inclusion requires not only changes in pedagogical practices but also educational policies that ensure accessibility and ethics. According to <xref ref-type="bibr" rid="ref7">Flores-Vivar and Garc&#x00ED;a-Pe&#x00F1;alvo (2023)</xref>, AI can become an agent of inclusion if used to address the specific needs of students with cognitive, physical, or socioeconomic barriers. This involves developing differentiated strategies that allow all students, regardless of their individual characteristics, to access meaningful learning opportunities.</p>
<p>Regarding the above, AI may contribute to more equitable and inclusive mathematics teaching if it is integrated through a critical, rights-respecting approach that makes safeguards explicit in practice. This includes selecting and using tools with transparent and contestable outputs, implementing privacy protections and data minimization, and monitoring differential impacts across learner groups, while ensuring that teachers retain accountable oversight, particularly wherever recommendations could shape expectations, support allocations, or progression. Under these conditions, AI-supported personalization can help narrow, rather than widen, historical gaps in meaningful opportunities to engage with mathematics and to progress in learning.</p>
</sec>
<sec id="sec6">
<label>5</label>
<title>Ethics and equity: fundamental conditions for adopting AI in inclusive mathematics learning</title>
<p>The implementation of artificial intelligence in mathematics teaching has the potential to transform education by personalizing learning and addressing the diverse needs of students. However, for this technology to truly contribute to educational inclusion, it is essential to address the ethical and equity challenges accompanying its integration. AI must not only be a technical tool but also a resource aligned with values of justice and accessibility, ensuring that all students have equal access to meaningful educational opportunities.</p>
<p>From an inclusion standpoint, these ethical concerns become acute when personalization depends on continuous data capture and algorithmic classification that can normalize tracking, profiling, or gatekeeping without transparent accountability (<xref ref-type="bibr" rid="ref10">Hintz, 2024</xref>).</p>
<p>One of the most evident challenges is the risk of perpetuating inequalities through biased algorithms. Dependence on historical data, which often reflects prejudices related to gender, social class, or cognitive abilities, can create systems that favor certain groups of students while marginalizing others (<xref ref-type="bibr" rid="ref4">bin Abdullah and bin Yusuf, 2023</xref>). This issue is particularly critical in mathematics, a discipline often perceived as exclusive and reserved for an academic minority. Poorly designed algorithms could reinforce this perception, widening educational gaps instead of narrowing them.</p>
<p>Moreover, the collection and analysis of students&#x2019; personal data pose significant risks to privacy and security. AI-based personalized learning requires large volumes of sensitive data, ranging from learning patterns to specific behaviors, raising ethical and trust concerns. According to <xref ref-type="bibr" rid="ref18">Komljenovic (2022)</xref>, the inadequate handling of this data can undermine students&#x2019; rights and erode trust in digital educational systems, especially in communities already facing structural inequalities.</p>
<p>In the framework of inclusive learning, AI must be designed to identify and address the specific needs of traditionally marginalized students. This includes tools that adapt mathematical content to different learning styles, provide access to students with disabilities, and offer resources that level opportunities across diverse socioeconomic contexts. For instance, intelligent tutoring systems can provide personalized feedback, which especially benefits students who require more time or alternative approaches to grasp key mathematical concepts (<xref ref-type="bibr" rid="ref37">Son, 2024</xref>). To ensure that AI promotes inclusive learning in mathematics, robust regulatory frameworks prioritizing equity and ethics are essential. In this regard, <xref ref-type="bibr" rid="ref38">UNESCO (2021)</xref> and the European Commission (<xref ref-type="bibr" rid="ref32">Reinhardt, 2023</xref>) have highlighted the importance of establishing principles that guide transparency in algorithm design, protect students&#x2019; privacy, and promote universal accessibility. However, while regulatory frameworks are necessary, they are insufficient on their own. Teachers play a critical role as mediators, ensuring that AI tools are used to close educational gaps rather than widen them.</p>
<p>Additionally, teacher training must include ethical and pedagogical components that enable critical evaluation of technological tools. This not only involves understanding how algorithms work but also developing the sensitivity required to identify biases and ensure that AI solutions respect and value student diversity. According to <xref ref-type="bibr" rid="ref7">Flores-Vivar and Garc&#x00ED;a-Pe&#x00F1;alvo (2023)</xref>, this critical approach is indispensable for ensuring that technology is used inclusively and adapted to classroom realities. Thus, ethics and equity are fundamental pillars for integrating AI into inclusive mathematics learning. Ensuring that these tools are designed and used in a fair and responsible manner not only protects students&#x2019; rights but also promotes a more accessible and meaningful mathematics education for all. This ethical approach strengthens AI&#x2019;s transformative potential, turning it into a key ally in addressing historical inequalities and building truly inclusive educational environments.</p>
</sec>
<sec id="sec7">
<label>6</label>
<title>Redefining the teacher&#x2019;s role: mediators in mathematics education with AI</title>
<p>The integration of artificial intelligence in mathematics teaching profoundly redefines the teacher&#x2019;s role, transforming them from knowledge transmitters to mediators of inclusive and personalized learning experiences. This paradigm shift responds to the demands of education adapted to the diversity of students and the complexities of the modern world, where mathematical and computational skills are essential. In this context, the teacher becomes a guide who, utilizing available technological tools, ensures that every student has access to equitable and meaningful learning opportunities.</p>
<p>According to <xref ref-type="bibr" rid="ref9">Heyes (2012)</xref>, learning is a social process where the teacher acts as a facilitator, helping students construct their own knowledge. In this regard, AI complements this function by automating repetitive tasks such as evaluation and immediate feedback, freeing the teacher to focus on more complex aspects of learning, such as developing critical thinking, problem-solving, and metacognition (<xref ref-type="bibr" rid="ref2">Attwood, 2020</xref>).</p>
<p>However, to fulfill this new role as a mediator, teachers must develop technical, pedagogical, and ethical competencies. <xref ref-type="bibr" rid="ref25">Mishra and Koehler (2006)</xref> argue that teacher education and professional development (both pre-service and in-service) should integrate knowledge of content, pedagogy, and technology, enabling educators to use AI effectively and critically. Here, TPACK is used not as a generic endorsement of technology integration, but as a capability framework to specify what accountable teacher mediation requires when AI shapes feedback, task sequencing, and assessment-related judgements. Considering this, a TPACK-based training should include the ability to identify algorithmic biases, assess the effectiveness of AI tools, and ensure their implementation aligns with principles of inclusion and equity.</p>
<p>Beyond developing competencies, inclusive mediation also requires explicit boundaries regarding what may be delegated to AI and what must remain a matter of professional human judgement. In this paper, AI is treated as a support for teachers&#x2019; work rather than a substitute for pedagogical responsibility; accordingly, teachers remain accountable for interpreting learners&#x2019; needs, deciding instructional responses, and ensuring that AI-supported actions do not restrict students&#x2019; opportunities to participate and progress.</p>
<p>These boundaries are especially important in mathematics because AI-supported feedback, task sequencing, and assessment-related recommendations can quickly become gatekeeping mechanisms (<xref ref-type="bibr" rid="ref28">Najjar et al., 2025</xref>). Therefore, high-stakes decisions, such as grading that determines progression, placement recommendations, or any recommendation that materially restricts learning opportunities, should not be automated or accepted without teacher verification and contestability. By contrast, lower-stakes uses (e.g., generating practice variants, drafting hints, supporting formative feedback) may be appropriate when outputs are pedagogically interpretable, bias risks are actively monitored, and teachers can override or adjust the AI response. Under conditions of weak governance or opaque tool performance, cautious non-adoption can thus be a responsible expression of duty of care rather than resistance to innovation. This is also where the minimal typology proposed earlier becomes practically useful, as different profiles imply different boundary-setting risks and professional learning priorities.</p>
<p>In the context of inclusive mathematics learning, the teacher&#x2019;s role expands further by designing environments that cater to the needs of students with diverse abilities. While AI can provide personalization, it is the teacher who ensures that these tools are used to close educational gaps rather than widen them. For example, teachers can adapt AI resources for students with physical or cognitive disabilities, fostering more active and equitable participation. Moreover, teachers play a critical role in incorporating cultural and social contexts into mathematics teaching, an aspect that technological tools have yet to fully address. According to <xref ref-type="bibr" rid="ref3">Bailey (2021)</xref>, the emotional connection and human interaction essential for inclusive and meaningful learning remain irreplaceable. Although AI can facilitate adaptive learning, the teacher remains the central figure who builds trust and motivates students to overcome challenges. This human-centered approach ensures that technology serves as a tool that enhances, rather than replaces, the transformative role of the teacher.</p>
<p>In essence, the teacher in the AI era is not merely a technology operator but an architect of learning environments that use these tools to enrich mathematics teaching. Their role as mediators is crucial to ensuring that AI contributes to inclusive, equitable, and meaningful learning. By redefining their role, teachers position themselves as agents of change, capable of leveraging AI to create learning experiences that address the diversity and needs of the 21st century.</p>
</sec>
<sec sec-type="conclusions" id="sec8">
<label>7</label>
<title>Conclusion</title>
<p>This paper contributes a conceptual analysis that clarifies what is at stake when teachers engage with AI in inclusive mathematics education. Specifically, it delineates &#x201C;teachers&#x2019; conceptions of AI&#x201D; into analytically distinct dimensions that matter for classroom practice and equity, outlines a minimal typology that connects these conceptions to foreseeable inclusion-related barriers and risks, and synthesizes governance-linked conditions for informed engagement, including criteria for when cautious non-adoption is professionally warranted. In doing so, the manuscript shifts the discussion from a general call to change towards specifying the targets, criteria, and practical implications of that change for inclusive mathematics learning. While many insights travel across sectors, the analysis is anchored in K&#x2013;12 mathematics classrooms and in the needs of teacher education and professional development in compulsory schooling.</p>
<p>The analysis suggests that AI-driven personalized learning may contribute to improving how mathematics is taught, but only under governance and implementation conditions that protect equity. In particular, such approaches may help address some long-standing gaps when safeguards (e.g., privacy protections, contestability, transparency, and group-sensitive bias monitoring) are in place, when institutional capacity supports accountable use, and when teacher workload realities allow meaningful mediation. Under these conditions, AI can support efforts to reduce persistent opportunity gaps, whereas under weak governance it may reproduce or amplify them. However, to accomplish this, it is essential for teachers to develop informed and accountable mediation of AI-supported practices, which may involve constructive adoption under clear safeguards as well as cautious non-adoption when governance, transparency, or equity conditions are not met. This shift demands comprehensive training that addresses not only technical skills but also ethical and pedagogical dimensions. The implications of this work are manifold. First, for AI to contribute to inclusive learning, its design and use must be guided by principles of ethics and equity. The risks associated with data privacy and algorithmic biases require robust regulatory frameworks and continuous evaluation of the tools implemented. Second, redefining the teacher&#x2019;s role not only enhances the effectiveness of technology but also ensures that learning remains a human-centered experience, respecting the emotional connection and cultural context of the classroom.</p>
<p>A call to action is essential at this juncture. Educational institutions, policymakers, and technology developers must work closely to establish teacher training programs that integrate technology critically and reflectively. Moreover, investments in technological infrastructure must be prioritized to ensure equitable access to AI tools, particularly in marginalized communities.</p>
<p>Regarding future directions, research should focus on evaluating the impact of AI tools in different educational contexts, identifying practices that maximize their effectiveness while minimizing risks. It is also crucial to explore how teachers can collaborate with these technologies to design innovative learning experiences that foster mathematical and computational thinking.</p>
<p>As a final insight, the manuscript provides an analytic map of how teachers&#x2019; conceptions shape the uptake and classroom use of AI in mathematics, and why that matters for inclusion. By making explicit the dimensions of conceptions, the pathways through which they can enable or constrain equitable practice, and the governance conditions under which AI use can be justified, the paper offers a clearer basis for diagnosing current positions and designing teacher education responses that are both pedagogically grounded and ethically accountable.</p>
</sec>
</body>
<back>
<sec sec-type="author-contributions" id="sec9">
<title>Author contributions</title>
<p>JRGN: Conceptualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. LP&#x00C1;D: Writing &#x2013; review &#x0026; editing, Writing &#x2013; original draft, Conceptualization. AC: Writing &#x2013; review &#x0026; editing, Supervision, Writing &#x2013; original draft, Conceptualization. FSD: Writing &#x2013; review &#x0026; editing, Conceptualization, Writing &#x2013; original draft.</p>
</sec>
<sec sec-type="COI-statement" id="sec10">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec11">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was used in the creation of this manuscript. During the preparation of this manuscript, the authors used generative AI (ChatGPT 5.2) solely to support language editing and clarity (grammar, readability, and phrasing). All AI-assisted text was critically reviewed and substantively edited by the authors, who take full responsibility for the content, interpretations, and conclusions reported. No generative AI was used to generate or analyse data, nor to produce figures or results.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec12">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ajzen</surname><given-names>I.</given-names></name></person-group> (<year>2020</year>). <article-title>The theory of planned behavior: frequently asked questions</article-title>. <source>Hum. Behav. Emerg. Technol.</source> <volume>2</volume>, <fpage>314</fpage>&#x2013;<lpage>324</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbe2.195</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Attwood</surname><given-names>A. I.</given-names></name></person-group> (<year>2020</year>). <article-title>Changing social learning theory through reliance on the internet of things and artificial intelligence</article-title>. <source>J. Sustain. Soc. Change</source> <volume>12</volume>:<fpage>8</fpage>. doi: <pub-id pub-id-type="doi">10.5590/JOSC.2020.12.1.08</pub-id></mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Bailey</surname><given-names>S.</given-names></name></person-group> (<year>2021</year>). <source>Drama for the inclusive classroom: Activities to support curriculum and social-emotional learning</source>, <edition>1st Edn</edition>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Routledge</publisher-name> (Eye on Education Series).</mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>bin Abdullah</surname><given-names>A.</given-names></name> <name><surname>bin Yusuf</surname><given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>The effectiveness of AI-based interventions in reducing healthcare inequalities a comprehensive review</article-title>. <source>J. Contemp. Healthc. Anal.</source> <volume>7</volume>, <fpage>1</fpage>&#x2013;<lpage>30</lpage>.</mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cao</surname><given-names>X.</given-names></name> <name><surname>Huang</surname><given-names>Z.</given-names></name> <name><surname>Li</surname><given-names>M.</given-names></name> <name><surname>He</surname><given-names>T.</given-names></name></person-group> (<year>2026</year>). <article-title>Teachers&#x2019; AI-TPACK as a tangible outcome in the digital transformation of education: a machine learning-based multilevel approach</article-title>. <source>Teach. Teach. Educ.</source> <volume>169</volume>:<fpage>105270</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tate.2025.105270</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>D&#x2019;Amore</surname><given-names>B.</given-names></name> <name><surname>Pinilla</surname><given-names>M. I. F.</given-names></name></person-group> (<year>2004</year>). <article-title>Cambios de convicciones en futuros profesores de matem&#x00E1;ticas de la Escuela Secundaria Superior</article-title>. <source>Epsilon: Revista de la Sociedad Andaluza de Educaci&#x00F3;n Matem&#x00E1;tica &#x201C;Thales&#x201D;</source> <volume>58</volume>, <fpage>23</fpage>&#x2013;<lpage>44</lpage>.</mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Flores-Vivar</surname><given-names>J.-M.</given-names></name> <name><surname>Garc&#x00ED;a-Pe&#x00F1;alvo</surname><given-names>F.-J.</given-names></name></person-group> (<year>2023</year>). <article-title>Reflections on the ethics, potential, and challenges of artificial intelligence in the framework of quality education (SDG4)</article-title>. <source>Comunicar: Revista Cient&#x00ED;fica de Comunicaci&#x00F3;n y Educaci&#x00F3;n</source> <volume>31</volume>, <fpage>37</fpage>&#x2013;<lpage>47</lpage>. doi: <pub-id pub-id-type="doi">10.3916/C74-2023-03</pub-id></mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Grubaugh</surname><given-names>S.</given-names></name> <name><surname>Levitt</surname><given-names>G.</given-names></name> <name><surname>Deever</surname><given-names>D.</given-names></name></person-group> (<year>2023</year>). <article-title>Harnessing AI to power constructivist learning: an evolution in educational methodologies</article-title>. <source>EIKI J. Eff. Teach. Methods</source> <volume>1</volume>, <fpage>81</fpage>&#x2013;<lpage>83</lpage>. doi: <pub-id pub-id-type="doi">10.59652/jetm.v1i3.43</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Heyes</surname><given-names>C.</given-names></name></person-group> (<year>2012</year>). <article-title>What&#x2019;s social about social learning?</article-title> <source>J. Comp. Psychol.</source> <volume>126</volume>, <fpage>193</fpage>&#x2013;<lpage>202</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0025180</pub-id>, <pub-id pub-id-type="pmid">21895355</pub-id></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hintz</surname><given-names>A.</given-names></name></person-group> (<year>2024</year>). &#x201C;<chapter-title>AI, big data and bias: governing datafication through a data justice lens</chapter-title>&#x201D; in <source>Handbook of media and communication governance</source>. eds. <person-group person-group-type="editor"><name><surname>Puppis</surname><given-names>M.</given-names></name> <name><surname>Mansell</surname><given-names>R.</given-names></name> <name><surname>Bulck</surname><given-names>H.</given-names><prefix>Van Den</prefix></name></person-group> (<publisher-loc>Cheltenham</publisher-loc>: <publisher-name>Edward Elgar Publishing</publisher-name>), <fpage>526</fpage>&#x2013;<lpage>537</lpage>.</mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>H&#x00F6;ijer</surname><given-names>B.</given-names></name></person-group> (<year>2011</year>). <article-title>Social representations theory: a new theory for media research</article-title>. <source>Nordicom Rev.</source> <volume>32</volume>, <fpage>3</fpage>&#x2013;<lpage>16</lpage>. doi: <pub-id pub-id-type="doi">10.1515/nor-2017-0109</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Holmes</surname><given-names>W.</given-names></name> <name><surname>Porayska-Pomsta</surname><given-names>K.</given-names></name> <name><surname>Holstein</surname><given-names>K.</given-names></name> <name><surname>Sutherland</surname><given-names>E.</given-names></name> <name><surname>Baker</surname><given-names>T.</given-names></name> <name><surname>Shum</surname><given-names>S. B.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Ethics of AI in education: towards a community-wide framework</article-title>. <source>Int. J. Artif. Intell. Educ.</source> <volume>32</volume>, <fpage>504</fpage>&#x2013;<lpage>526</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s40593-021-00239-1</pub-id></mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Humburg</surname><given-names>M.</given-names></name> <name><surname>Dragni&#x0107;-Cindri&#x0107;</surname><given-names>D.</given-names></name> <name><surname>Hmelo-Silver</surname><given-names>C. E.</given-names></name> <name><surname>Glazewski</surname><given-names>K.</given-names></name> <name><surname>Lester</surname><given-names>J. C.</given-names></name> <name><surname>Danish</surname><given-names>J. A.</given-names></name></person-group> (<year>2024</year>). <article-title>Integrating youth perspectives into the design of AI-supported collaborative learning environments</article-title>. <source>Educ. Sci.</source> <volume>14</volume>:<fpage>1197</fpage>. doi: <pub-id pub-id-type="doi">10.3390/educsci14111197</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kabudi</surname><given-names>T.</given-names></name> <name><surname>Pappas</surname><given-names>I.</given-names></name> <name><surname>Olsen</surname><given-names>D. H.</given-names></name></person-group> (<year>2021</year>). <article-title>AI-enabled adaptive learning systems: a systematic mapping of the literature</article-title>. <source>Comput. Educ. Artif. Intell.</source> <volume>2</volume>, <fpage>1</fpage>&#x2013;<lpage>12</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.caeai.2021.100017</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Kazimzade</surname><given-names>G.</given-names></name> <name><surname>Patzer</surname><given-names>Y.</given-names></name> <name><surname>Pinkwart</surname><given-names>N.</given-names></name></person-group> (<year>2019</year>). &#x201C;<chapter-title>Artificial intelligence in education meets inclusive educational technology&#x2014;the technical state-of-the-art and possible directions</chapter-title>&#x201D; in <source>Artificial intelligence and inclusive education</source>. eds. <person-group person-group-type="editor"><name><surname>Knox</surname><given-names>J.</given-names></name> <name><surname>Wang</surname><given-names>Y.</given-names></name> <name><surname>Gallagher</surname><given-names>M.</given-names></name></person-group> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer Singapore</publisher-name>), <fpage>61</fpage>&#x2013;<lpage>73</lpage>.</mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Khine</surname><given-names>M. S.</given-names></name></person-group> (<year>2024</year>). &#x201C;<chapter-title>Using AI for adaptive learning and adaptive assessment</chapter-title>&#x201D; in <source>Artificial intelligence in education</source> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer Nature Singapore</publisher-name>), <fpage>341</fpage>&#x2013;<lpage>466</lpage>.</mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Knox</surname><given-names>J.</given-names></name> <name><surname>Wang</surname><given-names>Y.</given-names></name> <name><surname>Gallagher</surname><given-names>M.</given-names></name></person-group> (<year>2019</year>). <source>Artificial intelligence and inclusive education: Speculative futures and emerging practices</source>. <publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer Singapore</publisher-name>.</mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Komljenovic</surname><given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>The future of value in digitalised higher education: why data privacy should not be our biggest concern</article-title>. <source>High. Educ.</source> <volume>83</volume>, <fpage>119</fpage>&#x2013;<lpage>135</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10734-020-00639-7</pub-id>, <pub-id pub-id-type="pmid">33230347</pub-id></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Leinwand</surname><given-names>S.</given-names></name> <name><surname>Brahier</surname><given-names>D. J.</given-names></name> <name><surname>Huinker</surname><given-names>D.</given-names></name> <name><surname>Berry</surname><given-names>R. Q.</given-names></name> <name><surname>Dillon</surname><given-names>F. L.</given-names></name> <name><surname>Larson</surname><given-names>M. R.</given-names></name> <etal/></person-group>. (<year>2014</year>). <source>Principles to actions: Ensuring mathematical success for all</source>. <publisher-loc>Reston</publisher-loc>: <publisher-name>NCTM, National Council of Teachers of Mathematics</publisher-name>.</mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Luckin</surname><given-names>R.</given-names></name> <name><surname>Rudolph</surname><given-names>J.</given-names></name> <name><surname>Gr&#x00FC;nert</surname><given-names>M.</given-names></name> <name><surname>Tan</surname><given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>Exploring the future of learning and the relationship between human intelligence and AI. An interview with Professor Rose Luckin</article-title>. <source>J. Appl. Learn. Teach.</source> <volume>7</volume>, <fpage>346</fpage>&#x2013;<lpage>363</lpage>. doi: <pub-id pub-id-type="doi">10.37074/jalt.2024.7.1.27</pub-id></mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mangi&#x00F2;</surname><given-names>F.</given-names></name> <name><surname>Pedeliento</surname><given-names>G.</given-names></name> <name><surname>Wassler</surname><given-names>P.</given-names></name> <name><surname>Williams</surname><given-names>N.</given-names></name></person-group> (<year>2025</year>). <article-title>Discursively negotiating AI: a social representation theory approach to LLM-based chatbots</article-title>. <source>Technol. Forecast. Soc. Change</source> <volume>221</volume>:<fpage>124352</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.techfore.2025.124352</pub-id></mixed-citation></ref>
<ref id="ref22"><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>McConvey</surname><given-names>K.</given-names></name> <name><surname>Guha</surname><given-names>S.</given-names></name> <name><surname>Kuzminykh</surname><given-names>A.</given-names></name></person-group> (<year>2023</year>). <chapter-title>A human-centered review of algorithms in decision-making in higher education</chapter-title>. In <conf-name>Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</conf-name>, (<publisher-loc>Hamburg</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>15</lpage>.</mixed-citation></ref>
<ref id="ref23"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Mergen</surname><given-names>A.</given-names></name> <name><surname>&#x00C7;etin-K&#x0131;l&#x0131;&#x00E7;</surname><given-names>N.</given-names></name> <name><surname>&#x00D6;zbilgin</surname><given-names>M. F.</given-names></name></person-group> (<year>2025</year>). &#x201C;<chapter-title>Artificial intelligence and Bias towards marginalised groups: theoretical roots and challenges</chapter-title>&#x201D; in <source>International perspectives on equality, diversity and inclusion</source>. eds. <person-group person-group-type="editor"><name><surname>Vassilopoulou</surname><given-names>J.</given-names></name> <name><surname>Kyriakidou</surname><given-names>O.</given-names></name></person-group> (<publisher-loc>Leeds</publisher-loc>: <publisher-name>Emerald Publishing Limited</publisher-name>), <fpage>17</fpage>&#x2013;<lpage>38</lpage>.</mixed-citation></ref>
<ref id="ref24"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mirete</surname><given-names>A. B.</given-names></name> <name><surname>Maquil&#x00F3;n</surname><given-names>J. J.</given-names></name> <name><surname>Mirete</surname><given-names>L.</given-names></name> <name><surname>Rodr&#x00ED;guez</surname><given-names>R. A.</given-names></name></person-group> (<year>2020</year>). <article-title>Digital competence and university teachers&#x2019; conceptions about teaching. A structural causal model</article-title>. <source>Sustainability</source> <volume>12</volume>:<fpage>4842</fpage>. doi: <pub-id pub-id-type="doi">10.3390/su12124842</pub-id></mixed-citation></ref>
<ref id="ref25"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mishra</surname><given-names>P.</given-names></name> <name><surname>Koehler</surname><given-names>M. J.</given-names></name></person-group> (<year>2006</year>). <article-title>Technological pedagogical content knowledge: a framework for teacher knowledge</article-title>. <source>Teach. Coll. Rec.</source> <volume>108</volume>, <fpage>1017</fpage>&#x2013;<lpage>1054</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1467-9620.2006.00684.x</pub-id></mixed-citation></ref>
<ref id="ref26"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Mohammed</surname><given-names>P. S.</given-names></name> <name><surname>&#x2018;Nell&#x2019; Watson</surname><given-names>E.</given-names></name></person-group> (<year>2019</year>). &#x201C;<chapter-title>Towards inclusive education in the age of artificial intelligence: perspectives, challenges, and opportunities</chapter-title>&#x201D; in <source>Artificial intelligence and inclusive education</source>. eds. <person-group person-group-type="editor"><name><surname>Knox</surname><given-names>J.</given-names></name> <name><surname>Wang</surname><given-names>Y.</given-names></name> <name><surname>Gallagher</surname><given-names>M.</given-names></name></person-group> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer Singapore</publisher-name>), <fpage>17</fpage>&#x2013;<lpage>37</lpage>.</mixed-citation></ref>
<ref id="ref27"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Moundridou</surname><given-names>M.</given-names></name> <name><surname>Matzakos</surname><given-names>N.</given-names></name> <name><surname>Doukakis</surname><given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>Generative AI tools as educators&#x2019; assistants: designing and implementing inquiry-based lesson plans</article-title>. <source>Comput. Educ. Artif. Intell.</source> <volume>7</volume>:<fpage>100277</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.caeai.2024.100277</pub-id></mixed-citation></ref>
<ref id="ref28"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Najjar</surname><given-names>N.</given-names></name> <name><surname>Rouphael</surname><given-names>M.</given-names></name> <name><surname>El Hajj</surname><given-names>M.</given-names></name> <name><surname>Bitar</surname><given-names>T.</given-names></name> <name><surname>Damien</surname><given-names>P.</given-names></name> <name><surname>Hleihel</surname><given-names>W.</given-names></name></person-group> (<year>2025</year>). <article-title>Faculty perceptions and adoption of AI in higher education: insights from two Lebanese universities</article-title>. <source>Educ. Sci.</source> <volume>16</volume>:<fpage>55</fpage>. doi: <pub-id pub-id-type="doi">10.3390/educsci16010055</pub-id></mixed-citation></ref>
<ref id="ref29"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Opesemowo</surname><given-names>O. A. G.</given-names></name> <name><surname>Ndlovu</surname><given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>Artificial intelligence in mathematics education: the good, the bad, and the ugly</article-title>. <source>J. Pedagog. Res.</source> <volume>8</volume>, <fpage>333</fpage>&#x2013;<lpage>346</lpage>. doi: <pub-id pub-id-type="doi">10.33902/JPR.202426428</pub-id></mixed-citation></ref>
<ref id="ref30"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Orhani</surname><given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>Personalization of math tasks for each student through AI</article-title>. <source>Res. Inv. Int. J. Eng. Sci.</source> <volume>14</volume>, <fpage>8</fpage>&#x2013;<lpage>15</lpage>.</mixed-citation></ref>
<ref id="ref31"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Pedro</surname><given-names>F.</given-names></name> <name><surname>Subosa</surname><given-names>M.</given-names></name> <name><surname>Rivas</surname><given-names>A.</given-names></name> <name><surname>Valverde</surname><given-names>P.</given-names></name></person-group> (<year>2019</year>). <source>Artificial intelligence in education: Challenges and opportunities for sustainable development</source>. <publisher-loc>Paris</publisher-loc>: <publisher-name>Unesco</publisher-name>. Available online at: <ext-link xlink:href="http://repositorio.minedu.gob.pe/handle/20.500.12799/6533" ext-link-type="uri">http://repositorio.minedu.gob.pe/handle/20.500.12799/6533</ext-link> (Accessed December 2, 2024).</mixed-citation></ref>
<ref id="ref32"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Reinhardt</surname><given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>Trust and trustworthiness in AI ethics</article-title>. <source>AI Ethics</source> <volume>3</volume>, <fpage>735</fpage>&#x2013;<lpage>744</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s43681-022-00200-5</pub-id></mixed-citation></ref>
<ref id="ref33"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sanusi</surname><given-names>I. T.</given-names></name> <name><surname>Ayanwale</surname><given-names>M. A.</given-names></name> <name><surname>Tolorunleke</surname><given-names>A. E.</given-names></name></person-group> (<year>2024</year>). <article-title>Investigating pre-service teachers&#x2019; artificial intelligence perception from the perspective of planned behavior theory</article-title>. <source>Comput. Educ. Artif. Intell.</source> <volume>6</volume>:<fpage>100202</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.caeai.2024.100202</pub-id></mixed-citation></ref>
<ref id="ref34"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Saputra</surname><given-names>I.</given-names></name> <name><surname>Astuti</surname><given-names>M.</given-names></name> <name><surname>Sayuti</surname><given-names>M.</given-names></name> <name><surname>Kusumastuti</surname><given-names>D.</given-names></name></person-group> (<year>2023</year>). <article-title>Integration of artificial intelligence in education: opportunities, challenges, threats and obstacles. A literature review</article-title>. <source>Indones. J. Comput. Sci.</source> <volume>12</volume>, <fpage>1590</fpage>&#x2013;<lpage>1600</lpage>. doi: <pub-id pub-id-type="doi">10.33022/ijcs.v12i4.3266</pub-id></mixed-citation></ref>
<ref id="ref35"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Selwyn</surname><given-names>N.</given-names></name> <name><surname>Nemorin</surname><given-names>S.</given-names></name> <name><surname>Bulfin</surname><given-names>S.</given-names></name> <name><surname>Johnson</surname><given-names>N. F.</given-names></name></person-group> (<year>2017</year>). <source>Everyday schooling in the digital age: High school, high tech?</source> <edition>1st</edition> Edn. <publisher-loc>Oxfordshire</publisher-loc>: <publisher-name>Routledge</publisher-name>.</mixed-citation></ref>
<ref id="ref36"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Slade</surname><given-names>J. J.</given-names></name> <name><surname>Byers</surname><given-names>S. M.</given-names></name> <name><surname>Becker-Blease</surname><given-names>K. A.</given-names></name> <name><surname>Gurung</surname><given-names>R. A. R.</given-names></name></person-group> (<year>2024</year>). <article-title>Navigating the new frontier: recommendations to address the crisis and potential of AI in the classroom</article-title>. <source>Teach. Psychol.</source> <volume>52</volume>, <fpage>254</fpage>&#x2013;<lpage>261</lpage>. doi: <pub-id pub-id-type="doi">10.1177/00986283241276098</pub-id></mixed-citation></ref>
<ref id="ref37"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Son</surname><given-names>T.</given-names></name></person-group> (<year>2024</year>). <article-title>Intelligent tutoring systems in mathematics education: a systematic literature review using the substitution, augmentation, modification, redefinition model</article-title>. <source>Computers</source> <volume>13</volume>:<fpage>270</fpage>. doi: <pub-id pub-id-type="doi">10.3390/computers13100270</pub-id></mixed-citation></ref>
<ref id="ref38"><mixed-citation publication-type="other"><collab id="coll1">UNESCO</collab> (<year>2021</year>). <source>Recommendation on the ethics of artificial intelligence</source>. Available online at: <ext-link xlink:href="https://unesdoc.unesco.org/ark:/48223/pf0000381137" ext-link-type="uri">https://unesdoc.unesco.org/ark:/48223/pf0000381137</ext-link> (Accessed October 15, 2024).</mixed-citation></ref>
<ref id="ref39"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Walkington</surname><given-names>C.</given-names></name> <name><surname>Bernacki</surname><given-names>M. L.</given-names></name></person-group> (<year>2019</year>). <article-title>Personalizing algebra to students&#x2019; individual interests in an intelligent tutoring system: moderators of impact</article-title>. <source>Int. J. Artif. Intell. Educ.</source> <volume>29</volume>, <fpage>58</fpage>&#x2013;<lpage>88</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s40593-018-0168-1</pub-id></mixed-citation></ref>
<ref id="ref40"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zawacki-Richter</surname><given-names>O.</given-names></name> <name><surname>Mar&#x00ED;n</surname><given-names>V. I.</given-names></name> <name><surname>Bond</surname><given-names>M.</given-names></name> <name><surname>Gouverneur</surname><given-names>F.</given-names></name></person-group> (<year>2019</year>). <article-title>Systematic review of research on artificial intelligence applications in higher education &#x2013; where are the educators?</article-title> <source>Int. J. Educ. Technol. High. Educ.</source> <volume>16</volume>:<fpage>39</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s41239-019-0171-0</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2427724/overview">Raona Williams</ext-link>, Ministry of Education, United Arab Emirates</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3122289/overview">Andreea Dragomir</ext-link>, Lucian Blaga University of Sibiu, Romania</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3358204/overview">Hossein Dabbagh</ext-link>, Northeastern University, United Kingdom</p>
</fn>
</fn-group>
</back>
</article>