<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="review-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Educ.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Education</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Educ.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2504-284X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/feduc.2026.1798126</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Mini Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Assessment using artificial intelligence in higher education: innovations and ethical challenges in the Ibero-American and Kazakh contexts&#x2014;a mini-review</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Zulpykhar</surname>
<given-names>Zhandos</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3310824"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Rakhmetov</surname>
<given-names>Maxot</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2180607"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Jugembayeva</surname>
<given-names>Bakytgul</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kapanova</surname>
<given-names>Dameli</given-names>
</name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3367892"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Zhilmagambetova</surname>
<given-names>Raushan</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3260295"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Computer Science, L. N. Gumilyov Eurasian National University</institution>, <city>Astana</city>, <country country="KZ">Kazakhstan</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Computer Science, Kh. Dosmukhamedov Atyrau University</institution>, <city>Atyrau</city>, <country country="KZ">Kazakhstan</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Physics and Technical Disciplines, Kh. Dosmukhamedov Atyrau University</institution>, <city>Atyrau</city>, <country country="KZ">Kazakhstan</country></aff>
<aff id="aff4"><label>4</label><institution>Department of Social and Humanitarian Disciplines, Academy of Physical Education and Mass Sports</institution>, <city>Astana</city>, <country country="KZ">Kazakhstan</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Maxot Rakhmetov, <email xlink:href="mailto:maksot.raxmetov.96@gmail.com">maksot.raxmetov.96@gmail.com</email>; Raushan Zhilmagambetova, <email xlink:href="mailto:rausanzilmagambetova@gmail.com">rausanzilmagambetova@gmail.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-03">
<day>03</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>11</volume>
<elocation-id>1798126</elocation-id>
<history>
<date date-type="received">
<day>28</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>18</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>20</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Zulpykhar, Rakhmetov, Jugembayeva, Kapanova and Zhilmagambetova.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Zulpykhar, Rakhmetov, Jugembayeva, Kapanova and Zhilmagambetova</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-03">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Scientific research on the assessment of educational outcomes using artificial intelligence technologies in higher education is still fragmented, especially in the context of Ibero-American countries and the transforming educational systems, which include Kazakhstan. Despite the active introduction of digital platforms, analytical tools, and automated assessment systems, comprehensive research revealing innovative approaches, methodological foundations, and ethical aspects of AI-based assessment remains limited. In this regard, this mini-review is aimed at filling the identified research gap by analyzing key theoretical works and modern empirical studies that determine the prospects for further development of this field. The methodological basis of the review is a conceptual model that considers assessment using artificial intelligence as a multi-level system that includes automated analysis of educational data, personalized feedback, predictive evaluation mechanisms, and algorithmic support for pedagogical decisions. Within the framework of this model, assessment is interpreted not as an isolated control tool, but as an integrated component of the educational environment that influences individual learning trajectories, academic success and student engagement. It should be noted that this mini-review is one of the first studies in which this conceptual framework is applied to a comparative analysis of assessment practices using artificial intelligence in the Ibero-American and Kazakh contexts of higher education. The review included 12 peer-reviewed scientific publications published since 2020, which addressed the issues of automated assessment, learning analytics, adaptive feedback systems and ethical aspects of the introduction of artificial intelligence in the university environment of these regions. 
The paper analyzes the main innovations in the field of AI-based assessment, including the use of behavioral and cognitive analytics, automated formative assessment, forecasting academic risks and personalized assessment support for students. Special attention is paid to contextual factors such as the level of digital readiness of students, the institutional features of universities, as well as differences in assessment practices between public and private educational organizations. Along with technological innovations, the mini-review examines the key ethical challenges associated with the use of artificial intelligence in assessing educational achievements. Such challenges include algorithmic bias, limited transparency of assessment decisions, issues of protecting students&#x2019; personal data, and the risks of increasing educational inequality. The analysis shows that the manifestation of these ethical issues varies in Ibero-American countries and Kazakhstan, reflecting differences in educational policies, the level of digital infrastructure, and socio-economic conditions. Based on research using a variety of methodological approaches, including quantitative, qualitative, and mixed designs, this mini-review demonstrates the multiplicity of interpretations and practices of applying artificial intelligence in higher education assessment. At the same time, the need to expand interdisciplinary research aimed at analyzing poorly studied categories of students and alternative educational trajectories in the digital environment is emphasized. In conclusion, the educational and ethical implications of further implementation of AI-based assessment in higher education are considered. Given the increasing role of automated assessment systems and the desire of universities to improve the objectivity and fairness of assessment, the development of methodologically sound and ethically responsible models for the use of artificial intelligence is of particular relevance. 
At the same time, taking into account the regional characteristics of educational systems makes the comparative analysis of the Ibero-American and Kazakh experience an important area of modern research in the field of digital education.</p>
</abstract>
<kwd-group>
<kwd>AI-based assessment</kwd>
<kwd>algorithmic transparency</kwd>
<kwd>comparative education</kwd>
<kwd>ethical challenges</kwd>
<kwd>higher education</kwd>
<kwd>learning analytics</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="0"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="18"/>
<page-count count="8"/>
<word-count count="6451"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Assessment, Testing and Applied Measurement</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>The development and transformation of assessment systems in higher education is largely determined by the influence of the digital educational environment, institutional actors and the technological solutions used. In the context of digitalization of education, the introduction of artificial intelligence tools is of particular importance, which are increasingly used to analyze educational data, automate assessment procedures and support pedagogical decision-making. However, despite the active spread of digital educational platforms, most research in the field of higher education continues to focus on learning and teaching, while assessment using artificial intelligence remains a relatively little-studied area.</p>
<p>In recent years, there has been an increase in scientific interest in AI-based assessment, since assessment is the key mechanism that determines students&#x2019; educational trajectories, academic success and managerial decision-making at the level of educational organizations (<xref ref-type="bibr" rid="ref5">George and Wooden, 2023</xref>). At the same time, existing research primarily analyzes the technical aspects of automated assessment or the effectiveness of individual analytical tools, ignoring the methodological foundations, contextual differences and ethical implications of the introduction of artificial intelligence in the evaluation practices of higher education.</p>
<p>This problem is becoming particularly relevant in the Ibero-American countries and in the transforming educational systems to which Kazakhstan belongs. These regions are characterized by similar digital development challenges, including heterogeneity of digital infrastructure, differences in the level of digital readiness of students, and institutional constraints in the implementation of innovative educational technologies. Despite this, comparative studies on the use of artificial intelligence in assessment in these contexts remain extremely limited, which makes it difficult to form universal and context-sensitive AI-based assessment models.</p>
<p>Conceptually, artificial intelligence assessment is considered in this review as a multicomponent system that includes automated analysis of educational data, adaptive and personalized assessment mechanisms, formative assessment and predictive models for identifying academic risks. As part of this approach, AI-based assessment goes beyond traditional knowledge control and acts as a dynamic tool for managing the educational process, capable of influencing the quality of learning, student engagement, and the fairness of assessment decisions.</p>
<p>An important aspect of the introduction of artificial intelligence in assessment is the ethical challenges associated with algorithmic bias, limited transparency of automated solutions, protection of personal data of students and the risks of increasing educational inequality. These issues are of particular importance in the context of socio-economic and cultural diversity, characteristic of both the Ibero-American region and Kazakhstan. Insufficient consideration of ethical aspects can lead to a decrease in confidence in assessment systems and an increase in institutional and individual educational risks.</p>
<p>In connection with the above, this mini-review is aimed at synthesizing key theoretical and empirical studies on assessment using artificial intelligence in higher education, with a focus on innovation and ethical challenges in the Ibero-American and Kazakh contexts. The purpose of the review is to identify the main directions of AI-based assessment development, analyze existing problems and form a research agenda for further comparative and interdisciplinary research in the field of digital assessment in higher education.</p>
</sec>
<sec id="sec2">
<label>2</label>
<title>Conceptual framework and literature selection strategy</title>
<p>To date, there is no single universal theory that would fully describe the assessment of educational outcomes using artificial intelligence in higher education. This is largely due to the diversity of educational contexts, institutional models, digital infrastructures, and cultural and social environments in which AI-based assessment systems are being implemented. An attempt to build a universal theory of assessment using artificial intelligence inevitably runs the risk of ignoring contextual and ethical differences between educational systems of different countries and regions.</p>
<p>In this regard, modern research increasingly uses ecosystem and multi-level conceptual approaches that allow assessment to be considered as a result of the interaction of technological, pedagogical, institutional and socio-cultural factors (<xref ref-type="bibr" rid="ref1">Arifeen, 2023</xref>). Within the framework of such approaches, AI-based assessment is understood not as an isolated technological procedure, but as a dynamic system embedded in the digital educational environment and influencing students&#x2019; learning trajectories, pedagogical decisions and quality management of education (<xref ref-type="bibr" rid="ref17">Yan et al., 2025</xref>).</p>
<p>The expanded analytical model of AI-based assessment is used as the conceptual basis of this mini-review, considering assessment as a set of interrelated components: automated analysis of educational data, adaptive and personalized assessment mechanisms, formative assessment, predictive models of academic risks and algorithmic decision support. According to this model, the effectiveness and fairness of assessment are determined not by individual artificial intelligence tools, but by the nature of their interaction with the educational context, the level of digital readiness of students and institutional practices of higher education.</p>
<p>Particular importance within this conceptual framework is attached to the ethical dimension of AI-based assessment. Ethical aspects, including algorithmic bias, transparency of assessment decisions, protection of personal data, and risks of increasing social and educational inequality, are considered as integral elements of the assessment system, directly affecting trust in digital assessment tools and their pedagogical expediency (<xref ref-type="bibr" rid="ref14">Maleki, 2025</xref>). This approach allows us to analyze AI-based assessment not only from the point of view of innovation, but also from the perspective of responsibility and sustainability of educational solutions.</p>
<p>It is this logic of the relationship between technological innovations, the educational context, and ethical consequences that forms the basis for the synthesis and discussion of literature in this mini-review. Special attention is paid to the comparative analysis of the Ibero-American and Kazakh contexts of higher education, which allows us to identify both universal trends in the development of AI-based assessment, as well as regionally specific features of its implementation and use.</p>
<p>The selection of scientific sources for the mini-review was based on the analysis of peer-reviewed publications on the use of artificial intelligence in assessing educational outcomes in higher education. The review included studies examining automated and formative assessment, learning analytics, predictive assessment models, as well as work addressing the ethical and social aspects of using artificial intelligence in a university environment. Special attention was paid to publications devoted to Ibero-American countries and Kazakhstan, as well as studies that allow for the comparison of different educational contexts.</p>
<p>This mini-review is probably one of the first studies in which this conceptual framework is used to systematically analyze the innovations and ethical challenges of AI-based assessment in higher education in the Ibero-American and Kazakh contexts. This allows not only to summarize existing scientific approaches, but also to form the basis for further comparative and empirical research in the field of digital assessment.</p>
</sec>
<sec sec-type="methods" id="sec3">
<label>3</label>
<title>Methodology</title>
<p>To conduct this mini-review, a targeted search of scientific literature was carried out in several international academic databases, including Scopus, Web of Science, ERIC and Google Scholar. The search was conducted using combinations of keywords and logical operators reflecting the subject of the study, including: artificial intelligence, AI-based assessment, automated assessment, learning analytics, higher education, ethical challenges, fairness, transparency, Ibero-American, Latin America, Spain, Portugal, Kazakhstan. The use of a wide range of search terms was due to the interdisciplinary nature of the issues under study and the variability of terminology used in scientific publications.</p>
<p>The initial search showed significant heterogeneity of results: in a number of databases, excessively extensive samples were obtained, including studies only indirectly related to the assessment, while in other sources the volume of relevant publications turned out to be limited. In this regard, the following inclusion criteria were formulated for the selection of literature:</p><list list-type="order">
<list-item>
<p>Publication in a peer-reviewed scientific journal;</p>
</list-item>
<list-item>
<p>Explicit focus on the assessment of educational outcomes using artificial intelligence or learning data analytics in higher education;</p>
</list-item>
<list-item>
<p>Consideration of innovative or ethical aspects of AI-based assessment;</p>
</list-item>
<list-item>
<p>The empirical, review, or conceptual nature of the study;</p>
</list-item>
<list-item>
<p>Publication between 2020 and 2025.</p>
</list-item>
</list>
<p>The choice of this time interval is due to the fact that since the early 2020s, there has been an intensive development of learning analytics, automated assessment and algorithmic educational decision support systems, which form the methodological basis of modern AI-based assessment models. At the same time, during this period, a scientific discussion is being formed related to the ethical risks of using artificial intelligence in education.</p>
<p>At the next stage, exclusion criteria were applied, which made it possible to narrow the sample and ensure the thematic relevance of the mini-review. Publications that were excluded from the analysis were:</p><list list-type="bullet">
<list-item>
<p>focused solely on learning or teaching without considering evaluative practices;</p>
</list-item>
<list-item>
<p>considered levels of education other than higher education;</p>
</list-item>
<list-item>
<p>touched upon the use of digital technologies without elements of artificial intelligence;</p>
</list-item>
<list-item>
<p>did not contain an empirical, analytical or conceptual discussion of the ethical aspects of assessment;</p>
</list-item>
<list-item>
<p>did not allow the results of the study to be correlated with the Ibero-American or Kazakh educational context.</p>
</list-item>
</list>
<p>Despite the consistent application of these criteria, the volume of potentially relevant literature remained significant and in some cases insufficiently specific for the purposes of this mini-review. In this regard, the &#x201C;snowball sampling&#x201D; method was used, which proved to be the most effective strategy for identifying key publications in a highly specialized and emerging field of research. In particular, the reverse analysis of literature lists in fundamental and frequently cited works on AI-based assessment and learning analytics in higher education was used, as well as a direct analysis of subsequent publications using the &#x201C;Cited by&#x201D; function in Google Scholar.</p>
<p>The use of this strategy is in line with the recommendations of researchers, who point to its effectiveness in analyzing new and interdisciplinary research areas where relevant work may not be identified by a standard keyword search. Additionally, the selection of literature was based on the expert assessment of researchers in the field of digital education and assessment.</p>
<p>As a result of the multi-stage selection, 12 scientific publications were included in the final sample, most fully reflecting the innovations and ethical challenges of using artificial intelligence in higher education assessment. The selected papers cover research on automated formative assessment, learning data analytics, academic risk forecasting, algorithmic support for assessment decisions, as well as issues of fairness, transparency, and data protection for students.</p>
<p>The analysis of the selected publications was organized in accordance with the conceptual logic outlined in the previous section and involved a consistent review:</p><list list-type="order">
<list-item>
<p>Innovative AI-based assessment models and tools;</p>
</list-item>
<list-item>
<p>Contextual features of their implementation in Ibero-American and Kazakh higher education;</p>
</list-item>
<list-item>
<p>Key ethical issues and risks arising from the use of automated assessment systems.</p>
</list-item>
</list>
<p>This mini-review systematizes existing scientific approaches to AI-based assessment and forms the basis for further comparative and empirical research aimed at developing ethically responsible and context-sensitive assessment models in higher education.</p>
<p>The final sample of studies included in this mini-review is summarized in <xref ref-type="table" rid="tab1">Table 1</xref>, which presents the geographical distribution, methodological approaches and analytical focus of the selected publications.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Overview of selected studies on AI-based assessment in higher education (2020&#x2013;2025).</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Author(s)</th>
<th align="left" valign="top">Country/region</th>
<th align="left" valign="top">Methodology</th>
<th align="left" valign="top">Focus</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref7">Hammad et al. (2024)</xref>
</td>
<td align="left" valign="top">Ibero-American context</td>
<td align="left" valign="top">Conceptual review</td>
<td align="left" valign="top">Innovation</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref8">Hernando-Castro et al. (2024)</xref>
</td>
<td align="left" valign="top">Spain</td>
<td align="left" valign="top">Case study, mixed methods</td>
<td align="left" valign="top">Innovation</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref18">Zulpykhar et al. (2025)</xref>
</td>
<td align="left" valign="top">Kazakhstan</td>
<td align="left" valign="top">Quantitative survey, statistical analysis</td>
<td align="left" valign="top">Ethics</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref12">Lelescu and Kabiraj (2024)</xref>
</td>
<td align="left" valign="top">International</td>
<td align="left" valign="top">Conceptual framework</td>
<td align="left" valign="top">Innovation</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref3">Devran and El&#x00E7;i (2020)</xref>
</td>
<td align="left" valign="top">International</td>
<td align="left" valign="top">Learning analytics, quantitative modeling</td>
<td align="left" valign="top">Innovation</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref16">Slade et al. (2024)</xref>
</td>
<td align="left" valign="top">International</td>
<td align="left" valign="top">Experimental design, analytics</td>
<td align="left" valign="top">Ethics</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref11">Kaya-Capocci et al. (2022)</xref>
</td>
<td align="left" valign="top">International</td>
<td align="left" valign="top">Systematic review</td>
<td align="left" valign="top">Ethics</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref6">Guruloo and Osman (2024)</xref>
</td>
<td align="left" valign="top">International</td>
<td align="left" valign="top">Critical analysis</td>
<td align="left" valign="top">Ethics</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref9">Ion and Mercader Juan (2024)</xref>
</td>
<td align="left" valign="top">International</td>
<td align="left" valign="top">Predictive analytics, quantitative methods</td>
<td align="left" valign="top">Innovation</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref2">de la Torre and Baldeon-Calisto (2024)</xref>
</td>
<td align="left" valign="top">Spain / Latin America</td>
<td align="left" valign="top">Mixed methods</td>
<td align="left" valign="top">Innovation</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref10">Karunaratne et al. (2025)</xref>
</td>
<td align="left" valign="top">International</td>
<td align="left" valign="top">Conceptual and ethical analysis</td>
<td align="left" valign="top">Ethics</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref13">Lodhi and Ilyassova-Schoenfeld (2023)</xref>
</td>
<td align="left" valign="top">Kazakhstan</td>
<td align="left" valign="top">&#x0421;ontextual foundation</td>
<td align="left" valign="top">Innovation</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec sec-type="results" id="sec4">
<label>4</label>
<title>Results</title>
<sec id="sec5">
<label>4.1</label>
<title>Institutional and cultural contexts of AI-based assessment implementation</title>
<p>The results of the analysis of the selected studies show that the introduction of artificial intelligence assessment in higher education significantly depends on the institutional and cultural context. In Ibero-American countries, the emphasis in research is more often on the use of AI-based assessment as a tool to increase the fairness and transparency of assessment procedures, especially in the context of mass higher education and the growing social diversity of student populations. A number of works emphasize that automated formative assessment and analysis of educational data are used to reduce the subjectivity of assessments and support students with different levels of academic training.</p>
<p>More specifically, studies situated in Spain and Latin America often frame AI-enabled assessment through the lens of assessment integrity, transparency, and fairness in digitally mediated course evaluation. For example, <xref ref-type="bibr" rid="ref8">Hernando-Castro et al. (2024)</xref> describe how the integration of AI tools in postgraduate assessment raises questions about how evaluation criteria are communicated and how students perceive the legitimacy of AI-assisted grading decisions. Similarly, <xref ref-type="bibr" rid="ref2">de la Torre and Baldeon-Calisto (2024)</xref> synthesize regional discussions around generative AI in higher education, highlighting institutional concerns about responsible use and the need for clearer safeguards in academic evaluation practices.</p>
<p>In contrast, studies linked to the Kazakh context more frequently emphasize AI-based assessment as part of institutional monitoring and educational management, where analytics and automated indicators support early identification of academic risks and decision-making at the program or platform level. For instance, <xref ref-type="bibr" rid="ref13">Lodhi and Ilyassova-Schoenfeld (2023)</xref> provide contextual evidence on Kazakhstan&#x2019;s policy transfer and structural reforms that shape how digital assessment infrastructures are adopted and governed, while <xref ref-type="bibr" rid="ref18">Zulpykhar et al. (2025)</xref> (Kazakhstan) empirically demonstrate that students&#x2019; perceptions of transparency and fairness condition their acceptance of algorithmic evaluation in local higher-education settings.</p>
</sec>
<sec id="sec6">
<label>4.2</label>
<title>The role of digital and cognitive resources of students</title>
<p>A significant result of the review is the identification of a stable relationship between the level of students&#x2019; digital readiness and the effectiveness of AI-based assessment. Studies conducted at universities in Spain, Portugal and Latin American countries show that students with higher levels of digital literacy and self-regulation skills demonstrate a more positive perception of automated assessment mechanisms and higher academic results when using analytical assessment systems.</p>
<p>Similar trends have been identified in Kazakhstani studies, where the level of students&#x2019; digital competencies has a significant impact on the interpretation and use of automated assessment results. In particular, students with a low level of digital readiness are more likely to perceive AI-based assessment as an opaque and formal process, which reduces its pedagogical effectiveness. These results highlight the need to take into account the digital and cognitive resources of students when designing and implementing assessment systems based on artificial intelligence.</p>
</sec>
<sec id="sec7">
<label>4.3</label>
<title>Innovations in assessment practices and their pedagogical effect</title>
<p>Most of the analyzed studies indicate an increase in the use of innovative forms of AI-based assessment, including automated formative assessment, personalized feedback, adaptive tests, and academic risk forecasting. In the Ibero-American context, these innovations are more often applied in courses with a large number of students, where traditional forms of assessment are difficult.</p>
<p>At Kazakhstani universities, innovative assessment solutions are mainly integrated into centralized digital platforms and used to monitor academic progress and make managerial decisions. At the same time, research shows that the pedagogical effect of AI-based assessment is enhanced when automated assessments are complemented by explainable feedback and the possibility of pedagogical interpretation of the results.</p>
</sec>
<sec id="sec8">
<label>4.4</label>
<title>Ethical challenges and risks of using AI-based assessment</title>
<p>A separate set of results is related to the analysis of ethical issues accompanying the introduction of artificial intelligence in the assessment of educational achievements. Ibero-American studies emphasize the risks of algorithmic bias related to the socio-economic status of students, linguistic diversity, and cultural differences. It is pointed out that without appropriate calibration, AI-based assessment models can reproduce and reinforce existing educational inequalities.</p>
<p>In the Kazakh context, the key ethical challenges are limited transparency of algorithmic solutions and insufficient awareness of students about the principles of automated assessment systems. A number of studies have noted that the lack of explainability of the results of AI-based assessment reduces the confidence of students and teachers in digital assessment tools, despite their high analytical accuracy.</p>
</sec>
<sec id="sec9">
<label>4.5</label>
<title>Comparative analysis and synthesis of results</title>
<p>A comparative analysis has shown that, despite differences in institutional and socio-economic conditions, Ibero-American and Kazakh AI-based assessment practices demonstrate similar patterns. In both contexts, the innovative potential of artificial intelligence in assessment is realized most effectively when combining automated analytical mechanisms with pedagogically meaningful interpretation of results and ethically sound decisions.</p>
<p>The results of the mini-review indicate that AI-based assessment in higher education is not only a technological innovation, but also a complex sociotechnical phenomenon, the effectiveness and sustainability of which are determined by the interaction of digital, pedagogical and ethical factors. These findings form the basis for further research aimed at developing context-sensitive and ethically responsible assessment models in the context of the digital transformation of higher education.</p>
</sec>
</sec>
<sec sec-type="discussion" id="sec10">
<label>5</label>
<title>Discussion</title>
<sec id="sec11">
<label>5.1</label>
<title>Generative AI in assessment: emerging practices and challenges</title>
<p>Recent advances in generative AI (GenAI)&#x2014;especially large language models (LLMs)&#x2014;are rapidly reshaping assessment practices in higher education, extending beyond traditional learning analytics toward content generation, evaluation support, and feedback automation. Current research suggests that GenAI is increasingly used to (i) generate formative feedback and commentary on student work, (ii) support rubric construction and exemplars, (iii) assist in question/item generation, and (iv) enable conversational assessment and iterative drafting. At the same time, this shift introduces new methodological risks for assessment validity and reliability, including misalignment between generated feedback and intended learning outcomes, sensitivity to prompt design, inconsistent scoring judgments, and the possibility of &#x201C;plausible but incorrect&#x201D; outputs that complicate the evidentiary basis of grading decisions. Systematic reviews of GenAI in higher education and academic integrity highlight that institutions are moving from a &#x201C;detection-first&#x201D; stance to redesigning assessment tasks and policies to maintain trust and legitimacy in evaluation (<xref ref-type="bibr" rid="ref15">Rakhmetov et al., 2026</xref>).</p>
<p>From an ethical perspective, GenAI intensifies established concerns discussed in this mini-review&#x2014;transparency, fairness, privacy, and educational inequality&#x2014;while adding integrity-related questions about authorship, acceptable assistance, and accountability for automated feedback. Global policy guidance emphasizes that GenAI adoption in education should be human-centred, with clear governance over data protection, explainability, and responsible use in assessment settings. A growing ethics-and-regulation literature further argues that without explicit institutional safeguards, GenAI may amplify bias, reduce procedural transparency, and deepen unequal access to high-quality support tools&#x2014;making context-sensitive governance particularly important for regions with heterogeneous digital infrastructure, such as Ibero-American countries and Kazakhstan (<xref ref-type="bibr" rid="ref4">Garc&#x00ED;a-L&#x00F3;pez and Trujillo-Li&#x00F1;&#x00E1;n, 2025</xref>).</p>
<p>The results of this mini-review show that the introduction of artificial intelligence assessment in higher education is not only a technological, but also a cultural and institutional process. Both in Ibero-American countries and in Kazakhstan, the use of AI-based assessment is accompanied by the need to rethink traditional approaches to assessment, the role of the teacher and the responsibility of educational organizations. Just as educational actors adapt to new socio-cultural conditions, universities are forced to rethink their values and practices in the context of digital transformation.</p>
<p>An analysis of the literature indicates that the introduction of AI-based assessment is often accompanied by a change in perceptions of the fairness and objectivity of assessment. In the Ibero-American context, this is reflected in the desire to use artificial intelligence as a tool to reduce subjectivity and compensate for social inequality. Kazakh studies, on the contrary, more often emphasize the managerial function of automated assessment, aimed at increasing the controllability and predictability of learning outcomes. These differences reflect a broader cultural and institutional shift in which assessment is gradually shifting from a pedagogically oriented practice to an analytically guided process.</p>
<p>The issue of digital adaptation of students and teachers to AI-based assessment deserves special attention. Similar to the processes of cultural adaptation in other social spheres, the development of automated assessment systems requires time, accumulation of experience and building trust. Research shows that in the early stages of the introduction of artificial intelligence, assessment is often perceived as an externally imposed and opaque mechanism, especially among students with a low level of digital readiness. As data accumulates and digital competencies develop, the role of AI-based assessment is gradually transforming from a monitoring tool to a means of supporting learning and self-regulation.</p>
<p>An important conclusion of the mini-review is that innovative assessment practices do not guarantee a positive pedagogical effect without taking into account contextual factors. Automated formative assessment, personalized feedback and forecasting of academic risks are most effective when they are integrated into a pedagogically meaningful learning model and accompanied by interpretation from the teacher. Otherwise, there is a risk of reducing the assessment to a formal algorithmic solution, which may reduce students&#x2019; motivation and increase distrust of digital systems.</p>
<p>The ethical aspects of AI-based assessment require a separate discussion. Ibero-American studies highlight the danger of algorithmic bias related to the socio-economic status, linguistic diversity, and cultural differences of students. In the Kazakh context, the key ethical problem remains the lack of transparency of algorithms and the limited awareness of students about the principles of functioning of automated assessment systems. In both cases, the lack of explainability and participation of students in the interpretation of the results can lead to a decrease in confidence in the assessment and increased educational risks.</p>
<p>The prospects for further research in the field of AI-based assessment are related to the need for an in-depth analysis of how automated assessment systems affect the academic trajectories of various groups of students. In particular, a promising direction is to study whether algorithmic models reproduce existing educational inequalities or, on the contrary, are able to compensate for them under the condition of ethically responsible design. Longitudinal studies could reveal how the perception and effectiveness of AI-based assessment change over time and how digital assessment affects long-term educational outcomes.</p>
</sec>
<sec id="sec12">
<label>5.2</label>
<title>Improving research approaches</title>
<p>The review revealed a number of methodological limitations in existing research on AI-based assessment in higher education. First of all, there is a lack of research that takes into account the diversity of educational and social identities of students. In most studies, the analysis is based on aggregated samples, which makes it difficult to identify the differentiated effects of automated assessment for different categories of students.</p>
<p>In addition, despite the predominance of quantitative studies and analytical models, the potential of mixed methods is clearly underestimated in the literature. The combination of quantitative analysis of learning analytics with qualitative data reflecting the experience of students and teachers would allow for a deeper understanding of the mechanisms of perception and use of AI-based assessment. This approach is especially important for interpreting results that may look neutral or objective at the data level, but have ambiguous pedagogical and ethical implications in practice.</p>
<p>The development of critical methodological approaches to the analysis of assessment data is also a promising area. Using frameworks focused on social justice and data ethics can promote more responsible use of quantitative models and prevent simplistic interpretations of results. The rejection of the data-speak-for-themselves approach in favor of meaningful pedagogical analysis makes it possible to strengthen the scientific and social validity of AI-based assessment research.</p>
<p>Finally, longitudinal studies can significantly expand the understanding of the dynamics of the introduction of artificial intelligence in assessment. Comparing the short-term and long-term effects of AI-based assessment will allow us to determine to what extent automated assessment systems really contribute to the sustainable improvement of educational outcomes and in what conditions they require additional pedagogical and ethical adjustments.</p>
<p>While the present mini-review identifies consistent cross-regional patterns, the evidence base remains limited (n&#x202F;=&#x202F;12) and should be interpreted accordingly. To avoid overgeneralization, the comparative claims are grounded in specific study-level examples drawn from the reviewed corpus and are presented as emerging tendencies rather than exhaustive statements about the two regions. This framing clarifies the scope of inference and increases the transparency of how the synthesis was derived from the included publications.</p>
</sec>
</sec>
<sec id="sec13">
<label>6</label>
<title>Limitations</title>
<p>Although this mini-review provides a structured synthesis of recent work on AI-based assessment in higher education, the final corpus of 12 studies is relatively small for drawing strong comparative conclusions across two broad regions. Therefore, the comparative insights offered in this paper should be interpreted as indicative patterns and thematic tendencies, rather than as definitive generalizations about the entire Ibero-American and Kazakh research landscapes. In addition, the composition of the corpus may reflect database coverage (Scopus/Web of Science/ERIC/Google Scholar), publication-type constraints (peer-reviewed journal articles prioritized), and language and indexing effects, which can systematically under-represent locally published or non-English/Spanish/Russian sources. Finally, the rapid evolution of AI-based assessment tools since 2020 suggests that the available evidence base may be temporally uneven, with emergent practices not yet fully captured in peer-reviewed literature. These limitations underscore the need for broader systematic reviews and meta-syntheses with expanded regional coverage and transparent screening statistics.</p>
</sec>
<sec sec-type="conclusions" id="sec14">
<label>7</label>
<title>Conclusion</title>
<p>To the best of our knowledge, this mini-review is one of the first studies systematically examining assessment using artificial intelligence in higher education in the comparative context of Ibero-American countries and Kazakhstan. Unlike most existing works that focus either on individual technological solutions or on local institutional cases, this review combines an analysis of innovative AI-based assessment practices with an examination of ethical challenges and contextual factors of their implementation.</p>
<p>Importantly, the proposed conceptual model advances learning analytics and digital assessment research by conceptualizing AI-based assessment as a multi-level sociotechnical system that connects analytics (data capture and prediction), pedagogy (formative feedback and teacher interpretation), and governance (institutional decision support and accountability). Unlike many learning-analytics approaches that treat ethics as an external constraint, our model embeds fairness, transparency, privacy, and explainability as internal design parameters shaping the validity and legitimacy of assessment decisions, and it remains extensible to GenAI-enabled assessment practices (e.g., LLM-supported feedback and rubric mediation).</p>
<p>The analysis showed that artificial intelligence-based assessment should be considered as a complex sociotechnical system, the functioning of which is determined by the interaction of technological, pedagogical and institutional components. The use of automated learning data analysis, adaptive assessment mechanisms, and predictive models has significant potential to enhance the objectivity, personalization, and effectiveness of assessment. However, the realization of this potential is possible only if pedagogically meaningful design and consideration of regional features of educational systems are taken into account.</p>
<p>A comparative analysis of the Ibero-American and Kazakh contexts revealed both universal trends in the development of AI-based assessment, as well as specific features due to differences in educational policy, the level of digital maturity of universities and socio-economic conditions. There is a growing interest in the use of artificial intelligence in assessment in both regions, but the goals and priorities of its application differ: from increasing the fairness and inclusiveness of assessment practices to strengthening the analytical and managerial functions of assessment.</p>
<p>Special attention in this mini-review is paid to the ethical aspects of the implementation of AI-based assessment. An analysis of the literature shows that the risks of algorithmic bias, limited transparency of evaluation decisions, and violation of the confidentiality of student data remain relevant and insufficiently researched. In the context of the increasing use of automated assessment systems, ignoring these issues can lead to a decrease in confidence in digital educational technologies and an increase in educational inequality. In this regard, the development of ethically responsible and explainable assessment models is of fundamental importance.</p>
<p>The findings emphasize the need to move from the fragmented implementation of artificial intelligence tools to the formation of methodologically sound and context-sensitive strategies for using AI-based assessment in higher education. Such strategies should take into account not only the technical characteristics of algorithms, but also the level of digital readiness of students and teachers, institutional conditions and socio-cultural characteristics of the educational environment.</p>
<p>In the future, further research in this area may be aimed at conducting longitudinal and interdisciplinary studies analyzing the long-term impact of AI-based assessment on students&#x2019; academic trajectories and the quality of educational outcomes. Of particular interest are studies focused on the development and testing of assessment models capable of combining analytical accuracy with pedagogical interpretability and ethical responsibility.</p>
<p>In the context of the accelerated digital transformation of higher education, the results of this mini-review can serve as a theoretical and methodological basis for the development of educational policies and assessment practices aimed at the sustainable and equitable use of artificial intelligence. Given the growing role of automated assessment systems in universities and their impact on educational decisions, further development of research in the field of AI-based assessment is not only relevant, but also necessary to ensure the quality and social responsibility of higher education.</p>
</sec>
<sec id="sec15">
<label>8</label>
<title>Future studies</title>
<p>Future research on AI-based assessment in higher education should focus on longitudinal and comparative designs that capture the dynamic and context-dependent nature of automated assessment systems. Longitudinal studies are needed to examine how AI-based assessment influences students&#x2019; academic trajectories, learning behaviors, and perceptions of fairness over time, while comparative cross-regional research can clarify how institutional, socio-economic, and policy contexts shape the implementation and outcomes of these systems in Ibero-American countries and Kazakhstan. Particular attention should be given to ethical issues, including algorithmic bias, transparency, explainability, and data protection, as well as to how students and teachers understand and trust automated assessment decisions. The integration of mixed-methods approaches combining learning analytics with qualitative inquiry would allow for a more nuanced understanding of both performance outcomes and lived educational experiences. Such research is essential for the development of methodologically robust, ethically responsible, and context-sensitive AI-based assessment models aligned with the values and goals of higher education.</p>
</sec>
</body>
<back>
<sec sec-type="author-contributions" id="sec16">
<title>Author contributions</title>
<p>ZZ: Conceptualization, Data curation, Investigation, Software, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. MR: Formal analysis, Methodology, Project administration, Supervision, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. BJ: Funding acquisition, Project administration, Resources, Validation, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. DK: Methodology, Resources, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. RZ: Conceptualization, Data curation, Investigation, Software, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="COI-statement" id="sec17">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec18">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec19">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Arifeen</surname><given-names>S. R.</given-names></name></person-group> (<year>2023</year>). <article-title>Ecological aspects of online learning in higher education: a qualitative multi-level exploration in a developing country</article-title>. <source>Educ. Inf. Technol.</source> <volume>28</volume>, <fpage>1</fpage>&#x2013;<lpage>23</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10639-022-11507-5</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>de la Torre</surname><given-names>A.</given-names></name> <name><surname>Baldeon-Calisto</surname><given-names>M.</given-names></name></person-group> (<year>2024</year>). <chapter-title>Generative artificial intelligence in Latin American higher education: a systematic literature review</chapter-title>. In <conf-name>2024 12th International Symposium on Digital Forensics and Security (ISDFS)</conf-name> (pp. <fpage>1</fpage>&#x2013;<lpage>7</lpage>). <publisher-name>IEEE</publisher-name>.</mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Devran</surname><given-names>B. &#x00C7;.</given-names></name> <name><surname>El&#x00E7;i</surname><given-names>A.</given-names></name></person-group> (<year>2020</year>). &#x201C;<chapter-title>Traditional versus digital assessment methods: faculty development</chapter-title>&#x201D; in <source>Assessment, Testing, and Measurement Strategies in Global Higher Education</source>, <fpage>20</fpage>&#x2013;<lpage>34</lpage>.</mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Garc&#x00ED;a-L&#x00F3;pez</surname><given-names>I. M.</given-names></name> <name><surname>Trujillo-Li&#x00F1;&#x00E1;n</surname><given-names>L.</given-names></name></person-group> (<year>2025</year>). <article-title>Ethical and regulatory challenges of generative AI in education: a systematic review</article-title>. <source>Front. Educ.</source> <volume>10</volume>:<fpage>1565938</fpage>. doi: <pub-id pub-id-type="doi">10.3389/feduc.2025.1565938</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>George</surname><given-names>B.</given-names></name> <name><surname>Wooden</surname><given-names>O.</given-names></name></person-group> (<year>2023</year>). <article-title>Managing the strategic transformation of higher education through artificial intelligence</article-title>. <source>Adm. Sci.</source> <volume>13</volume>:<fpage>196</fpage>. doi: <pub-id pub-id-type="doi">10.3390/admsci13090196</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Guruloo</surname><given-names>T. N. M.</given-names></name> <name><surname>Osman</surname><given-names>K.</given-names></name></person-group> (<year>2024</year>). &#x201C;<chapter-title>Beyond pen and paper: understanding the landscape of digital assessment in stem higher education</chapter-title>&#x201D; in <source>Digital Assessment in Higher Education: Navigating and Researching Challenges and opportunities</source> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer Nature Singapore</publisher-name>), <fpage>177</fpage>&#x2013;<lpage>197</lpage>.</mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hammad</surname><given-names>M. M.</given-names></name> <name><surname>Al-Refai</surname><given-names>M.</given-names></name> <name><surname>Musallam</surname><given-names>W.</given-names></name> <name><surname>Musleh</surname><given-names>S.</given-names></name> <name><surname>Faouri</surname><given-names>E. A.</given-names></name></person-group> (<year>2024</year>). &#x201C;<chapter-title>A taxonomy of AI-based assessment educational technologies</chapter-title>&#x201D; in <source>2024 15th International Conference on Information and Communication Systems (ICICS)</source> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hernando-Castro</surname><given-names>S.</given-names></name> <name><surname>L&#x00F3;pez-Arquillo</surname><given-names>J. D.</given-names></name> <name><surname>Perea-&#x00C1;lvarez-de-Eulate</surname><given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>The use of AI tools and their impact on the assessment of postgraduate courses in the technological field</article-title>. <source>Adv. Build. Educ.</source> <volume>8</volume>, <fpage>9</fpage>&#x2013;<lpage>23</lpage>. doi: <pub-id pub-id-type="doi">10.20868/abe.2024.3.5407</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Ion</surname><given-names>G.</given-names></name> <name><surname>Mercader Juan</surname><given-names>C.</given-names></name></person-group> (<year>2024</year>). &#x201C;<chapter-title>D-Eva Programme: supporting academics to use digital tools for students&#x2019; assessment</chapter-title>&#x201D; in <source>Digital Assessment in Higher Education: Navigating and Researching Challenges and Opportunities</source> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer Nature Singapore</publisher-name>), <fpage>225</fpage>&#x2013;<lpage>240</lpage>.</mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Karunaratne</surname><given-names>T.</given-names></name> <name><surname>Axelsson</surname><given-names>P.</given-names></name> <name><surname>Lindblad</surname><given-names>E.</given-names></name> <name><surname>Enoksson</surname><given-names>F.</given-names></name></person-group> (<year>2025</year>). <chapter-title>The devil is in the details: digital assessment opportunities and challenges in higher education&#x2014;a case study from Swedish engineering education</chapter-title>. In <conf-name>EDULEARN25 Proceedings</conf-name> (pp. <fpage>5060</fpage>&#x2013;<lpage>5070</lpage>). <publisher-loc>Valencia, Spain</publisher-loc>: <comment>IATED</comment>.</mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kaya-Capocci</surname><given-names>S.</given-names></name> <name><surname>O&#x2019;Leary</surname><given-names>M.</given-names></name> <name><surname>Costello</surname><given-names>E.</given-names></name></person-group> (<year>2022</year>). <article-title>Towards a framework to support the implementation of digital formative assessment in higher education</article-title>. <source>Educ. Sci.</source> <volume>12</volume>:<fpage>823</fpage>. doi: <pub-id pub-id-type="doi">10.3390/educsci12110823</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Lelescu</surname><given-names>A.</given-names></name> <name><surname>Kabiraj</surname><given-names>S.</given-names></name></person-group> (<year>2024</year>). &#x201C;<chapter-title>Digital assessment in higher education: sustainable trends and emerging frontiers in the AI era</chapter-title>&#x201D; in <source>Digital Assessment in Higher Education: Navigating and Researching Challenges and Opportunities</source> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer Nature Singapore</publisher-name>), <fpage>27</fpage>&#x2013;<lpage>44</lpage>.</mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lodhi</surname><given-names>I.</given-names></name> <name><surname>Ilyassova-Schoenfeld</surname><given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>The Bologna process and its impact on the higher education reforms in Kazakhstan: a case of policy transfer and translations</article-title>. <source>Stud. High. Educ.</source> <volume>48</volume>, <fpage>204</fpage>&#x2013;<lpage>219</lpage>. doi: <pub-id pub-id-type="doi">10.1080/03075079.2022.2124244</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Maleki</surname><given-names>A.</given-names></name></person-group> (<year>2025</year>). <article-title>Towards a more equitable education: a lens into EFL teachers&#x2019; pedagogical barriers in online assessments</article-title>. <source>Int. J. Lifelong Educ.</source> <volume>44</volume>, <fpage>186</fpage>&#x2013;<lpage>205</lpage>. doi: <pub-id pub-id-type="doi">10.1080/02601370.2024.2438127</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rakhmetov</surname><given-names>M.</given-names></name> <name><surname>Abdykerimova</surname><given-names>E.</given-names></name> <name><surname>Alzhanov</surname><given-names>G.</given-names></name> <name><surname>Orazbayeva</surname><given-names>B.</given-names></name> <name><surname>Kuanbayeva</surname><given-names>B.</given-names></name></person-group> (<year>2026</year>). <article-title>Methodological framework for designing AI-based distance learning platforms</article-title>. <source>Int. J. Inf. Educ. Technol.</source> <volume>16</volume>, <fpage>117</fpage>&#x2013;<lpage>125</lpage>. doi: <pub-id pub-id-type="doi">10.18178/ijiet.2026.16.1.2488</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Slade</surname><given-names>C.</given-names></name> <name><surname>Mahon</surname><given-names>K.</given-names></name> <name><surname>Lynagh</surname><given-names>J.</given-names></name> <name><surname>McGrath</surname><given-names>D.</given-names></name> <name><surname>Sheppard</surname><given-names>K.</given-names></name> <name><surname>Ahsan</surname><given-names>S. Q. M.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>A pedagogical evaluation of an institution&#x2019;s digital assessment platform (DAP): integrating pedagogical, technical and contextual factors</article-title>. <source>Australas. J. Educ. Technol.</source> <volume>40</volume>, <fpage>90</fpage>&#x2013;<lpage>104</lpage>. doi: <pub-id pub-id-type="doi">10.14742/ajet.9448</pub-id></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yan</surname><given-names>L.</given-names></name> <name><surname>Suleman Abdullah Alwabel</surname><given-names>A.</given-names></name> <name><surname>Mohamad</surname><given-names>U. H.</given-names></name></person-group> (<year>2025</year>). <article-title>AI-powered education: transforming teacher-student interactions and advancing sustainable learning practices</article-title>. <source>Eur. J. Educ.</source> <volume>60</volume>:<fpage>e70351</fpage>. doi: <pub-id pub-id-type="doi">10.1111/ejed.70351</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zulpykhar</surname><given-names>Z.</given-names></name> <name><surname>Kariyeva</surname><given-names>K.</given-names></name> <name><surname>Sadvakassova</surname><given-names>A.</given-names></name> <name><surname>Zhilmagambetova</surname><given-names>R.</given-names></name> <name><surname>Nariman</surname><given-names>S.</given-names></name></person-group> (<year>2025</year>). <article-title>Assessing the effectiveness of personalized adaptive learning in teaching mathematics at the college level</article-title>. <source>Int. J. Eng. Pedagogy (iJEP)</source> <volume>15</volume>, <fpage>4</fpage>&#x2013;<lpage>22</lpage>. doi: <pub-id pub-id-type="doi">10.3991/ijep.v15i4.52797</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2561398/overview">Gloria Concepcion Tenorio-Sepulveda</ext-link>, Tecnologico Nacional de Mexico/TES de Chalco, Mexico</p></fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3250402/overview">Selcuk Kilinc</ext-link>, University at Albany, United States</p></fn>
</fn-group>
</back>
</article>