<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Educ.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Education</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Educ.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2504-284X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/feduc.2026.1796803</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>The use of Artificial Intelligence in higher vocational colleges in Sichuan, China: a mixed-methods study of adoption, utilization, and policy implications</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Yang</surname>
<given-names>Liu</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3397056"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Balazon</surname>
<given-names>Francis G.</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1906295"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Urban Vocational Colleges of Sichuan</institution>, <city>Chengdu</city>, <state>Sichuan</state>, <country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>College of Teacher Education, Batangas State University, The National Engineering University, Pablo Borbon Campus</institution>, <city>Batangas</city>, <country country="PH">Philippines</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Liu Yang, <email xlink:href="mailto:188556545@qq.com">188556545@qq.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-02">
<day>02</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>11</volume>
<elocation-id>1796803</elocation-id>
<history>
<date date-type="received">
<day>27</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>10</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>16</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Yang and Balazon.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Yang and Balazon</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-02">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>This study investigated the status of Artificial Intelligence (AI) adoption and utilization in higher vocational colleges in Sichuan Province, China, and examined the challenges and policy implications of institutional AI integration. Using a mixed-methods, descriptive-comparative design, we combined a survey of 1,085 respondents (administrators, teachers, and students) with interviews and on-site observations to profile prevalent AI tools, frequency and purposes of use, and the extent of AI integration in teaching, language learning, and research, while also comparing stakeholder perceptions and identifying barriers to effective adoption. Results indicate a moderate level of AI adoption in Sichuan&#x2019;s vocational colleges: approximately 70&#x2013;75% of faculty and administrators reported adopting AI tools, while student uptake was lower, with nearly one-quarter unsure whether they had used AI. Utilization was strongest for routine tasks and language learning (notably AI-supported translation and tutoring), whereas AI use for research-related activities remained comparatively limited. Domestic platforms such as Baidu ERNIE Bot and Alibaba&#x2019;s Qwen were the dominant tools, consistent with China&#x2019;s technology ecosystem and access constraints, whereas foreign models such as ChatGPT were used minimally. Formal AI capacity-building was scarce&#x2014;fewer than 10% reported receiving structured training&#x2014;yet individuals with prior training or institutional support showed higher usage. Statistical analyses further found no significant differences in overall AI utilization levels among students, teachers, and administrators, suggesting a broadly cohesive pattern of adoption across roles. Key challenges included limited training opportunities, inadequate infrastructure and access, uncertainty or resistance toward AI use, and concerns about data privacy and academic integrity. 
Based on these findings, we propose a comprehensive academic policy framework to guide the ethical and effective integration of AI across teaching, learning, research, and administration, emphasizing AI literacy programs, infrastructure enhancement, clear usage guidelines, and sustained implementation support, aligned with China&#x2019;s educational modernization agenda.</p>
</abstract>
<kwd-group>
<kwd>Artificial Intelligence</kwd>
<kwd>educational policy</kwd>
<kwd>mixed methods</kwd>
<kwd>technology adoption</kwd>
<kwd>vocational education</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="1"/>
<table-count count="2"/>
<equation-count count="0"/>
<ref-count count="19"/>
<page-count count="15"/>
<word-count count="13455"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Higher Education</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Artificial Intelligence (AI) has rapidly become a transformative force in higher education worldwide, offering tools for personalized learning, intelligent tutoring, administrative automation, and data-driven decision-making (<xref ref-type="bibr" rid="ref13">Ogbuoka, 2025</xref>; <xref ref-type="bibr" rid="ref21">Vijayalakshmi et al., 2025</xref>). In recent years, China has placed significant emphasis on integrating AI into education as part of a national strategy for educational modernization. Policy initiatives such as the Ministry of Education&#x2019;s Action Plan for Improving the Quality and Excellence of Vocational Education (2020&#x2013;2023) and the revised Vocational Education Law (2022) explicitly encourage the adoption of AI and other emerging technologies in teaching and campus management (Ministry of Education of the People&#x2019;s Republic of China, <xref ref-type="bibr" rid="ref9">2020</xref>; Ministry of Education of the People&#x2019;s Republic of China, <xref ref-type="bibr" rid="ref10">2022</xref>). These efforts reflect a recognition that AI can enhance educational quality and institutional efficiency, from AI-powered learning analytics that improve student support to automated administrative systems that streamline campus operations (<xref ref-type="bibr" rid="ref3">Crompton and Burke, 2023</xref>; <xref ref-type="bibr" rid="ref11">Ocen et al., 2025</xref>).</p>
<p>Within higher education, vocational colleges occupy a unique niche focused on practical skills and workforce development. Sichuan Province, a major hub of vocational education in China, has 86 higher vocational institutions serving hundreds of thousands of students. Integrating AI in this context holds great promise for enriching technical skill training and improving educational outcomes. AI tools can provide adaptive learning experiences tailored to individual student needs, thereby potentially increasing engagement and competency in vocational fields (<xref ref-type="bibr" rid="ref11">Ocen et al., 2025</xref>). For example, AI-driven tutoring systems and language learning applications can help vocational students practice technical terminology or foreign languages interactively, while intelligent assessment platforms can offer instant feedback on practical exercises, enhancing mastery of skills. At the institutional level, AI analytics can optimize student services and resource allocation &#x2013; early implementations in Chinese colleges have used AI to monitor student progress and alert staff to those needing support, improving retention and academic success (<xref ref-type="bibr" rid="ref18">Selznick, 2022</xref>; <xref ref-type="bibr" rid="ref9001">Allil, 2004</xref>).</p>
<sec id="sec2">
<label>1.1</label>
<title>Why private higher vocational colleges in Sichuan?</title>
<p>Private higher vocational colleges in China often differ from public institutions in governance arrangements, funding sources, and technology investment capacity. Compared with public colleges, private institutions may have greater operational flexibility and market responsiveness, but they may also face tighter budgets, less stable public funding, and uneven access to advanced infrastructure and professional development. These differences can shape institutional readiness for AI and the pace at which innovations diffuse across staff and students. Sichuan Province provides a policy-relevant setting because it hosts a large vocational education system spanning urban and less-developed areas, making it an informative case for understanding adoption under heterogeneous resource conditions.</p>
<p>Despite this potential, empirical research on AI adoption in China&#x2019;s higher vocational colleges remains limited. Much of the existing scholarship on AI in Chinese education concentrates on universities or general K&#x2013;12 settings, often emphasizing technical capabilities over on-the-ground usage patterns (<xref ref-type="bibr" rid="ref14">Prasetya et al., 2025</xref>). Vocational institutions face distinctive challenges and opportunities: they prioritize hands-on training and industry alignment, operate with different administrative structures (often involving industry partners), and cater to students who may have varied academic preparation. These factors can influence how AI is implemented and perceived. For instance, vocational colleges may readily adopt AI for technical training simulations or language translation tools, yet faculty and students might be less familiar with using AI for research or academic writing tasks typical in universities. Moreover, regional disparities in technology access and training can affect adoption. Sichuan&#x2019;s vocational colleges, especially private ones, may not have equal resources or exposure to AI as top-tier universities in major Chinese cities, potentially leading to uneven uptake.</p>
<p>This study addresses a critical knowledge gap by systematically examining the use of AI in higher vocational colleges in Sichuan Province. It provides a localized, data-driven understanding of how administrators, teachers, and students in these institutions are engaging with AI, and what barriers they face. The research was guided in part by technology adoption theories such as the Technology Acceptance Model (<xref ref-type="bibr" rid="ref4">Davis, 1989</xref>) and Diffusion of Innovations (<xref ref-type="bibr" rid="ref16">Rogers, 2003</xref>), which suggest that users&#x2019; uptake of new technologies is influenced by perceived usefulness, ease of use, and social influences. These frameworks informed our exploration of whether different stakeholder groups (management, faculty, and students) differ in their readiness to adopt AI and how factors like training or institutional support impact usage.</p>
</sec>
<sec id="sec3">
<label>1.2</label>
<title>Theoretical framework and research gap</title>
<p>This study is guided by two complementary perspectives on technology adoption. First, the Technology Acceptance Model (TAM) posits that perceived usefulness and perceived ease of use shape individuals&#x2019; intentions and actual use of a technology. Second, Diffusion of Innovations (DOI) emphasizes how innovations spread through social systems as stakeholders evaluate relative advantage, compatibility with existing practices, complexity, trialability, and observability. Together, TAM and DOI provide a lens for interpreting why AI may be adopted for some domains (e.g., routine tasks and language learning) but not others (e.g., research), and why institutional support and peer norms matter in vocational settings.</p>
<p>Despite increasing research on AI in higher education, three gaps motivate the present study. (1) Empirically, evidence from China&#x2019;s higher vocational sector&#x2014;especially private institutions&#x2014;is comparatively scarce, even though these colleges educate large populations and often face distinct resource constraints. (2) Methodologically, few studies triangulate stakeholder perspectives (administrators, teachers, students) using an explicit mixed-methods design that links survey patterns with qualitative explanations. (3) Contextually, policy and access restrictions on foreign AI tools create a reliance on domestic platforms, yet the educational implications of this reliance for pedagogical innovation and international benchmarking remain under-discussed. By addressing these gaps, the study contributes a theory-informed, context-sensitive account of AI adoption and utilization in private vocational colleges in Sichuan.</p>
<p>Key constructs are used consistently throughout the manuscript. AI adoption refers to whether a respondent has used at least one AI-enabled tool for learning, teaching, research, or administrative tasks. AI utilization refers to the frequency and extent of AI use in specific domains (teaching, language learning, and research). Institutional readiness refers to the availability of infrastructure, access to approved platforms, training opportunities, and support mechanisms that enable responsible AI integration. AI literacy refers to users&#x2019; ability to understand, evaluate, and apply AI tools effectively and ethically, including awareness of limitations, bias, privacy risks, and academic integrity expectations.</p>
<p><italic>Objectives</italic>: the overarching aim was to evaluate the current landscape of AI integration in Sichuan&#x2019;s higher vocational colleges and derive evidence-based recommendations for policy and practice. Specifically, the study sought to:</p>
<list list-type="order">
<list-item>
<p>Describe the status of AI adoption in higher vocational colleges, in terms of the AI models and tools adopted, frequency of use, primary purposes of use, and the availability of training and support for AI.</p>
</list-item>
<list-item>
<p>Assess the extent of AI utilization in key educational areas &#x2013; teaching practices, language learning, and research &#x2013; as perceived and reported by administrators, teachers, and students.</p>
</list-item>
<list-item>
<p>Compare stakeholder perspectives by determining whether there are significant differences among administrators, teachers, and students in their reported extent of AI utilization.</p>
</list-item>
<list-item>
<p>Examine the relationship between adoption factors (such as prior AI exposure or training) and the extent of AI use, i.e., whether individuals or institutions with certain adoption statuses exhibit greater AI integration in practice.</p>
</list-item>
<list-item>
<p>Identify challenges and issues encountered in the use of AI within these colleges, including any technical, pedagogical, or organizational barriers limiting effective implementation.</p>
</list-item>
<list-item>
<p>Formulate policy guidelines for the integration of AI in higher vocational education, based on the study findings &#x2013; outlining strategies for training, infrastructure, ethical use, and support mechanisms to enhance AI adoption responsibly.</p>
</list-item>
</list>
</sec>
</sec>
<sec sec-type="methods" id="sec4">
<label>2</label>
<title>Methods</title>
<sec id="sec5">
<label>2.1</label>
<title>Research design</title>
<p>A mixed-methods research design was employed, combining quantitative survey research with qualitative interviews and observations. This design allowed for a comprehensive understanding of AI usage, capturing broad usage patterns through the survey while also exploring deeper insights and context <italic>via</italic> qualitative data (<xref ref-type="bibr" rid="ref9002">Creswell and Plano Clark, 2011</xref>). The quantitative component utilized a descriptive-comparative survey approach, appropriate for assessing the status of AI adoption and comparing perceptions across different groups. The qualitative component was used to complement and explain the quantitative findings, providing illustrations of how AI is used and the challenges faced in practice.</p>
<p>Following Creswell and Plano Clark&#x2019;s typology, the study adopts an explanatory sequential mixed-methods design: quantitative survey results were analyzed first to establish adoption patterns and group comparisons, followed by qualitative interviews and observations to explain, contextualize, and elaborate the quantitative findings (e.g., reasons for low research use and concerns about integrity or infrastructure). Integration occurred at the interpretation stage by using qualitative themes to account for and nuance key statistical patterns.</p>
</sec>
<sec id="sec6">
<label>2.2</label>
<title>Study context and scope</title>
<p>This study was conducted in late 2024 in Sichuan Province, China, focusing on private higher vocational colleges. The scope covers tertiary-level vocational education and examines AI adoption and utilization among three stakeholder groups&#x2014;administrators, teachers, and students&#x2014;across teaching, language learning, research, and administrative support. The analysis emphasizes current adoption patterns and enabling and constraining factors (e.g., training, infrastructure, platform access, and institutional support), as well as policy implications related to reliance on domestic AI ecosystems under restrictions on foreign AI services. The study does not evaluate long-term outcomes (e.g., graduate employment) and does not include public vocational institutions, secondary vocational schools, or research universities; therefore, findings should be interpreted within these boundaries.</p>
</sec>
<sec id="sec7">
<label>2.3</label>
<title>Participants and sampling</title>
<p>The participants were drawn from administrators, faculty (teachers), and students of the selected 35 private higher vocational colleges in Sichuan. Using purposive sampling, the research team selected institutions that reflected a variety of disciplines and levels of AI implementation (e.g., some known to have introduced AI initiatives and others just beginning), to ensure a diverse sample. Within each chosen college, administrators (such as academic deans, department heads, and IT managers), teaching staff, and students were invited to participate.</p>
<p>A total of 1,085 respondents completed the survey: 327 administrators (30.1% of the sample), 374 teachers (34.5%), and 384 students (35.4%). This large sample provided robust quantitative data and enabled comparative analysis between the three stakeholder groups. In addition, follow-up semi-structured interviews were conducted with a subset of participants (approximately 10 administrators and 15 teachers) who volunteered to discuss their experiences further. These interviews, along with on-site observations at a few colleges (such as observing AI-equipped language labs and smart classrooms), generated qualitative data on how AI tools were being used in daily practice and the perceptions surrounding them.</p>
</sec>
<sec id="sec8">
<label>2.4</label>
<title>Instruments and data collection</title>
<p>Multiple instruments were utilized to gather data:</p>
<list list-type="bullet">
<list-item>
<p><italic>Survey questionnaire</italic>: The main instrument was a structured questionnaire developed by the researchers to capture key information on AI adoption and utilization. It was divided into sections tailored for each respondent group (students, teachers, administrators), with overlapping core items to enable comparison. The survey collected data on: (a) which AI tools or platforms respondents have used; (b) how frequently they use AI (with options ranging from &#x201C;daily&#x201D; to &#x201C;never&#x201D;); (c) for what purposes they use AI (e.g., for class preparation, language practice, administrative data analysis, etc.); (d) respondents&#x2019; ratings of the extent of AI integration in teaching, language learning, and research activities (using Likert-scale items); and (e) any training or support they have received related to AI. It also included a few open-ended questions inviting comments on challenges faced and suggestions for better AI utilization. The questionnaire was initially drafted based on literature and expert input, then refined through a pilot test with a small group of similar respondents (a few teachers, students, and administrators from non-sampled colleges in Sichuan). The pilot feedback helped clarify questions and ensure the content validity of the instrument. After revisions, the final survey was administered online (via a secure web link) to the target participants. The online format facilitated reaching participants across different colleges efficiently. To encourage honest responses, anonymity was ensured &#x2013; no identifying personal data was collected except broad demographics (role and college). Respondents were informed that participation was voluntary and their answers would be kept confidential, used only for research purposes.</p>
</list-item>
</list>
<p>Supplementary Table S1 provides sample questionnaire items and shows how key constructs were operationalized.</p>
<list list-type="bullet">
<list-item>
<p><italic>Interview guides</italic>: For the qualitative interviews, an interview guide with open-ended questions was used to delve into topics such as personal experiences with using AI tools, perceived benefits and drawbacks of AI in their college, specific instances of AI aiding or hindering their work or learning, and thoughts on what policy or support is needed. The interviews, conducted in person or via video call, were recorded with consent and transcribed for analysis.</p>
</list-item>
<list-item>
<p><italic>Observation checklists</italic>: During on-site visits to a few representative colleges, the researcher used a checklist to note the presence of AI technologies (e.g., AI language learning labs, smart classroom systems, and administrative AI software) and to observe how these were being utilized by staff or students in real time. These observations provided contextual understanding &#x2013; for example, seeing an AI-driven English pronunciation practice software being used in a language class, or an administrative office using an AI chatbot to answer student queries.</p>
</list-item>
</list>
</sec>
<sec id="sec9">
<label>2.5</label>
<title>Data analysis</title>
<p>For the quantitative data from the questionnaire, responses were compiled and analyzed using statistical software (SPSS). Before analysis, data cleaning was performed (removing any incomplete responses and checking for inconsistencies). Descriptive statistics were computed to summarize AI adoption rates (as percentages of each group), usage frequency distributions, and mean ratings of AI utilization in each domain (teaching, language, research). These provided an overall picture of how widespread AI use was and the general extent of integration.</p>
<p>To address the comparative objectives, inferential statistical tests were applied: one-way ANOVA was used to test for significant differences among the three respondent groups (students, teachers, administrators) in their mean ratings of AI utilization extent. Each domain (teaching, language learning, and research) was tested separately. A significance level of 0.05 was adopted; thus, a <italic>p</italic>-value &#x003C; 0.05 would indicate a statistically significant difference in perceptions between at least two groups. If ANOVA had found significant differences, further post-hoc tests would determine which groups differed. Additionally, for categorical adoption metrics (like whether one has adopted AI or not), a chi-square test was considered to compare proportions across groups. The relationship between adoption-related factors and utilization (Objective 4) was examined through Pearson correlation analysis and cross-tabulations. For instance, the correlation between the amount of AI training received (measured ordinally) and the extent of AI use in teaching was calculated. Similarly, we analyzed whether those who identified as AI adopters or frequent users also reported higher integration levels. This helped identify factors that might predict greater use of AI (e.g., does having formal training correlate with using AI more extensively?).</p>
<p>The qualitative data from interviews and open-ended survey responses were analyzed using a thematic content analysis approach. Transcripts and written comments were coded inductively to identify recurring themes related to challenges of AI use, attitudes toward AI, and suggestions for improvement. Two researchers independently reviewed the qualitative data to derive initial codes (such as &#x201C;lack of training,&#x201D; &#x201C;fear of student misuse,&#x201D; &#x201C;infrastructure issues,&#x201D; &#x201C;success story&#x2014;AI improved efficiency,&#x201D; etc.), then discussed and merged these into broader themes. Key illustrative quotes were extracted to highlight common sentiments or unique insights. The qualitative findings were used to contextualize and explain the quantitative results &#x2013; for example, if many respondents rated AI usage in research low, interview data might reveal why (perhaps lack of skills or tools), providing depth to the numeric trend.</p>
<p>Coding proceeded in three steps. First, both coders conducted open coding on an initial subset of transcripts and open-ended survey responses to generate candidate codes. Second, a shared codebook was developed with definitions and inclusion/exclusion rules, and the remaining data were coded using constant comparison to refine themes. Third, discrepant interpretations were resolved through discussion and iterative recoding until agreement was reached; analytic memos were used to document decisions and to ensure that themes were grounded in multiple data excerpts. This process increased transparency and reduced the risk that themes reflected a single researcher&#x2019;s interpretation.</p>
</sec>
<sec id="sec10">
<label>2.6</label>
<title>Ethical considerations</title>
<p>The study protocol was reviewed and approved by the relevant institutional ethics committee(s) and participating colleges. All participants were adults (&#x2265;18&#x202F;years) and provided informed consent before participation. Participation was voluntary, and respondents could skip any question or withdraw at any time without penalty. The online survey was anonymous and did not collect identifying personal data beyond broad role categories. Interviewees provided separate consent for audio recording and were assigned pseudonyms in transcripts. All data were stored on password-protected devices accessible only to the research team, and findings are reported in aggregate to prevent identification of individuals or institutions.</p>
</sec>
</sec>
<sec sec-type="results" id="sec11">
<label>3</label>
<title>Results</title>
<sec id="sec12">
<label>3.1</label>
<title>AI adoption status and usage patterns</title>
<sec id="sec13">
<label>3.1.1</label>
<title>Overall adoption rates</title>
<p>Many respondents across all groups had some experience with AI, but adoption rates varied by role. Approximately three-quarters of teachers reported that they had adopted or used AI tools in their professional activities, the highest rate among the groups. Nearly as high, about 70% of administrators indicated they use AI in their work. In contrast, student adoption was somewhat lower&#x2014;an estimated 60% of students acknowledged using AI tools in their studies. Notably, a significant portion of students lacked clear awareness: roughly 25% of student respondents answered &#x201C;not sure&#x201D; when asked if they had adopted AI, a higher uncertainty rate than that of teachers or administrators (about 20% in those groups). This suggests that a subset of students may be using AI-driven features (e.g., built-in functions in apps or websites) without realizing that they count as &#x201C;AI,&#x201D; pointing to a gap in AI literacy among learners. Only a small minority of respondents outright said they had never used AI (e.g., about 6% of teachers explicitly reported not adopting any AI, and around 10% of administrators, often those in roles not yet touched by AI). In sum, teachers led in AI adoption (roughly three out of four using AI), with administrators only slightly behind, while students lagged somewhat, with many unsure about AI usage.</p>
<p><xref ref-type="table" rid="tab1">Table 1</xref> Respondent demographics, AI adoption, and AI training exposure (survey, <italic>n</italic>&#x202F;=&#x202F;1,085). Values are percentages within each role and are rounded to the nearest whole number as reported by respondents.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>AI Adoption and training exposure by role in higher vocational colleges.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Role</th>
<th align="center" valign="top">
<italic>n</italic>
</th>
<th align="center" valign="top">% of sample</th>
<th align="center" valign="top">Adopted AI (%)</th>
<th align="center" valign="top">Not sure (%)</th>
<th align="center" valign="top">Formal AI training (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Administrators</td>
<td align="center" valign="top">327</td>
<td align="center" valign="top">30.1</td>
<td align="center" valign="top">&#x2248;70</td>
<td align="center" valign="top">&#x2248;20</td>
<td align="center" valign="top">&#x2248;9</td>
</tr>
<tr>
<td align="left" valign="top">Teachers</td>
<td align="center" valign="top">374</td>
<td align="center" valign="top">34.5</td>
<td align="center" valign="top">&#x2248;75</td>
<td align="center" valign="top">&#x2248;20</td>
<td align="center" valign="top">&#x2248;9</td>
</tr>
<tr>
<td align="left" valign="top">Students</td>
<td align="center" valign="top">384</td>
<td align="center" valign="top">35.4</td>
<td align="center" valign="top">&#x2248;60</td>
<td align="center" valign="top">&#x2248;25</td>
<td align="center" valign="top">&#x003C;10</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec14">
<label>3.1.2</label>
<title>Common AI tools and platforms</title>
<p>Across all groups, AI usage was dominated by Chinese-developed platforms, reflecting the national tech environment and restrictions on foreign AI services. The most used AI systems were Baidu&#x2019;s ERNIE Bot and Alibaba&#x2019;s Qwen (Tongyi Qianwen). Over one-third of each group selected ERNIE Bot as one of their primary AI tools, and about a quarter reported using Qwen. Another widely used tool was iFlytek&#x2019;s SparkDesk (an AI platform popular for language and education support), used by roughly 18% of respondents as a main AI application. These domestic AI models were favored due to their availability in China, Chinese language proficiency, and institutional endorsements. In contrast, the usage of foreign AI models was very limited. Fewer than 5% of respondents reported regularly using ChatGPT or similar international AI systems&#x2014;for example, only a handful of teachers and students (on the order of 2%&#x2013;4% of each group) indicated they had access to or frequently used ChatGPT. Interview comments confirmed that most colleges rely on approved domestic AI platforms; access to tools like ChatGPT is often blocked or discouraged due to regulatory constraints, and language barriers also make Chinese platforms more practical for daily use. A small number of tech-savvy individuals experimented with VPNs or unofficial access to foreign models, but this was the exception rather than the norm. Aside from large language models, many respondents also used AI-powered applications specific to their tasks. For instance, machine translation software (such as Youdao or Baidu Translate) was extremely common among students for language learning and completing English assignments, and automated proofreading/grammar checkers (often integrated into word processors) were used by both students and teachers. 
In summary, the AI adoption landscape in these colleges is characterized by heavy reliance on domestic AI tools (with ERNIE Bot leading in popularity) and a near-absence of foreign AI usage due to accessibility issues and policy blocks.</p>
</sec>
<sec id="sec15">
<label>3.1.3</label>
<title>Frequency of AI use</title>
<p>AI usage tended to be occasional rather than constant for most respondents. The survey asked how often individuals engage with AI tools, and the distributions revealed that only a minority are daily heavy users. For instance, only about 20% of students said they use AI daily (often for routine tasks like translation or homework help each day), whereas approximately another 20% of students reported using AI only &#x201C;monthly or less.&#x201D; The largest segment of students (around 60%) fell in between&#x2014;using AI a few times a week or &#x201C;occasionally&#x201D; as needed. Teachers showed a somewhat higher regular usage than students: many teachers incorporate AI weekly for lesson planning or grading assistance, with a subset (~25%&#x2013;30%) using AI tools daily (for example, using an AI tool every day to generate quizzes or to interact on an educational platform). Still, a considerable number of teachers (roughly one-third) said their AI use was infrequent (monthly or rare), which may include educators teaching subjects where AI tools are less applicable or who are still experimenting cautiously. Administrators had similar patterns to teachers: most administrators used AI weekly in their workflow (often for tasks like data analysis during weekly reports, scheduling, or handling student services through AI systems), and about 20% were daily users (likely those in IT or data management roles). Another subset of administrators (roughly 20%&#x2013;25%) engaged with AI only rarely or not at all, possibly those whose job functions had not yet been augmented by AI. The overall impression is that regular but not pervasive use is the norm&#x2014;AI is commonly used perhaps a few times per week as part of specific tasks, rather than continuously throughout the day. This indicates that while AI tools have been integrated into routines, their use remains targeted to activities rather than across all aspects of work or study.</p>
</sec>
<sec id="sec16">
<label>3.1.4</label>
<title>Primary purposes of use</title>
<p>Respondents reported using AI for a range of purposes aligned with their roles. Among students, the top uses of AI were for language learning and coursework assistance. Nearly all students (an overwhelming ~90%) indicated they use AI-based tools for language-related tasks&#x2014;for example, using translation apps, AI chatbots for practicing English conversation, or intelligent tutoring systems for studying language courses. Many students also use AI informally to aid their studies: common examples (from open-ended responses) included using AI to summarize articles or generate ideas for assignments, employing math-solving apps or code auto-completion tools for technical subjects, or consulting AI assistants to get quick answers or explanations. However, students used AI less in collaborative or project-based contexts&#x2014;only a small fraction had experience with AI in group projects or creative work, indicating that AI&#x2019;s role in fostering collaboration or higher-order projects was still nascent.</p>
<p>For teachers, the dominant uses of AI are centered on instructional support and assessment tasks. Teachers commonly leveraged AI to generate teaching materials (over half the teachers mentioned using AI to find new examples, create practice questions or worksheets, or produce lecture summaries). Many teachers (around 60%&#x2013;70%) also reported using AI for grading and assessment support&#x2014;for instance, using automated grading tools or having AI help draft quiz questions and evaluate student responses. Indeed, teachers gave moderately high ratings (averaging around 3.0 on a 4-point scale) to statements like &#x201C;AI helps in creating and grading assessments,&#x201D; reflecting that they find AI valuable in reducing routine workload [this aligns with global reports of educators using AI to streamline repetitive tasks and focus more on teaching (<xref ref-type="bibr" rid="ref19">Tomar and Verma, 2021</xref>)]. AI was also used by some teachers for personalizing learning, such as identifying resources for different student needs, though this was less developed: on average, teachers rated AI&#x2019;s role in tailoring instruction to individual students only between &#x201C;small&#x201D; to &#x201C;moderate&#x201D; extent. This indicates that adaptive learning technologies, while available, were not yet widely or effectively used to differentiate instruction in these colleges&#x2014;a finding echoing that personalized AI-driven teaching remains an area of untapped potential. Additionally, only a minority of teachers had tried AI tools for promoting student collaboration (e.g., AI-supported group work platforms), suggesting that AI&#x2019;s use to enhance interactive or collaborative learning was still limited.</p>
<p>Administrators primarily utilized AI for institutional and student support functions. Notably, an overwhelming 90% of administrator respondents said they use AI in student services&#x2014;this was the highest among any reported purpose. This implies that AI tools like campus chatbots, automated enrollment systems, or AI-driven library and counseling services have become nearly universal in how administrators interact with students outside the classroom. Examples provided by administrators included AI chatbots handling common inquiries (e.g., answering FAQs on enrollment, dormitory information, or schedules), AI systems for monitoring student attendance and alerting staff of issues, and algorithm-driven systems matching students to resources (like recommending scholarships or internships). A significant proportion of administrators also use AI for data management and analysis: about 36% reported using AI for institutional data analysis (such as analyzing enrollment trends, processing survey feedback, or generating reports). While that percentage indicates that advanced analytics tools are not yet used by all administrators, it reflects a growing adoption of AI in decision-support roles. Fewer administrators (around 39%) used AI for content creation, since content generation is less central to their duties than it is for teachers or students; however, some did use AI to draft documents, emails, or marketing materials for their college. In summary, for administrators, AI has become integral in delivering and managing student services, and it is also emerging as a tool for improving administrative decision-making and efficiency (e.g., automating scheduling, predicting student needs).</p>
</sec>
<sec id="sec17">
<label>3.1.5</label>
<title>Training and support</title>
<p>One of the stark findings across all groups was the lack of formal training in AI. Only a small minority of participants had ever received structured training or professional development on using AI tools. For instance, merely 9% of teachers said they had attended any formal training program on AI integration. A virtually identical proportion of administrators (~9%) had formal training. Students reported slightly lower formal training exposure; in the student group, fewer than 10% had participated in any workshop or course specifically on AI usage (aside from their regular curriculum). While some individuals engaged in self-learning (e.g., watching online tutorials or learning from colleagues), these were also in the minority&#x2014;only a slightly larger fraction of respondents (perhaps 15%&#x2013;20% in each group) indicated they learned to use AI on their own or from peers. The overwhelming majority, particularly over half of the students, reported no training at all in how to use AI tools. This lack of capacity-building was reflected in qualitative comments: many teachers admitted they were self-taught in using AI, often learning through trial and error or informal peer guidance. Administrators, too, noted that any expertise they had with AI was acquired on the job without formal instruction. The absence of training contributed to uneven or suboptimal usage; for example, some teachers might only use very basic features of an AI tool or avoid more advanced applications because they are not comfortable or aware of them (as one teacher put it, without training, &#x201C;we use only a fraction of an AI tool&#x2019;s features&#x201D;). 
Moreover, respondents felt that insufficient training and support were one reason AI had not yet deeply transformed pedagogical practices&#x2014;without knowing how to integrate AI fully into lesson design or research, users stuck to surface-level uses (like using AI for grammar checks or slide generation but not for interactive pedagogy or data analysis). These findings indicate a critical professional development gap: the colleges had not systematically prepared their staff or students to leverage AI, thereby limiting the depth of AI integration. It also highlights a need for institutional support (e.g., IT support staff, workshops) to build user confidence and skills in using AI effectively.</p>
</sec>
</sec>
<sec id="sec18">
<label>3.2</label>
<title>Extent of AI utilization in teaching, language learning, and research</title>
<p>Using the survey&#x2019;s 4-point Likert ratings (1&#x202F;=&#x202F;Not at all, 4&#x202F;=&#x202F;To a great extent), respondents reported a generally moderate level of AI integration across the three domains (<xref ref-type="fig" rid="fig1">Figure 1</xref>). The pattern indicates that AI support is most pronounced in language learning, followed by teaching practices, while research activities show the lowest reported integration, suggesting that current adoption is concentrated more on day-to-day learning support and instructional efficiency than on research-oriented applications.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Comparative mean AI utilization in teaching, language learning, and research.</p>
</caption>
<graphic xlink:href="feduc-11-1796803-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart titled "AI utilization by domain (reported means)" compares mean AI utilization scores, showing language learning highest at approximately 3.3, teaching near 2.9, and research lowest at about 2.6 on a one to four scale.</alt-text>
</graphic>
</fig>
<sec id="sec19">
<label>3.2.1</label>
<title>Teaching practices</title>
<p>Overall, AI&#x2019;s integration into teaching and classroom practices was rated as moderate. Teachers and administrators gave mean ratings typically around 2.8 to 3.0 on the 4-point scale for the extent to which AI is used in routine teaching tasks. This suggests that AI has begun to make inroads in certain teaching activities, particularly those involving automation and efficiency, but it has not revolutionized day-to-day instruction. Specifically, AI is effectively handling some routine instructional tasks: tasks such as generating quizzes, grading objective tests, or providing additional practice materials are commonly aided by AI, and educators acknowledged these contributions. For instance, many teachers agreed that AI significantly aids in assessment creation and grading, freeing them from some repetitive work. The data showed high utilization of AI for assessment&#x2014;one of the highest-rated teaching sub-areas was AI-assisted exam generation and automated grading, which teachers rated near the top of the moderate range (e.g., average around 3.0). This aligns with global trends that assessment and feedback are among the first teaching areas where AI is adopted (<xref ref-type="bibr" rid="ref11">Ocen et al., 2025</xref>).</p>
<p>However, the impact of AI on more dynamic or student-centered teaching methods was reported to be limited so far. Personalized instruction, where AI would tailor learning materials to individual student needs, had the lowest ratings among teaching-related items. Across respondent groups, the weighted mean for statements like &#x201C;AI is used to adapt teaching to each student&#x2019;s learning level&#x201D; was only about 2.8 (just at the lower end of &#x201C;moderate extent&#x201D; or even &#x201C;small extent&#x201D;). This indicates that while adaptive learning technologies exist, they were not widely implemented in these colleges. Teachers might not have the tools or training to truly personalize learning with AI beyond one-size-fits-all content. Collaborative learning enhancements through AI were also not strongly evident: the use of AI to facilitate student collaboration or interactive learning (for example, through AI-moderated forums or group project tools) was rated around the midpoint (neither low nor high). This suggests that core pedagogical approaches (like group discussions, project-based learning, etc.) had not yet been significantly transformed by AI in the classroom setting. In essence, teaching integration of AI remains focused on efficiency gains (automation of tasks) rather than fundamental pedagogical change. Many teachers still rely primarily on traditional methods for instruction delivery, bringing in AI tools selectively for convenience and support.</p>
</sec>
<sec id="sec20">
<label>3.2.2</label>
<title>Language learning</title>
<p>Among the three domains, language learning stood out as the most AI-integrated area. Respondents across all groups indicated a relatively great extent of AI utilization in language education activities. Students and teachers gave higher ratings for AI use in language learning tasks than for teaching or research tasks. This is reflected in the pervasive use of translation apps, language AI tutors, and speech recognition tools for language practice. For example, almost every student reported using some AI-based language tool, and teachers of English or other languages frequently assign or recommend such tools. The average extent rating for AI in language learning was in the upper-moderate range (approaching &#x201C;to a great extent&#x201D;). Many students agreed that AI tools (like intelligent tutoring systems for language practice) form a regular part of their language study routine. Teachers similarly noted that AI had become a valuable supplement for language instruction&#x2014;for instance, AI-driven language learning platforms that provide adaptive vocabulary and grammar exercises, pronunciation feedback via AI speech analysis, or chatbots for conversational practice. These tools allow students to get instant corrections and practice beyond classroom hours, which was cited as a major benefit. One concrete finding was that adaptive learning systems in language education are widespread: many of both students and teachers indicated that AI provides individualized support in language learning to at least a moderate or high extent. This might include systems that adjust the difficulty of exercises based on student performance or recommend focused practice where a student is struggling. Such adaptive features help address varying proficiency levels, which is crucial in vocational colleges where students&#x2019; language backgrounds can differ. 
The high integration in language learning is likely because the available AI applications for language are mature and user-friendly, and language learning has well-defined tasks (translation, vocabulary practice, etc.) that AI can assist with effectively. Furthermore, numerous mobile apps and online platforms for language learning (in both Chinese and English) are widely popular and easy to adopt, often without requiring institutional provisioning, which further contributes to uptake. Therefore, students organically gravitate to these AI tools to help with their English classes or learning Mandarin (for non-native speakers). The data confirm that language learning is the area where AI&#x2019;s educational impact is currently the most visible in Sichuan&#x2019;s vocational colleges.</p>
</sec>
<sec id="sec21">
<label>3.2.3</label>
<title>Research activities</title>
<p>In contrast to language learning, AI utilization in research was the least developed area among those studied. Respondents across all groups indicated that using AI for research purposes was still relatively limited. On average, the extent of AI integration into research-related tasks was rated in the lower-moderate range. For instance, students and teachers gave modest ratings to items like &#x201C;AI is used to assist in conducting research or projects.&#x201D; Many students in vocational colleges do not engage in traditional research extensively (as their programs are skill-focused), which partly explains the lower use. Nonetheless, for those involved in research or project work (such as thesis projects or faculty research), AI has begun to play some role. The most common way AI was used in research was for information gathering and literature review. Faculty and even students noted that AI tools (like intelligent literature search engines or summarization tools) helped them find relevant academic articles or summarize large amounts of information&#x2014;essentially accelerating the initial research phase. This is consistent with observations in higher education that AI can efficiently sift through databases and generate literature overviews. Indeed, respondents appreciated AI&#x2019;s ability to reduce information overload by quickly providing summaries or identifying key sources, and this was one area of research support that got relatively positive feedback. However, more advanced research applications of AI (such as using AI for data analysis, simulations, or writing drafts) were not common. Only a minority of faculty had tried AI-based data analysis for research projects, and students seldom used AI beyond searching for information or checking grammar in their papers. 
The limited use in research is likely due to both lack of awareness and the nature of vocational programs&#x2014;research is not a primary focus for most vocational students, and faculty may not have access to specialized AI research tools. Furthermore, some respondents expressed caution about relying on AI for research, due to concerns about accuracy or academic integrity (e.g., plagiarism risks if AI writes text). Overall, the findings highlight an untapped potential: while AI could greatly aid research (for example, through machine learning analysis of experimental data, or generating insights from big datasets), such applications were largely underutilized in this context as of the study period.</p>
</sec>
</sec>
<sec id="sec22">
<label>3.3</label>
<title>Comparisons among administrators, teachers, and students</title>
<p>One of the study&#x2019;s objectives was to determine if administrators, teachers, and students differed in how much they use or benefit from AI. Based on both descriptive and inferential analyses, the overall perceptions of AI utilization were remarkably consistent across the three groups, with no statistically significant differences in the rated extent of AI use in teaching, language, or research domains.</p>
<p>The survey&#x2019;s Likert-scale data for the extent of AI use were compared using one-way ANOVAs. For each domain&#x2014;teaching practices, language learning, research&#x2014;the mean ratings given by students, by teachers, and by administrators were statistically compared. The results showed that any numeric differences were small and did not reach significance at the 0.05 level. For example, teachers on average might have rated AI utilization in teaching slightly higher than students did, but the ANOVA yielded <italic>p</italic>-values (e.g., <italic>p</italic>&#x202F;&#x2248;&#x202F;0.78 for teaching, <italic>p</italic>&#x202F;&#x2248;&#x202F;0.80 for language learning, and <italic>p</italic>&#x202F;&#x2248;&#x202F;0.95 for research) indicating these differences are not significant. In practical terms, this means students, teachers, and administrators all reported similar levels of AI use in those areas. All groups generally characterized AI integration as moderate in extent; none of the groups, on average, claimed extremely high or extremely low use relative to the others.</p>
<p><xref ref-type="table" rid="tab2">Table 2</xref> Key quantitative outcomes (group comparisons and association between training/support and AI use). Effect sizes are reported to support interpretation.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Summary of statistical tests on AI utilization by role and training/support correlations.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Analysis</th>
<th align="left" valign="top">Outcome</th>
<th align="center" valign="top">Test/statistic</th>
<th align="center" valign="top"><italic>p</italic>-value</th>
<th align="center" valign="top">Effect size</th>
<th align="left" valign="top">Interpretation</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">One-way ANOVA</td>
<td align="left" valign="top">Teaching utilization across roles</td>
<td align="center" valign="top"><italic>F</italic> (2,1,082)&#x202F;&#x2248;&#x202F;0.249</td>
<td align="center" valign="top">&#x2248;0.78</td>
<td align="center" valign="top"><italic>&#x03B7;</italic><sup>2</sup> &#x2248;&#x202F;0.00046</td>
<td align="left" valign="top">No meaningful group differences</td>
</tr>
<tr>
<td align="left" valign="top">One-way ANOVA</td>
<td align="left" valign="top">Language-learning utilization across roles</td>
<td align="center" valign="top"><italic>F</italic> (2,1,082)&#x202F;&#x2248;&#x202F;0.223</td>
<td align="center" valign="top">&#x2248;0.80</td>
<td align="center" valign="top"><italic>&#x03B7;</italic><sup>2</sup> &#x2248;&#x202F;0.00041</td>
<td align="left" valign="top">No meaningful group differences</td>
</tr>
<tr>
<td align="left" valign="top">One-way ANOVA</td>
<td align="left" valign="top">Research utilization across roles</td>
<td align="center" valign="top"><italic>F</italic> (2,1,082)&#x202F;&#x2248;&#x202F;0.051</td>
<td align="center" valign="top">&#x2248;0.95</td>
<td align="center" valign="top"><italic>&#x03B7;</italic><sup>2</sup> &#x2248;&#x202F;0.00009</td>
<td align="left" valign="top">No meaningful group differences</td>
</tr>
<tr>
<td align="left" valign="top">Correlation</td>
<td align="left" valign="top">Training/support &#x2194; AI utilization</td>
<td align="center" valign="top">Pearson <italic>r</italic> (see SPSS output)</td>
<td align="center" valign="top">0.001</td>
<td align="center" valign="top">|<italic>r</italic>|&#x2273;0.10 (small)</td>
<td align="left" valign="top">Training/support associated with higher AI use</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>F</italic> and <italic>&#x03B7;</italic><sup>2</sup> values are approximations derived from the reported <italic>p</italic>-values and sample size (df<sub>2</sub>&#x202F;=&#x202F;1,082) to provide interpretable effect sizes; the primary inferential result is that group differences were not statistically significant.</p>
</table-wrap-foot>
</table-wrap>
<p>This finding of a broad consensus is interesting, as one might expect differences&#x2014;perhaps teachers and administrators have more access to AI than students, or conversely, students might experiment with AI more informally than older staff. However, our data suggest a relatively uniform adoption culture within these institutions. Qualitative insights provide explanations: many AI initiatives in the colleges were implemented institution-wide rather than targeting one group. For instance, when an AI language lab system was introduced, it was made available to teachers (for instruction) and students (for practice), leading both groups to engage with it similarly. Additionally, administrative leaders often involve faculty in decisions about AI tools, and faculty, in turn, guide students in using them, creating a synchronized pace of adoption. One teacher interviewee noted, &#x201C;When the college brought in the new AI tutoring software, the administrators promoted it, we teachers got trained on it, and then we made it part of student assignments&#x2014;so everyone is on the same page.&#x201D; This collaborative approach may have led to the alignment in usage levels observed.</p>
<p>That said, it is important to clarify that while the overall extent-of-use ratings were similar, the nature of AI use can differ among groups. The survey&#x2019;s quantitative measure does not capture qualitative nuances. For example, administrators and teachers did have higher adoption rates and frequency of AI use in certain tasks (as described earlier, more teachers and admins have tried AI and use it regularly compared to students). The lack of statistical difference in &#x201C;extent of AI utilization&#x201D; suggests that, on average, each group feels AI is moderately used in their area&#x2014;but students might mostly be using AI for their learning purposes, whereas teachers use it for teaching tasks, etc. In our interviews, some subtle differences emerged: administrators tend to use AI for management and service-oriented functions, teachers for pedagogical support, and students for study aids. One could say that faculty and administrators are more proactive in integrating AI (often initiating its use in their workflow), whereas students are somewhat more passive or guided users, engaging with AI when it is part of coursework or easily accessible tools. Indeed, a theme from interviews was that students&#x2019; lower self-reported adoption might indicate they use AI when directed (by assignments or available tools) but are less likely to independently integrate AI into their learning process compared to how teachers integrate it into teaching. Despite these nuances, the key quantitative takeaway is that there is no glaring gap&#x2014;the colleges did not have, for instance, teachers heavily using AI while students not at all (which could create a mismatch in classroom experiences), nor did they have tech-savvy students outpacing reluctant teachers. All parties are roughly keeping pace with one another in this early stage of AI integration.</p>
<p>From an institutional perspective, this consensus is a positive sign. It implies that AI adoption has so far been an inclusive, community-wide process. Everyone&#x2014;leadership, faculty, and students&#x2014;is engaging with AI to a comparable degree, which can be beneficial for building an &#x201C;AI culture&#x201D; on campus. Change management literature in education suggests innovations diffuse best when all stakeholder levels embrace them together (<xref ref-type="bibr" rid="ref6">Law, 2010</xref>). Our findings resonate with that idea: the alignment in AI use may reflect supportive top-down policies combined with bottom-up willingness among faculty and students, resulting in a cohesive adoption. As a result, any initiatives to further increase AI utilization can be rolled out broadly since no one group is markedly ahead or behind others.</p>
</sec>
<sec id="sec23">
<label>3.4</label>
<title>Relationship between adoption factors and AI utilization</title>
<p>The study examined how certain adoption-related factors correlated with the extent of AI utilization to understand what drives higher or lower use of AI in the colleges. Two factors of interest were: (1) an individual&#x2019;s status as an AI adopter or non-adopter, and (2) their exposure to training or institutional support for AI. We also considered general technology familiarity (though most respondents being relatively young or tech-educated meant baseline digital familiarity was high).</p>
<p>The results clearly indicate a positive relationship between AI adoption and utilization levels. Respondents who identified as having adopted AI tools (versus those who had not) reported significantly higher usage of AI in their educational activities. In essence, active adopters&#x2014;those who had started using AI&#x2014;tended to integrate those tools to a greater extent, whereas non-adopters naturally had negligible usage. While this may seem tautological, it underscores an important point: simply getting stakeholders to take the initial step of trying AI tools is critical, as initial adoption begets ongoing use. Many interviewees noted that once they started using an AI application and saw some benefit, they found more ways to incorporate it regularly. This aligns with diffusion of innovation theory (<xref ref-type="bibr" rid="ref16">Rogers, 2003</xref>) that early adoption can lead to broader implementation if the experience is positive.</p>
<p>More illuminating was the role of training and support. The analysis found that individuals who had received formal AI training or institutional support showed significantly greater AI utilization in practice. For example, teachers who had attended even one workshop on AI or received guidance from an IT support team had higher self-reported usage frequency and higher extent-of-use ratings than those without any training. A correlation analysis revealed a statistically significant association between the number of training experiences (even if few) and the extent of AI use in teaching tasks (with a correlation coefficient indicating a moderate positive relationship, and <italic>p</italic>&#x202F;&#x003C;&#x202F;0.01 for the relationship between having training and greater integration into teaching). In fact, one of the strongest specific findings was that having received AI training was correlated with greater AI integration in teaching activities (<italic>p</italic>&#x202F;=&#x202F;0.001). This suggests that capacity-building efforts can directly translate into more effective and extensive use of AI by educators. Similarly, at the student level, those who indicated they had learned about AI (whether through a workshop or a self-initiated online course) tended to use AI more often in their studies.</p>
<p>On the other hand, some hypothesized factors showed weaker influence. We considered whether general tech-savviness or &#x201C;being a frequent user of technology&#x201D; predicted higher AI use. The data did not show a clear linear relationship here&#x2014;being young or generally good with computers did not automatically mean a student would use AI for research or learning. Some very digitally literate students still did not use AI much, possibly due to a lack of need or awareness. This aligns with the insight that AI usage is context-specific: just because someone is comfortable with technology does not mean they find a place for AI in every task. For instance, a teacher who is very tech-savvy but teaches a subject like law or art may not see immediate ways to use AI, unless given domain-specific examples.</p>
<p>Interestingly, the analysis of adoption status vs. research use hinted that broad adoption does not guarantee usage in specialized areas like research. In other words, being an avid AI user in general did not necessarily mean one used AI for research tasks. This suggests that certain applications of AI (like research) have their own barriers and require targeted integration strategies. A faculty member may use AI for grading and admin but still not use it for literature review or data analysis if they do not trust it or know about relevant tools. This finding points to the need for contextual adoption&#x2014;encouraging use of AI in specific academic tasks requires addressing those specific workflows and demonstrating value there, not just raising overall comfort with AI.</p>
</sec>
<sec id="sec24">
<label>3.5</label>
<title>Challenges in AI integration</title>
<p>Through survey responses and qualitative feedback, the study identified several key challenges and issues that impede the effective use of AI in higher vocational colleges. These challenges provide insight into why AI adoption, while moderate, has not reached higher levels, and what concerns stakeholders have about AI integration. The major challenges are summarized below:</p>
<list list-type="bullet">
<list-item>
<p><italic>Lack of training and skills</italic>: The most frequently mentioned barrier was the insufficient training of end-users (teachers, students, and even some administrators) in using AI tools. As quantified earlier, formal training opportunities have been scarce, resulting in many users feeling underprepared. Teachers pointed out that without training, they might only use basic functions of AI or avoid using it in advanced ways (e.g., for adaptive teaching or data analysis). Students expressed uncertainty about how to properly use AI for learning versus cheating, indicating a need for guidance on effective and ethical usage. This training deficit not only limits current utilization but also breeds a lack of confidence&#x2014;some faculty are hesitant to try new AI tools without support, and some students are unaware of the AI resources that could help them. Thus, inadequate user training is a critical challenge, reinforcing the need for structured professional development and AI literacy programs.</p>
</list-item>
<list-item>
<p><italic>Infrastructural constraints</italic>: A significant challenge noted was the limitation of technology infrastructure in some colleges. This includes both hardware and connectivity. Not all institutions have up-to-date computer labs, reliable high-speed internet, or the server capacity to deploy advanced AI software. Some rural or less-funded private colleges struggle with maintaining modern ICT facilities, which in turn affects AI implementation. For example, an administrator from a smaller college mentioned that they would like to use an AI-driven learning platform, but their current network often cannot handle the load, or they lack sufficient computers for student use. Outdated computers, patchy internet, and a lack of investment in AI-specific infrastructure (like GPU servers for AI applications) were cited as barriers. This infrastructural gap means even if teachers are willing and tools are available, the practical usage can be hampered by slow systems or limited access (e.g., not enough smart classrooms for all courses). Infrastructure improvements, such as upgrading network bandwidth and providing necessary devices, were thus highlighted as necessary steps to support AI integration.</p>
</list-item>
<list-item>
<p><italic>Platform access and policy restrictions</italic>: Because foreign AI tools like ChatGPT are restricted in China, colleges largely depend on domestic platforms. While domestic tools are widely used, some participants felt that being cut off from global AI services is a limitation &#x2013; for instance, certain advanced features or content available in tools like GPT-4 are not accessible. Additionally, uncertainty around policies (both government and institutional) sometimes made users cautious. A few teachers were unsure if using a VPN to access an external AI service was allowed or if using AI for generating content might violate academic policies. Lack of clear guidelines initially caused hesitation&#x2014;some faculty avoided AI for fear of breaching academic integrity rules if students used AI to do their work. Thus, policy and access issues present a challenge: balancing the use of domestic vs. foreign AI and establishing clear rules so that users know what is permitted.</p>
</list-item>
<list-item>
<p><italic>Ethical and privacy concerns</italic>: With the rise of AI, ethical considerations emerged as a challenge. Administrators and educators raised concerns about data privacy&#x2014;e.g., if student data is fed into AI systems, how is it protected?&#x2014;and about the security of AI platforms. There were also worries about academic integrity: the possibility of students misusing generative AI to cheat on assignments or becoming overly reliant on AI for answers, potentially undermining learning and critical thinking. Teachers found it challenging to design assessments that encourage proper use of AI without inviting academic dishonesty. Moreover, some respondents mentioned the risk of bias or errors in AI outputs: if AI provides incorrect information, students might accept it uncritically. These concerns indicate the need for robust ethical guidelines and training on responsible AI use (for both staff and students). While not a direct barrier to using AI (since many still use AI despite these issues), such concerns could limit how far institutions are willing to integrate AI without safeguards. For example, an administrator might hesitate to implement an AI system if unsure about compliance with data protection laws or regulations.</p>
</list-item>
<list-item>
<p><italic>Cultural and organizational resistance</italic>: A subtler challenge noted was a degree of resistance to change or uncertainty among some staff. A portion of older or traditionally minded teachers were skeptical of AI&#x2019;s benefits, preferring conventional teaching methods and worrying that AI might &#x201C;replace&#x201D; aspects of their role or lead to superficial learning. Similarly, some administrators accustomed to legacy systems were slow to adopt AI-driven processes. This mindset can impede adoption&#x2014;if key faculty are unconvinced, they may not integrate AI even if tools are available. Students, too, interestingly, sometimes showed reluctance: a few student comments suggested distrust in AI-provided answers or discomfort in interacting with chatbots versus human guidance. Overcoming this requires awareness-building and sharing success stories to demonstrate AI&#x2019;s positive impact, thereby changing mindsets.</p>
</list-item>
<list-item>
<p><italic>Support and maintenance gaps</italic>: Finally, respondents indicated a lack of ongoing technical support as a challenge. Some colleges did not have dedicated IT staff well-versed in AI applications to assist teachers and students. If an AI tool malfunctions or if users have questions, immediate help might not be available, discouraging further use. Moreover, without institutional structures like AI committees or regular review of AI tool performance, some implementations might stagnate. Essentially, after introducing AI tools, there needs to be follow-up in terms of user support, maintenance, and evaluation, which was lacking in many cases.</p>
</list-item>
</list>
<p>Taken together, the results indicate moderate AI adoption across stakeholder groups, with the highest utilization in language learning and more targeted use in teaching and research. Training and institutional support emerge as key differentiators in how extensively AI is used. The next section interprets these findings through TAM and DOI, and discusses implications of platform restrictions and policy choices for pedagogical innovation and benchmarking.</p>
</sec>
<sec id="sec25">
<label>3.6</label>
<title>Discussion</title>
<sec id="sec26">
<label>3.6.1</label>
<title>Principal findings and interpretation</title>
<p>This research provides one of the first in-depth looks at how AI is being adopted and used in the context of Chinese higher vocational education. The findings paint a picture of moderate but growing integration of AI within Sichuan&#x2019;s vocational colleges. In interpreting these results, it is useful to consider both the local context and broader trends in AI-in-education research.</p>
<p>Firstly, the moderate overall adoption of AI across these institutions indicates that AI technologies have moved beyond the pilot stage and into regular use, but not yet into fully transformative use. Most administrators, teachers, and students have at least some exposure to AI tools, which is an encouraging sign of diffusion. Teachers and administrators have embraced AI for routine tasks&#x2014;a result consistent with innovation adoption theory that suggests early uses of a new technology often focus on improving efficiency in existing practices (<xref ref-type="bibr" rid="ref16">Rogers, 2003</xref>). Our data showed teachers using AI to streamline lesson preparation and grading, and administrators using AI to automate student services and data management. These uses align with findings from other settings: for example, educators worldwide often initially use AI to handle burdensome tasks like grading or FAQ answering, thereby freeing time for more complex work (<xref ref-type="bibr" rid="ref3">Crompton and Burke, 2023</xref>; <xref ref-type="bibr" rid="ref11">Ocen et al., 2025</xref>). The fact that Sichuan&#x2019;s vocational college faculty are doing the same suggests that they are keeping pace with global educational technology trends, at least in the domain of automating routine activities.</p>
<p>Where AI&#x2019;s impact is most pronounced is in language education. The high usage of AI for language learning in our study reflects the abundance and maturity of AI language tools. This result is in line with prior research noting that language learning applications (intelligent tutors, translation apps, etc.) are among the most widespread AI applications in education (<xref ref-type="bibr" rid="ref3">Crompton and Burke, 2023</xref>). In China, AI-driven language apps (for English learning especially) have been popular and well-supported, so it is logical that vocational students and teachers lean heavily on them. The implication is that when AI tools are readily accessible, aligned with user needs, and user-friendly, integration can happen quite organically. Language learning presented a clear use-case where AI adds value&#x2014;personalized practice and instant feedback&#x2014;so both teachers and students eagerly utilized these tools. This success in the language domain could serve as a model for other subject areas: if similarly user-friendly AI tools (with clear pedagogical value) are developed for other disciplines (say, AI lab simulators for engineering trades or AI coaching for nursing procedures), we might expect uptake to be similarly enthusiastic.</p>
<p>In contrast, the limited use of AI for research and collaborative learning reveals the current boundaries of AI adoption in vocational colleges. Vocational institutions traditionally place less emphasis on research than academic universities, focusing more on practical training and employability. Our findings reflect this: neither faculty nor students are extensively using AI in research contexts, likely because research activities themselves are limited in these settings. Additionally, using AI for research often requires advanced tools and skills (like knowledge of data analysis software or research databases), which many vocational college staff and students may lack. This highlights an important consideration: contextual relevance. AI adoption will flourish where it aligns with the core activities and immediate needs of the institution. In vocational colleges, teaching and skill practice are core, whereas formal research is peripheral; hence, AI is naturally more integrated into teaching-related tasks than research tasks. Another factor could be trust and reliability&#x2014;academic research demands high accuracy and credibility, and both faculty and students may be cautious about relying on AI for literature reviews or analysis due to concerns about getting flawed or non-validated information (a sentiment some interviewees expressed). This caution echoes warnings by scholars about the risks of unchecked AI outputs in academic work. As AI tools improve and as research becomes more data-driven even in vocational fields, we might see growth in this area, but for now, AI&#x2019;s role in vocational college research remains supplementary at best.</p>
<p>One of the notable outcomes was that there were no significant discrepancies between the different stakeholder groups in how much AI they use. This uniformity can be interpreted positively. It suggests that the introduction of AI in these colleges has been relatively inclusive and well-communicated. Often with new technologies, one sees early adopters and laggards; here, however, administrators, faculty, and students all converged on a moderate level of use. This could imply strong institutional leadership and culture around AI. Possibly, administrative initiatives provided the push, teachers responded by integrating AI into their curriculum, and students followed suit as part of their learning process&#x2014;a cohesive rollout. Literature on educational change supports the idea that synchronized adoption leads to smoother implementation (<xref ref-type="bibr" rid="ref6">Law, 2010</xref>). From a practical standpoint, this parity means there is not a misalignment, such as teachers wanting to use AI but students being unprepared (or vice versa). Everyone is somewhat on the same page, which is beneficial for moving forward: any scaling up of AI integration can target the whole institutional community without needing to remediate a specific group.</p>
<p>However, we should nuance this finding: teachers and administrators did have slightly higher engagement and took more initiative with AI compared to students. In our context, that is expected&#x2014;educators were often the ones driving the use of AI tools in their classes or work. The fact that students reported slightly lower adoption might indicate that students use AI when it is embedded in their coursework or recommended by teachers, but less so independently. This interpretation aligns with a study by <xref ref-type="bibr" rid="ref7">Li et al. (2025)</xref>, which found that undergraduate students&#x2019; perceptions of AI were significantly influenced by instructor guidance and institutional signals. In our study, many students were unaware they were &#x201C;using AI&#x201D; unless it was explicitly pointed out (e.g., they might use a grammar-checker without labeling it as AI). This calls for efforts to boost student AI literacy and agency&#x2014;making students more conscious of AI tools and how to proactively use them for learning, beyond just what is assigned. Encouraging a culture where students experiment with AI for self-study or creative projects could elevate their engagement to match that of their instructors.</p>
<p>The positive correlation between training and AI usage is a critical insight for policy. It essentially validates that &#x201C;if you train them, they will use it.&#x201D; Faculty who underwent even minimal AI training integrated those skills into their teaching significantly more. This resonates with other findings in educational technology adoption that professional development is a key enabler of technology integration. In our context, where only a small fraction had training, the difference in usage is stark&#x2014;those few with training were pioneers using AI to a greater extent, implying that lack of training is holding many others back from doing the same. It is a reminder that introducing AI tools alone is not enough; human capacity building is necessary to unlock the potential of those tools. Notably, even non-technical training like workshops on AI pedagogy or sharing best practices could demystify AI for less confident teachers and inspire new applications.</p>
<p>It is also worth noting that institutional support and clear policies can affect AI usage patterns. Our study period preceded the widespread issuance of formal AI-use policies within these colleges. In the absence of clear guidelines, some educators were treading carefully around issues like generative AI use in assignments. The development of a comprehensive AI policy (which we undertook as an outcome of this study) is timely. Such a policy can provide clarity &#x2013; for example, explicitly permitting the use of AI tools for learning with proper attribution, or outlining how AI can be used in assessments&#x2014;which in turn can embolden teachers and students to use AI constructively without fear of &#x201C;doing something wrong.&#x201D; Other research has highlighted that when institutions provide guidelines and support structures (like AI ethics guidelines, data privacy rules, and resource provisioning), it fosters a safer and more confident environment for technology adoption. Our recommendations later in this discussion incorporate these aspects.</p>
</sec>
</sec>
<sec id="sec27">
<label>3.7</label>
<title>Implications for practice and policy</title>
<sec id="sec28">
<label>3.7.1</label>
<title>Policy restrictions, domestic ecosystems, and international benchmarking</title>
<p>China&#x2019;s restrictions on many foreign AI services (e.g., limited access to ChatGPT without workarounds) shape not only tool choice but also the innovation trajectory of colleges. The heavy reliance on domestic platforms (ERNIE Bot, Qwen, Spark Desk) can be advantageous for Chinese-language performance, data localization, and alignment with national governance requirements. At the same time, reliance on a primarily domestic ecosystem may constrain pedagogical experimentation when certain features, plugins, or international teaching resources are unavailable or when interoperability across platforms is limited. For private vocational colleges with constrained budgets, policy-driven concentration in a few domestic vendors can reduce search costs and simplify compliance, but it can also amplify vendor lock-in and limit exposure to global benchmarking practices. To mitigate these risks, institutions can (a) prioritize platform-agnostic pedagogical designs (e.g., prompt literacy, evaluation rubrics, and disclosure practices that transfer across tools), (b) document tool capabilities and limitations systematically for internal benchmarking, and (c) participate in permitted international collaborations (e.g., joint evaluation datasets or published benchmarks) to maintain comparability even when direct access to some foreign tools is restricted.</p>
<p>The findings of this study carry several implications for educational practice and institutional policy in the vocational education sector (and potentially beyond). To harness AI&#x2019;s benefits while addressing the challenges identified, a multi-pronged strategy is needed:</p>
<p><italic>Capacity building through training</italic>: There is a clear need to invest in professional development for both faculty and students regarding AI. With only ~9% of teachers having formal training, colleges should implement comprehensive AI literacy programs. This could involve regular workshops that introduce teachers to available AI tools (e.g., demonstration sessions on using AI for creating assessments, or how to use adaptive learning software), as well as ongoing technical support or coaching for lesson integration. Training should not just cover tool usage, but also pedagogical strategies for AI (for instance, how to incorporate AI tutoring systems into homework assignments effectively, or how to supervise student use of AI so that it enhances rather than replaces learning). Administrators, too, would benefit from targeted training on AI for education management (e.g., using analytics dashboards or privacy management when deploying AI). For students, integrating AI literacy into the curriculum or orientation programs is recommended. Students entering these colleges could receive an orientation module on &#x201C;Effective and Ethical Use of AI for Learning,&#x201D; where they learn about the AI resources at their disposal (such as library chatbots, learning apps, etc.), how to use them to complement studying, and what constitutes misuse (to address integrity concerns). Building confidence and skills among all users will likely increase the extent and quality of AI utilization. Notably, similar calls for AI training in education have been made by policy bodies and researchers, emphasizing that without developing human capital, the technology cannot be used to its full potential (<xref ref-type="bibr" rid="ref12">Office of Educational Technology, U.S. Department of Education, 2023</xref>).</p>
<p><italic>Improving technological infrastructure</italic>: The infrastructural constraints must be addressed at the institutional and possibly provincial level. Ensuring that all vocational colleges have adequate internet bandwidth, modern computers or device access for students, and server capacity for AI applications is foundational. Investment in infrastructure upgrades&#x2014;such as setting up AI computer labs or providing grants for colleges to acquire licensed AI software&#x2014;would reduce barriers to adoption. In Sichuan, where some colleges are in less urban areas, it may require government or public-private initiatives to fund these improvements. Additionally, maintaining up-to-date software and providing IT support staff who can assist with AI tools will ensure the technology is usable day-to-day. Some colleges might explore sharing AI resources; for example, a cluster of colleges could jointly host an AI platform if individual funding is low. The implication here is that policymakers should view AI integration not just as a pedagogical issue but also as an infrastructure one&#x2014;it requires tangible resource allocation to technology.</p>
<p><italic>Developing clear AI usage policies</italic>: With the rapid rise of generative AI, educational institutions are grappling with policy questions. Our study underlines the importance of having a formal AI policy at the college level. This policy should cover aspects such as: acceptable use (e.g., guidelines on students using AI for assignments, requiring disclosure of AI assistance to prevent plagiarism), data privacy and security (ensuring compliance with regulations when using AI platforms that handle student data), and ethical standards (for example, forbidding the use of AI in ways that could reinforce bias or replace human judgment where it&#x2019;s crucial). By formally articulating these rules, colleges can provide a framework within which innovation can happen responsibly. During this research, we drafted a proposed Academic AI Policy for vocational colleges, which aligns with national directives and international best practices. It calls for things like establishing an AI ethics committee at the college, regular reviews of AI tool impacts, and integration of AI ethics into training sessions. Implementing such policies will help mitigate concerns expressed by participants about misuse or ethical pitfalls. Importantly, policies should be communicated and implemented through workshops so that everyone from top leadership to students understands them and why they matter (emphasize balancing innovation with caution, which is exactly what good policy enables).</p>
<p><italic>Encouraging deeper integration into curriculum</italic>: Educators should be encouraged and supported to move beyond superficial use of AI (just for routine tasks) towards holistic curricular integration. This means rethinking some teaching methods, considering AI capabilities. For example, in teaching practice, instructors could incorporate AI-driven projects&#x2014;such as having students use an AI tool to analyze a dataset or simulate a scenario relevant to their field&#x2014;thereby directly engaging students with AI as part of learning. In language classes, since AI is already heavily used, teachers could formalize it by designing assignments that require using an AI translator, then critically evaluating its output, which both leverages and develops students&#x2019; higher-order thinking. The colleges could also revise curricula to include topics on AI basics and applications relevant to each vocational field (highlight successful cases of embedding digital and AI literacy in university programs). This will prepare students for the AI-infused workplaces they will enter. Faculty should be brought together (perhaps through communities of practice) to share best practices on using AI pedagogically. When teachers see concrete examples of improved learning outcomes from AI-enhanced methods (like adaptive quizzing raising student performance), they may be more motivated to adopt similar approaches.</p>
<p><italic>Bridging the student engagement gap</italic>: Since the data suggests students were less engaged with AI unless guided, colleges should try to actively engage students with AI. This could involve creating more opportunities for students to experiment with AI in a safe learning environment. For instance, colleges might host innovation challenges or hackathons where students can create small projects using AI tools relevant to their trade (e.g., an IT student building a chat bot service for the campus, or design students using an AI image generator to aid a project, within ethical bounds). Such activities can demystify AI and spur interest. Additionally, promoting student-led AI clubs or interest groups could give students a peer-supported way to learn about new AI technologies and share usage tips. The goal is to foster a culture where students view AI not just as something for completing assignments faster, but as a tool for creativity, exploration, and self-improvement in their field. Over time, this could lead to students more independently integrating AI into their learning, which would maximize the benefits they get from these tools.</p>
<p><italic>Ongoing support and governance</italic>: Institutions must establish structures for sustained support and governance of AI integration. One recommendation from the study is for colleges to form AI steering committees or task forces that include representatives from administration, faculty, IT staff, and students (mirroring recommendations by <xref ref-type="bibr" rid="ref5">Jobin et al., 2019</xref> for inclusive AI governance). These committees can oversee the rollout of AI initiatives, ensure that tools are evaluated for effectiveness and fairness, and stay updated with emerging technologies or regulations. They would also be responsible for revising policies as needed and championing resources (budget for tools, training, etc.). Having a formal body signals the college&#x2019;s commitment and provides a centralized place to address issues that arise (for example, if a teacher reports a problem with an AI system, the committee can investigate and liaise with vendors or authorities).</p>
<p><italic>Addressing ethical and integrity issues</italic>: From a practice standpoint, the challenge of maintaining academic integrity in the age of AI requires strategic action. Teachers should design assessments and learning activities in ways that encourage ethical use of AI&#x2014;for instance, assignments can be structured to require personal reflection or in-class components that AI cannot generate, or students can be asked to show how they used AI as a tool (thus making AI a part of the learning process rather than a shortcut around it). Educating students about the pitfalls of over-reliance on AI (such as potential inaccuracies or the loss of skill practice) is equally important. On the institutional side, deploying AI plagiarism detection or AI-output detection tools might be necessary as a deterrent (though these are evolving). An ethical implication is also ensuring AI tools used by the college are themselves fair and accessible&#x2014;for example, making sure that an AI platform is usable by students with disabilities, or that it does not disadvantage any group of students. This might involve testing tools for bias and being transparent about how AI decisions (like automated grading) are made, aligning with principles of trustworthy AI in education (<xref ref-type="bibr" rid="ref9003">European Commission, 2019</xref>; <xref ref-type="bibr" rid="ref20">UNESCO, 2021</xref>).</p>
</sec>
<sec id="sec29">
<label>3.7.2</label>
<title>Limitations and future research</title>
<p>This study had several limitations. First, the sample was limited to private higher vocational colleges in Sichuan Province, which may restrict generalizability to public institutions or other regions. Future studies should include broader or international samples to compare AI adoption patterns. Second, the cross-sectional design captured a single time point, despite the rapid evolution of AI technologies. Longitudinal research is needed to trace how adoption changes over time and impacts educational outcomes. Third, the reliance on self-reported data may introduce bias; incorporating objective usage metrics or direct classroom observations would improve reliability. Fourth, the qualitative component was modest and may not fully reflect stakeholder perspectives. In-depth qualitative methods like ethnography or focus groups could yield richer insights. Finally, while this study took a broad view of AI integration, more focused studies on specific domains such as AI in language learning, counseling, or administrative support are recommended to deepen understanding.</p>
</sec>
</sec>
</sec>
<sec sec-type="conclusions" id="sec30">
<label>4</label>
<title>Conclusion</title>
<p>This study has provided a comprehensive examination of AI integration in higher vocational colleges in Sichuan, offering evidence that can guide both practitioners and policy makers. In conclusion, AI adoption in Sichuan&#x2019;s vocational education sector is underway at a moderate level&#x2014;it is neither in its infancy nor fully mature. Educators and administrators are leveraging AI primarily to augment efficiency in teaching and operations, and students are benefiting notably in areas like language learning. The cross-stakeholder adoption is uniform, which bodes well for unified progress. Yet challenges related to training, infrastructure, and ethical governance currently restrain AI from realizing its full potential in transforming teaching and learning practices.</p>
<p>Encouragingly, the study&#x2019;s findings have already been channeled into the development of a proposed academic policy for AI use, and into a series of recommendations that address identified gaps. Implementing these policy guidelines&#x2014;focusing on capacity building, infrastructure, clear ethical frameworks, and proactive engagement strategies&#x2014;is expected to significantly enhance AI integration in these institutions. By doing so, higher vocational colleges in Sichuan (and similar contexts elsewhere) can ensure that AI is harnessed effectively to improve educational quality, while also safeguarding against its risks. The goal is to align the use of AI with the mission of vocational education: producing graduates who are not only skilled in their trades but also adept in using advanced technologies like AI in the workplace.</p>
<p>In a broader sense, this research underscores that successful educational innovation with AI requires a balanced approach&#x2014;one that pairs technological possibilities with human-centric support systems. As China and other nations push forward with AI-enabled education initiatives, studies like this provide ground-level insights to inform those efforts. The lessons from Sichuan&#x2019;s vocational colleges may inform national strategies, ensuring that policies and investments address real needs on campus.</p>
<p>The coming years will be critical as AI tools evolve and permeate further into education. By taking thoughtful action on the issues highlighted (such as training the workforce, upgrading tech infrastructure, and setting clear rules of the road), educational institutions can transition from moderate, exploratory use of AI to more fully integrated, innovative pedagogies and systems. Such a transition promises not only efficiency gains but also the potential for a richer, more personalized, and responsive learning experience for students&#x2014;ultimately fulfilling the promise of AI as a catalyst for educational excellence and equity in the vocational college sector and beyond.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec31">
<title>Data availability statement</title>
<p>The datasets generated and/or analyzed in this study are not publicly available because they contain information that could compromise participant confidentiality (e.g., institution- or respondent-identifying details) and are covered by the study&#x2019;s ethics and consent conditions. All data supporting the main findings are presented in the article and/or its <xref rid="SM1" ref-type="supplementary-material">Supplementary Material</xref> in aggregated form. De-identified data may be made available upon reasonable request to the corresponding author, subject to institutional approval and execution of a data use agreement.</p>
</sec>
<sec sec-type="ethics-statement" id="sec32">
<title>Ethics statement</title>
<p>This study involved human participants and was conducted in accordance with applicable ethical standards and institutional requirements. Prior to data collection, permission to conduct the research was obtained from the participating institutions. All participants (administrators, teachers, and students) were informed about the purpose of the study, the voluntary nature of participation, and their right to withdraw at any time without penalty. Written or electronic informed consent was obtained from all participants prior to completing the survey and/or participating in interviews. No identifiable personal information was collected or reported in the manuscript, and all responses were anonymized and reported only in aggregate form to protect confidentiality. This article does not contain any identifiable human images.</p>
</sec>
<sec sec-type="author-contributions" id="sec33">
<title>Author contributions</title>
<p>LY: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. FB: Conceptualization, Data curation, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>The authors would like to thank all teachers, colleagues, and classmates who provided valuable guidance, encouragement, and support throughout the writing of this paper and the submission process. It is hoped that this research will inspire further exploration of AI literacy in language education and contribute to ongoing discussions on equitable and ethical technology use in teaching.</p>
</ack>
<sec sec-type="COI-statement" id="sec34">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec35">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec36">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec37">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/feduc.2026.1796803/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/feduc.2026.1796803/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Table_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="ref9001"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Allil</surname><given-names>Kamaal</given-names></name></person-group>. (<year>2024</year>). <article-title>Integrating AI-driven marketing analytics techniques into the classroom: Pedagogical strategies for enhancing student engagement and future business success</article-title>. <source>Journal of Marketing Analytics</source> <volume>12</volume>: <fpage>142</fpage>&#x2013;<lpage>168</lpage>. doi: <pub-id pub-id-type="doi">10.1057/s41270-023-00281-z</pub-id>.</mixed-citation></ref>
<ref id="ref9002"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Creswell</surname><given-names>John W.</given-names></name> <name><surname>Plano Clark</surname><given-names>Vicki L.</given-names></name></person-group> (<year>2023</year>). <article-title>Revisiting mixed methods research designs twenty years later</article-title>. <source>The Sage Handbook of Mixed Methods Research Design</source>, <fpage>21</fpage>&#x2013;<lpage>36</lpage>. doi: <pub-id pub-id-type="doi">10.4135/9781529614572.n6</pub-id>.</mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Crompton</surname><given-names>H.</given-names></name> <name><surname>Burke</surname><given-names>D.</given-names></name></person-group> (<year>2023</year>). <article-title>Artificial intelligence in higher education: the state of the field</article-title>. <source>Int. J. Educ. Technol. High. Educ.</source> <volume>20</volume>:<fpage>22</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s41239-023-00392-8</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Davis</surname><given-names>F. D.</given-names></name></person-group> (<year>1989</year>). <article-title>Perceived usefulness, perceived ease of use, and user acceptance of information technology</article-title>. <source>MIS Q.</source> <volume>13</volume>, <fpage>319</fpage>&#x2013;<lpage>340</lpage>. doi: <pub-id pub-id-type="doi">10.2307/249008</pub-id></mixed-citation></ref>
<ref id="ref9003"><mixed-citation publication-type="book"><person-group person-group-type="author"><collab id="coll201">European Commission</collab></person-group>. (<year>2019</year>). <source>Ethics guidelines for trustworthy AI</source>. <publisher-loc>Brussels</publisher-loc>: <publisher-name>Publications Office of the European Union</publisher-name>.</mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jobin</surname><given-names>A.</given-names></name> <name><surname>Ienca</surname><given-names>M.</given-names></name> <name><surname>Vayena</surname><given-names>E.</given-names></name></person-group> (<year>2019</year>). <article-title>The global landscape of AI ethics guidelines</article-title>. <source>Nat. Mach. Intell.</source> <volume>1</volume>, <fpage>389</fpage>&#x2013;<lpage>399</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s42256-019-0088-2</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Law</surname><given-names>N.</given-names></name></person-group> (<year>2010</year>). &#x201C;<article-title>Educational change and technology innovation in education</article-title>&#x201D; in <source>Second international handbook of educational change</source>. eds. <person-group person-group-type="editor"><name><surname>Hargreaves</surname><given-names>A.</given-names></name> <etal/></person-group>. (<publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>971</fpage>&#x2013;<lpage>988</lpage>.</mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>Y.</given-names></name> <name><surname>Castulo</surname><given-names>N. J.</given-names></name> <name><surname>Xu</surname><given-names>X.</given-names></name></person-group> (<year>2025</year>). <article-title>Embracing or rejecting AI? A mixed-method study on undergraduate students&#x2019; perceptions of artificial intelligence at a private university in China</article-title>. <source>Front. Educ.</source> <volume>10</volume>:<fpage>1505856</fpage>. doi: <pub-id pub-id-type="doi">10.3389/feduc.2025.1505856</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll1">Ministry of Education of the People&#x2019;s Republic of China</collab></person-group>. (<year>2020</year>). <source>Action plan for improving the quality and excellence of vocational education (2020&#x2013;2023). (in Chinese).</source> <publisher-loc>Beijing (CN)</publisher-loc>: <publisher-name>Ministry of Education of the People&#x2019;s Republic of China</publisher-name>.</mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="book"><person-group person-group-type="author"><collab id="coll2">Ministry of Education of the People&#x2019;s Republic of China</collab></person-group> (<year>2022</year>). <source>Circular of the Ministry of Education on the issuance of the Compulsory Education Curriculum Program and Curriculum Standards</source>. <publisher-loc>Beijing (CN)</publisher-loc>: <publisher-name>Ministry of Education of the People&#x2019;s Republic of China</publisher-name>. Available from: <ext-link xlink:href="http://www.moe.gov.cn/srcsite/A26/s8001/202204/t20220420_619921.html" ext-link-type="uri">http://www.moe.gov.cn/srcsite/A26/s8001/202204/t20220420_619921.html</ext-link> (Accessed April 20, 2022).</mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ocen</surname><given-names>S.</given-names></name> <name><surname>Elasu</surname><given-names>J.</given-names></name> <name><surname>Aarakit</surname><given-names>S. M.</given-names></name> <name><surname>Olupot</surname><given-names>C.</given-names></name></person-group> (<year>2025</year>). <article-title>Artificial intelligence in higher education institutions: review of innovations, opportunities and challenges</article-title>. <source>Front. Educ.</source> <volume>10</volume>:<fpage>1530247</fpage>. doi: <pub-id pub-id-type="doi">10.3389/feduc.2025.1530247</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="book"><person-group person-group-type="author"><collab id="coll3">Office of Educational Technology, U.S. Department of Education</collab></person-group> (<year>2023</year>). <source>Artificial intelligence and the future of teaching and learning: Insights and recommendations</source>. <publisher-loc>Washington, DC</publisher-loc>: <publisher-name>U.S. Department of Education</publisher-name>.</mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Ogbuoka</surname><given-names>O. M.</given-names></name></person-group> (<year>2025</year>). &#x201C;<article-title>Enhancing human resource Management in Education through AI tools</article-title>&#x201D; in <source>Artificial intelligence and ethical transformation in education: Management, leadership and data-driven decision-making for sustainability. Deep Science Publishing</source>. Available online at: <ext-link xlink:href="https://doi.org/10.70593/978-93-7185-539-6_7" ext-link-type="uri">https://doi.org/10.70593/978-93-7185-539-6_7</ext-link> (Accessed September 15, 2025)</mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Prasetya</surname><given-names>F.</given-names></name> <name><surname>Fortuna</surname><given-names>A.</given-names></name> <name><surname>Samala</surname><given-names>A. D.</given-names></name> <name><surname>Latifa</surname><given-names>D. K.</given-names></name> <name><surname>Andriani</surname><given-names>W.</given-names></name> <name><surname>Gusti</surname><given-names>U. A.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Harnessing artificial intelligence to revolutionize vocational education: emerging trends, challenges, and contributions to SDGs 2030</article-title>. <source>Soc. Sci. Humanit. Open</source> <volume>11</volume>:<fpage>101401</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ssaho.2025.101401</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Rogers</surname><given-names>E. M.</given-names></name></person-group> (<year>2003</year>). <source>Diffusion of innovations</source>. <edition>5th</edition> Edn. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Free Press</publisher-name>.</mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Selznick</surname><given-names>B. S.</given-names></name> <name><surname>Titareva</surname><given-names>T. N.</given-names></name></person-group> (<year>2022</year>). <source>Postsecondary Administrative Leadership and Educational AI: an ethical shared approach. In: Strategy, Policy, Practice, and Governance for AI in Higher Education Institutions</source>. Almaraz-Men&#x00E9;ndez F, Maz-Machado A, L&#x00F3;pez-Esteban C, Almaraz-L&#x00F3;pez C, editors. <publisher-loc>Hershey (PA)</publisher-loc>: <publisher-name>IGI Global</publisher-name>. <fpage>73</fpage>&#x2013;<lpage>100</lpage>. doi: <pub-id pub-id-type="doi">10.4018/978-1-7998-9247-2.ch004</pub-id></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Tomar</surname><given-names>P.</given-names></name> <name><surname>Verma</surname><given-names>P.</given-names></name></person-group> (<year>2021</year>). &#x201C;<article-title>Adoption of educational technology to enhance the teaching-learning process: teachers&#x2019; perspective</article-title>&#x201D; in <source>Proceedings of ICCE 2021</source>. eds. <person-group person-group-type="editor"><name><surname>Cheung</surname><given-names>S.</given-names></name> <etal/></person-group>. (<publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer</publisher-name>).</mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="book"><person-group person-group-type="author"><collab id="coll4">UNESCO</collab></person-group> (<year>2021</year>). <source>Beijing consensus on artificial intelligence and education</source>. <publisher-loc>Paris</publisher-loc>: <publisher-name>UNESCO</publisher-name>.</mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Vijayalakshmi</surname><given-names>M.</given-names></name> <name><surname>Balu</surname><given-names>S.</given-names></name> <name><surname>Sowmya</surname><given-names>K.</given-names></name></person-group> (<year>2025</year>). &#x201C;<article-title>AI based smart classroom Systems for Personalized Learning and Educational Outcome Analytics</article-title>&#x201D; in <source>Artificial intelligence in multi domain Systems for Intelligent Decision Making and Automation</source>. <publisher-loc>Coimbatore (IN)</publisher-loc>: <publisher-name>RADemics Research Institute</publisher-name>. <fpage>346</fpage>&#x2013;<lpage>373</lpage>. doi: <pub-id pub-id-type="doi">10.71443/9789349552357-13</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3277382/overview">Christopher Hill</ext-link>, Canadian University of Dubai, United Arab Emirates</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1546858/overview">Rany Sam</ext-link>, National University of Battambang, Cambodia</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3183586/overview">Khalid Mirza</ext-link>, Quaid-i-Azam University, Pakistan</p>
</fn>
</fn-group>
</back>
</article>