<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Educ.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Education</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Educ.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2504-284X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/feduc.2025.1660954</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Dynamic peer learning recommendations in e-learning using hybrid collaborative filtering and interaction-based clustering</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>A. J.</surname>
<given-names>Dazzle</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3127668"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Naik</surname>
<given-names>Dishant</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3341459"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>R.</surname>
<given-names>Parvathi</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2585659"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>V.</surname>
<given-names>Pattabiraman</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3216270"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>School of Computer Science and Engineering, Vellore Institute of Technology</institution>, <city>Chennai</city>, <country country="IN">India</country></aff>
<aff id="aff2"><label>2</label><institution>Centre for Advanced Data Science, Vellore Institute of Technology</institution>, <city>Chennai</city>, <country country="IN">India</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Parvathi R., <email xlink:href="mailto:parvathi.r@vit.ac.in">parvathi.r@vit.ac.in</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-29">
<day>29</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>10</volume>
<elocation-id>1660954</elocation-id>
<history>
<date date-type="received">
<day>07</day>
<month>07</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>12</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>25</day>
<month>12</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 AJ, Naik, R and V.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>AJ, Naik, R and V</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-29">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Peer-learning recommendation remains an open challenge in e-learning systems, as most existing approaches&#x2014;such as matrix factorization and neural collaborative filtering&#x2014;rely on static interaction patterns. These methods often ignore contextual information including learner roles, content difficulty, and temporal engagement behavior. As a result, they struggle to form meaningful peer groups or provide adaptive learning paths that align with pedagogical needs.</p>
</sec>
<sec>
<title>Methods</title>
<p>To address these limitations, we propose a hybrid context-aware peer learning recommender that integrates collaborative filtering with interaction-based clustering. The framework incorporates adaptive peer group formation using multiple loss functions and multifactor BERT embeddings to capture content semantics. In addition, learner-specific characteristics such as difficulty level, job role, and software skills are explicitly modeled. These contextual and semantic features are dynamically used to cluster learners and generate personalized peer recommendations.</p>
</sec>
<sec>
<title>Results and discussion</title>
<p>Experiments conducted on an e-learning dataset demonstrate that the proposed model significantly outperforms sequential baseline approaches, as well as traditional matrix factorization and neural collaborative filtering models. The hybrid approach achieves an accuracy of 0.80, precision of 0.80, recall of 0.06, and an F1-score of 0.11. These results indicate improved personalization and contextual relevance in peer recommendations, enabling more adaptive and pedagogically suitable peer learning experiences.</p>
</sec>
</abstract>
<kwd-group>
<kwd>adaptive learning</kwd>
<kwd>BERT embeddings</kwd>
<kwd>e-learning</kwd>
<kwd>hybrid collaborative filtering</kwd>
<kwd>interaction-based clustering</kwd>
<kwd>peer learning recommendations</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="2"/>
<equation-count count="25"/>
<ref-count count="21"/>
<page-count count="13"/>
<word-count count="7777"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Digital Learning Innovations</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Over the past decade, e-learning systems have grown rapidly because they are flexible, accessible, and can deliver learning at scale to millions of users around the world. Worldwide evidence has demonstrated an increased use of online learning in higher education and corporate settings, with more than 110 million learners registered on major MOOC platforms as of 2022 (<xref ref-type="bibr" rid="ref12">Kizilcec et al., 2013</xref>). Despite the increase in the number of students joining MOOCs, learner disengagement is still a common issue&#x2014;MOOCs have very low completion rates that range between 5 and 10% (<xref ref-type="bibr" rid="ref19">Wong, 2020</xref>). Moreover, studies have revealed that 90% of students drop out during different stages of an online class because they lack a personalized experience, have little interaction with their peers, and receive low support (<xref ref-type="bibr" rid="ref15">Linn and Clark, 2019</xref>). These results also indicate a significant drawback in state-of-the-art systems: recommender techniques need to be better customized to meet the changing needs, behaviors, and contexts of learners.</p>
<p>Traditional e-learning recommenders are commonly based on collaborative filtering (CF), such as users&#x2019; interests and history about items. Although CF techniques have been effective for content recommendation, they are inherently predicated on the stability of preferences and rarely take into account contextual, temporal, and behavioral variability that influence learning paths (<xref ref-type="bibr" rid="ref8">Jannach and Manzoor, 2020</xref>; <xref ref-type="bibr" rid="ref5">Chen and Xu, 2021</xref>). In reality, learning is not a static process, and students&#x2019; experience, goal orientation, motivation, and cognitive patterns evolve over time, with such changes affecting the way they can or cannot benefit from collaborative learning environments. Meanwhile, the current e-learning systems also regard learners as a whole-oriented group without considering job type, level of knowledge, skill, and learning style or even approaching pattern (<xref ref-type="bibr" rid="ref4">Bower, 2019</xref>; <xref ref-type="bibr" rid="ref13">Koren et al., 2019</xref>). Research in the area of digital pedagogy suggests that successful online learning is heavily reliant on cooperation, social presence, and sensitive support with respect to the context, not simply similar content (<xref ref-type="bibr" rid="ref21">Zhang et al., 2019</xref>; <xref ref-type="bibr" rid="ref6">de Jong and van der Linden, 2020</xref>). These gaps reinforce the need for adaptive, context-sensitive, and behavior-aware recommendation systems.</p>
<p>To address these limitations, in this paper, we implement a hybrid CF model, which is a combination of collaborative filtering and content-based recommendations to recommend peers and study material by utilizing historical interactions, preferences, and performance of learners. The existing dynamic model is augmented with interaction-based clustering, which assimilates learners based on their interactions, learning styles, and development needs within the same cluster. The model promotes the creation of context-oriented study groups, both true to real-time behavior and the demand of learners, tailored toward individual learning progress.</p>
<p>Our model uses BERT embeddings to enhance the semantic understanding of the content and thereby the quality of recommendations. Bidirectional encoder representations from transformers (BERT) is a widely used pre-trained language model that learns semantic meaning from text data. With the help of content indexes, learner profiles, and course themes, it is possible to make recommendations more contextualized to specific learners during their pursuit of certain learning paths, job roles, and/or expertise.</p>
<p>We also propose a new way of measuring the progress of knowledge evolution and learning over time. Watch percentage, time on content, and ratings have been considered by our system to dynamically adapt learning paths and difficulty levels, so it always keeps learners challenged without overwhelming them. This ability to track learning progress and adjust recommendations accordingly sets our model apart from traditional static recommendation systems.</p>
<p>This paper makes the following contributions:</p>
<list list-type="bullet">
<list-item><p>Context- and time-adaptive friend discovery framework.</p></list-item>
</list>
<p>Previous studies have predominantly used static or similarity-based grouping of learners, without considering changes in learner behavior over time and the impact of context (such as job role, expertise level, and course difficulty) on peer compatibility. Our approach brings in an interaction-driven clustering mechanism that evolves peer groups depending on temporal learning dynamics and situational properties. This allows for more meaningful and pedagogically relevant peer recommendations that fill a key gap in prior CF-based systems, which assume learners are a homogeneous, immutable population.</p>
<list list-type="bullet">
<list-item><p>Multi-factor BERT embeddings for better semantic and context comprehension.</p></list-item>
</list>
<p>Text-based recommenders rely either on shallow textual features or on keyword overlap between documents, which may not reflect the underlying semantic relations needed for personalized e-learning. BERT-based embeddings are combined with other contextual information (theme, job role, software proficiency), resulting in a multi-faceted representation of learners and content. The limitations of previous works that utilize isolated content features without deep semantic information are thus overcome, and a more accurate peer/learning resource recommendation solution is achieved.</p>
<list list-type="bullet">
<list-item><p>Adaptive learning that evolves with learners&#x2019; real-time progress.</p></list-item>
</list>
<p>Several previous systems do not take into consideration dynamic signals of learning progress, such as watch percentage, time spent, and rating updates. Our model leverages these behavioral signals as a basis to scale content and personalize learning paths. This contribution helps address the limitation in previous recommendation models that are unable to personalize recommendations according to the progression of learners&#x2019; proficiency and engagement.</p>
<list list-type="bullet">
<list-item><p>Extensive empirical study that shows a large gap compared to other recommenders.</p></list-item>
</list>
<p>In earlier studies, the benchmark was fairly small or only partial evaluation metrics were used; our approach compares the hybrid model against popular methods such as collaborative filtering (CF), matrix factorization (MF), and neural collaborative filtering (NCF). We demonstrate that the hybrid model consistently outperforms standard approaches by analyzing accuracy, precision, recall, and F1-score. This assessment demonstrates the utility of incorporating behavioral, contextual, and semantic elements&#x2014;an aspect not well investigated in previous studies.</p>
<p>This concept is our effort to improve the peer learning experience and learner engagement, thereby improving the overall success of e-learning platforms by addressing these key aspects. The rest of this paper is organized as follows: Section 2 reviews related work in the field of recommendation systems for e-learning, Section 3 outlines the methodology used to develop and evaluate the model, Section 4 presents the experimental setup and results, and Section 5 concludes the paper with future research directions.</p>
</sec>
<sec id="sec2">
<label>2</label>
<title>Related work</title>
<p>In &#x201C;A Systematic Review: Deep Learning-Based e-Learning Recommendation System,&#x201D; <xref ref-type="bibr" rid="ref2">Bhanuse and Mal (2021)</xref> perform a comprehensive review study on deep learning utilization and limitations in e-learning recommendation systems. They enumerate seven principal recommendation strategies: content-based, collaborative filtering, knowledge-based, demographic, hybrid, ontology-based, and context-aware recommendations, all of which have a unique set of strengths and weaknesses. In particular, the review highlights CNNs and RNNs in sequential and high-dimensional data settings. The authors explain that CNNs are effective in recognizing and extracting patterns from large data sets, a quality that positions them well for applications where the recommendation quality depends on identifying subtle variations in learner behaviors and preferences.</p>
<p>In &#x201C;Personalized Recommender System for e-Learning Environment,&#x201D; <xref ref-type="bibr" rid="ref11">Khanal et al. (2019)</xref> introduced a recommendation system designed to address the static nature of traditional e-learning platforms, which often overlook individual learner differences. NPR_eL adopts a hybrid approach that combines collaborative and content-based filtering techniques, enabling it to tailor recommendations based on a learner&#x2019;s preferences, interests, and background knowledge. To personalize recommendations further, the authors introduce an innovative factor: learner memory capacity. Memory span is assessed through a pre-test, allowing NPR_eL to account for each learner&#x2019;s cognitive load and ability to retain information. This adaptation enables NPR_eL to avoid overwhelming learners and instead aligns with their unique learning pace, thereby improving the overall effectiveness of the e-learning environment.</p>
<p>Prior studies explained using processing power-heavy CNNs and RNNs, along with providing only the usual features (<xref ref-type="bibr" rid="ref16">Liu et al., 2022</xref>). Our project takes a step forward by applying BERT embeddings that can learn fine-grained, multi-dimensional relationships between user interactions, content descriptions, and contextual factors for the recommendation task, leading to better recommendation precision. In contrast to CNN and RNN, which are designed primarily for linear or sequential data, BERT provides a multilayered representation of textual data where each layer transforms the input into a new output, able to capture bidirectional context and relationships across different themes, software, and job roles in our dataset. This allows our model to provide a contextualized recommendation that is aligned with an individual learning path preference and peer group preferences, the latter being an important factor in e-learning (not represented directly in the taxonomy) (<xref ref-type="bibr" rid="ref2">Bhanuse and Mal, 2021</xref>).</p>
<p>By enhancing upon the deep learning approaches described in their paper by incorporating peer-group clustering and creating ongoing skill tracking, which are missing from their taxonomy, our work advances classical solutions. Though CNNs and RNNs are good at pattern recognition and sequential learning, our model also has real-time clustering as well as multi-factor embeddings (<xref ref-type="bibr" rid="ref20">Zhang et al., 2021</xref>) to adapt the system to changes in user preferences over time and learning progression. Our methodology overcomes static data and cold-start limitations through the embedding of diverse user attributes and evolving learning needs into our recommendation system, which provides integrated, holistic learning experiences tailored to different aspects of the complexity profiles of each user.</p>
<p>NPR_eL provides a rich personalization within the scope of memory capacity and cognitive load for each learner; however, our project takes this one step further with an innovative multi-factor model that includes contextual factors such as job roles, software skills, and theme-based interests, as well as flexibility to dynamically group peers based on their preferred role in learning (<xref ref-type="bibr" rid="ref7">Ibrahim et al., 2020</xref>). Our approach enables the system to recommend a path for learning by clustering learners with similar behavior and goes beyond the memory-based transfer of knowledge that NPR_eL possesses. This evolution allows a continuous pathway for recommendation. The use of BERT embeddings also embodies a diverse set of user features in a compact and independent vector space, enabling more accurate recommendations that consider the context surrounding some choice or event (NPR_eL does not tackle learner evolution).</p>
<p><xref ref-type="bibr" rid="ref18">Rahayu et al. (2022)</xref> builds on NPR_eL but extends its adaptation class to incorporate not only &#x201C;what the user remembers&#x201D; (i.e., cognitive factors) but also &#x201C;why the user needs what they remember&#x201D; (i.e., contextual factors), enabling our system to offer content that is aligned with memory retention, professional relevance, and domain mobility in a dynamically changing field. Unlike NPR_eL, our model provides cross-domain recommendations due to the combination of multi-factor BERT embeddings and collaborative filtering that we integrated; meanwhile, learners can seek interdisciplinary content based on their real-time preferences and peer interactions. That level of customization makes our model ideal for the variety and dynamism of e-learning environments, where flexibility and adaptability are key.</p>
<p>The adaptation class of NPR_eL is limited to &#x201C;what the user remembers&#x201D; (i.e., cognitive factors), while our approach also includes &#x201C;why the user needs what he/she recalls&#x201D; (i.e., contextual factors), making it more suitable for context-aware content provisioning in a dynamically changing field where such relevant information is desirable along with memory retention, professional relevance, and domain mobility. Different from NPR_eL, our model offers cross-domain recommendations that are facilitated by the merged multi-factor BERT embeddings and collaborative filtering adopted together in <xref ref-type="bibr" rid="ref1">Ali et al. (2022)</xref>, while allowing learners to pursue interdisciplinary content via their word-sensitive demand and peer abilities. It is this level of customization that makes our model the best fit for e-learning environments, considering the diversity and ever-changing nature of such contexts.</p>
<p>While prior work highlighted the contribution of ontologies to organizing both user and content knowledge, our model utilizes BERT representations to obtain an equally expressive representation of user and content characteristics on multiple levels without the effort behind manually built ontologies. Using BERT in this way enables our system to reason over the different learner characteristics&#x2014;language, job type, and software choices, for example&#x2014;allowing matching of knowledge with similar efficiency but increased flexibility (<xref ref-type="bibr" rid="ref9">Jeevamol and Renumol, 2021</xref>). Unlike most ontology-based systems, our model not only offers recommendations but also includes features to cluster peers based on interaction data, adding an adaptive quality (via peer dynamics) that is typically absent in static ontology-based systems.</p>
<p><xref ref-type="bibr" rid="ref3">Bhaskaran et al. (2021)</xref> and <xref ref-type="bibr" rid="ref14">Kulkarni et al. (2020)</xref> represented an advance over ontology-based methods by adapting to changing learner behavior and evolving peer groups in real time. This means fine-grained semantic nuances and more intricate user profiles are covered using BERT-based embeddings with significantly less human effort than ontologies would require, producing a solution that is scalable in nature for online education systems and adaptable to new learners and content. Additionally, our solution minimizes the cold-start and sparsity problems by merging contextual embeddings with collaborative filtering to provide users with personal recommendations that elicit change as the profile of the user grows and evolves&#x2014;providing novel dynamic point-based personalized recommendations that traditional ontology-based models struggle to achieve (<xref ref-type="bibr" rid="ref10">Jordan, 2014</xref>).</p>
</sec>
<sec sec-type="methods" id="sec3">
<label>3</label>
<title>Methodology</title>
<p>This section introduces the full approach to creating the hybrid recommendation model of dynamic peer learning within e-learning platforms. We combine hybrid CF and interaction-based clustering (<xref ref-type="bibr" rid="ref17">Palvia et al., 2018</xref>) with BERT embeddings, as well as dynamic peer grouping, to provide context-aware adaptive learning recommendations, as shown in <xref ref-type="fig" rid="fig1">Figure 1</xref>. It continuously evolves in accordance with individual learning profiles, thus keeping the suggestions contextual throughout the user&#x2019;s learning lifespan.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Hybrid recommendation model for dynamic peer learning within e-learning platforms.</p>
</caption>
<graphic xlink:href="feduc-10-1660954-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart depicting a data processing model. It starts with a dataset, splitting into "Actual Dataset" and "Ground Truth." The "Actual Dataset" leads to "Bert," "Collaborative Filtering," and "Dynamic Peer Grouping (Clustering)." These converge into a "Combined Model" producing "Accuracy," which interfaces with "Results," ending the process.</alt-text>
</graphic>
</fig>
<sec id="sec4">
<label>3.1</label>
<title>Dataset</title>
<p>The dataset should include both historical interaction data (e.g., past content consumption and ratings) as shown in <xref ref-type="disp-formula" rid="E1">Equation 1</xref> and real-time data (e.g., ongoing watch percentage and feedback) as shown in <xref ref-type="disp-formula" rid="E2">Equation 2</xref>, which are used to build dynamic recommendation models.</p>
<list list-type="bullet">
<list-item><p>Historical interactions:</p></list-item>
</list>
<disp-formula id="E1"><mml:math id="M1"><mml:mo stretchy="true">{</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>r</mml:mi><mml:mi mathvariant="italic">ui</mml:mi></mml:msub><mml:mo stretchy="true">)</mml:mo><mml:mo stretchy="true">}</mml:mo></mml:math><label>(1)</label></disp-formula>
<p>where <inline-formula><mml:math id="M2"><mml:msub><mml:mi>r</mml:mi><mml:mi mathvariant="italic">ui</mml:mi></mml:msub></mml:math></inline-formula> is the rating or implicit interaction (watch %, time).</p>
<list list-type="bullet">
<list-item><p>Real-time data signals:</p></list-item>
</list>
<disp-formula id="E2"><mml:math id="M3"><mml:msub><mml:mi>W</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo></mml:math><label>(2)</label></disp-formula>
<p>where <inline-formula><mml:math id="M4"><mml:mi>W</mml:mi></mml:math></inline-formula> = watch%, <inline-formula><mml:math id="M5"><mml:mi>E</mml:mi></mml:math></inline-formula> = engagement time, <inline-formula><mml:math id="M6"><mml:mi>R</mml:mi></mml:math></inline-formula> = recent rating.</p>
<p>These combined signals enable dynamic, context-aware modeling of learner progression.</p>
</sec>
<sec id="sec5">
<label>3.2</label>
<title>Dynamic peer grouping</title>
<p>This section describes our methodology for dynamic peer grouping: instead of fixed static peer groups, learners are organized into dynamic peer groups based on multiple changing parameters. The grouping procedure is directed by the following:</p>
<list list-type="bullet">
<list-item><p>Difficulty level: Learner categorization is decided based on skill level in each topic and distributed as basic, intermediate, or advanced. This guarantees that learners paired with each other can advance and learn jointly in an efficient manner, as shown in <xref ref-type="disp-formula" rid="E3">Equation 3</xref>.</p></list-item>
</list>
<p>Learner difficulty proficiency is calculated as:</p>
<disp-formula id="E3"><mml:math id="M7"><mml:msub><mml:mi>D</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mo>&#x2223;</mml:mo><mml:msub><mml:mi>I</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo>&#x2223;</mml:mo></mml:mrow></mml:mfrac><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msub><mml:mi>I</mml:mi><mml:mi>u</mml:mi></mml:msub></mml:mrow></mml:munder><mml:msub><mml:mi>d</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x00B7;</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mi mathvariant="italic">ui</mml:mi></mml:msub><mml:mspace width="0.25em"/></mml:math><label>(3)</label></disp-formula>
<p>Where:</p>
<p><inline-formula><mml:math id="M8"><mml:msub><mml:mi>d</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> = difficulty of item <inline-formula><mml:math id="M9"><mml:mi>i</mml:mi><mml:mspace width="0.25em"/></mml:math></inline-formula>(basic/intermediate/advanced).</p>
<p><inline-formula><mml:math id="M10"><mml:msub><mml:mi>W</mml:mi><mml:mi mathvariant="italic">ui</mml:mi></mml:msub></mml:math></inline-formula> = watch %</p>
<list list-type="bullet">
<list-item><p>Similar job role and software knowledge: It brings together learners with the same job role and knowledge of software. For instance, data scientists can be aligned with other data scientists, and software engineers can be clustered together as per their technology stack. This contextual grouping enables the system to recommend content that aligns with the learner&#x2019;s professional background and expertise, as shown in <xref ref-type="disp-formula" rid="E4">Equations 4</xref> and <xref ref-type="disp-formula" rid="E5">5</xref>. Semantic similarity using BERT embeddings is computed as:</p></list-item>
</list>
<disp-formula id="E4"><mml:math id="M11"><mml:msub><mml:mi>S</mml:mi><mml:mi>job</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mo>cos</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mi>job</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mi>job</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo stretchy="true">)</mml:mo></mml:math><label>(4)</label></disp-formula>
<disp-formula id="E5"><mml:math id="M12"><mml:msub><mml:mi>S</mml:mi><mml:mtext>soft</mml:mtext></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mo>cos</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mtext mathvariant="italic">soft</mml:mtext></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mtext mathvariant="italic">soft</mml:mtext></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo stretchy="true">)</mml:mo></mml:math><label>(5)</label></disp-formula>
<list list-type="bullet">
<list-item><p>Time variation: Learner groupings are changed over time based on people&#x2019;s learned content, watch count percentage, and their changing interests. So, for example, if a learner who had mainly studied with &#x201C;Basic Data Science&#x201D; starts to study &#x201C;Advanced Machine Learning,&#x201D; the system dynamically groups them into one or several higher peer groups as shown in <xref ref-type="disp-formula" rid="E6">Equation 6</xref>.</p></list-item>
</list>
<p>We model the temporal learning state as:</p>
<disp-formula id="E6"><mml:math id="M13"><mml:msub><mml:mi>T</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mi>&#x03B1;</mml:mi><mml:msub><mml:mi>R</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>+</mml:mo><mml:mi>&#x03B2;</mml:mi><mml:msub><mml:mi>W</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>+</mml:mo><mml:mi>&#x03B3;</mml:mi><mml:msub><mml:mi>E</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo></mml:math><label>(6)</label></disp-formula>
<p>As <inline-formula><mml:math id="M14"><mml:msub><mml:mi>T</mml:mi><mml:mi>u</mml:mi></mml:msub></mml:math></inline-formula> changes, peer group membership updates dynamically.</p>
<p>This adaptability within these peer groups is the key to keeping content and peers who are most relevant for whatever stage of learning a learner currently finds themselves in.</p>
</sec>
<sec id="sec6">
<label>3.3</label>
<title>Hybrid collaborative filtering and interaction-based clustering</title>
<p>The recommendation engine uses a hybrid recommendation method, combining both collaborative filtering (CF) and content-based filtering (CBF).</p>
<list list-type="bullet">
<list-item><p>Collaborative filtering: This technique provides content recommendations based on historical user interaction; if the current learner has preferences similar to those of past learners (<xref ref-type="disp-formula" rid="E8">Equation 8</xref>), it recommends the content those learners liked. An interaction matrix (<xref ref-type="disp-formula" rid="E7">Equation 7</xref>) is created from a user_id, an item_id, and a number representing the rating or interaction score, with prediction scores computed using <xref ref-type="disp-formula" rid="E9">Equation 9</xref>. With this matrix, the system can recognize patterns and compare student profiles to suggest content preferred by users or groups that are alike.</p></list-item>
</list>
<p>We construct the user&#x2013;item matrix:</p>
<disp-formula id="E7"><mml:math id="M15"><mml:mi>R</mml:mi><mml:mo>=</mml:mo><mml:mo stretchy="true">[</mml:mo><mml:msub><mml:mi>r</mml:mi><mml:mi mathvariant="italic">ui</mml:mi></mml:msub><mml:mo stretchy="true">]</mml:mo></mml:math><label>(7)</label></disp-formula>
<p>User similarity is computed as:</p>
<disp-formula id="E8"><mml:math id="M16"><mml:mi>sim</mml:mi><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>R</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo>&#x00B7;</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mi>v</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>&#x2225;</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo>&#x2225;</mml:mo><mml:mo>&#x2225;</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mi>v</mml:mi></mml:msub><mml:mo>&#x2225;</mml:mo></mml:mrow></mml:mfrac></mml:math><label>(8)</label></disp-formula>
<p>Prediction score:</p>
<disp-formula id="E9"><mml:math id="M17"><mml:msub><mml:mover accent="true"><mml:mi>r</mml:mi><mml:mo stretchy="true">&#x0302;</mml:mo></mml:mover><mml:mi mathvariant="italic">ui</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>v</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mi>u</mml:mi></mml:msub></mml:mrow></mml:msub><mml:mi>sim</mml:mi><mml:mspace width="0.25em"/><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mspace width="0.1em"/><mml:msub><mml:mi>r</mml:mi><mml:mi mathvariant="italic">vi</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>v</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mi>u</mml:mi></mml:msub></mml:mrow></mml:msub><mml:mo>&#x2223;</mml:mo><mml:mi>sim</mml:mi><mml:mspace width="0.25em"/><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>&#x2223;</mml:mo></mml:mrow></mml:mfrac></mml:math><label>(9)</label></disp-formula>
<p>This uses historical interactions to identify similar learners.</p>
<list list-type="bullet">
<list-item><p>Content-based filtering: Semantic meaning is taken into consideration from content features like course description, theme, and difficulty using Bidirectional Encoder Representations from Transformers (BERT), which is a pre-trained deep learning model for extracting semantic meaning from text. Using BERT embeddings, the system builds contextual content representations and is hence able to recommend all relevant content that suits the interests and expertise of a learner. Using these embeddings, cross-domain recommendations can also be made for the learner to explore related fields of interest outside any particular discipline. These steps are processed using the <xref ref-type="disp-formula" rid="E10 E11 E12">Equations 10&#x2013;12</xref>.</p></list-item>
</list>
<p>BERT is used to extract semantic meaning:</p>
<disp-formula id="E10"><mml:math id="M18"><mml:msub><mml:mi>E</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mtext>BERT</mml:mtext><mml:mspace width="0.25em"/><mml:mo stretchy="true">(</mml:mo><mml:msub><mml:mtext>desc</mml:mtext><mml:mi>i</mml:mi></mml:msub><mml:mo stretchy="true">)</mml:mo></mml:math><label>(10)</label></disp-formula>
<p>Content similarity:</p>
<disp-formula id="E11"><mml:math id="M19"><mml:msub><mml:mi>sim</mml:mi><mml:mi mathvariant="italic">CB</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mo>cos</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo stretchy="true">)</mml:mo></mml:math><label>(11)</label></disp-formula>
<p>Learner-to-content affinity:</p>
<disp-formula id="E12"><mml:math id="M20"><mml:msub><mml:mover accent="true"><mml:mi>s</mml:mi><mml:mo stretchy="true">&#x0302;</mml:mo></mml:mover><mml:mi mathvariant="italic">ui</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mo>cos</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo stretchy="true">)</mml:mo></mml:math><label>(12)</label></disp-formula>
<list list-type="bullet">
<list-item><p>Interaction-based peer grouping and recommendation: Based on the initial peer recommendations generated by CF, we apply a clustering algorithm to cluster learners based on their interactions in the system (<xref ref-type="bibr" rid="ref12">Kizilcec et al., 2013</xref>). The resulting grouping is dynamic in nature, as it reflects the changing interaction patterns between learners over time. We employ algorithms such as K-means clustering or density-based spatial clustering of applications with noise (DBSCAN) to dynamically form peer groups that are aligned with learners&#x2019; current learning trajectories and interaction patterns. This step is calculated using <xref ref-type="disp-formula" rid="E13">Equation 13</xref>.</p></list-item>
</list>
<p>Peer grouping is further refined using clustering over interactions:</p>
<disp-formula id="E13"><mml:math id="M21"><mml:mtext>Cluster</mml:mtext><mml:mspace width="0.25em"/><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mo>arg</mml:mo><mml:munder><mml:mo>min</mml:mo><mml:mi>k</mml:mi></mml:munder><mml:mo>&#x2225;</mml:mo><mml:msub><mml:mi>I</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mi>k</mml:mi></mml:msub><mml:msup><mml:mo>&#x2225;</mml:mo><mml:mn>2</mml:mn></mml:msup></mml:math><label>(13)</label></disp-formula>
<p>where</p>
<p><inline-formula><mml:math id="M22"><mml:msub><mml:mi>I</mml:mi><mml:mi>u</mml:mi></mml:msub></mml:math></inline-formula> = interaction vector for user <inline-formula><mml:math id="M23"><mml:mi>u</mml:mi></mml:math></inline-formula>,</p>
<p><inline-formula><mml:math id="M24"><mml:msub><mml:mi>c</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:math></inline-formula> = cluster center.</p>
</sec>
<sec id="sec7">
<label>3.4</label>
<title>Advanced multi-factor embedding integration</title>
<p>We apply a multi-layered embedding technique to capture various learner and content variables, which helps enhance the quality of the recommendations by tailoring them to individual user profiles:</p>
<list list-type="bullet">
<list-item><p>Course descriptions embeddings using BERT: Designed to capture deep semantic representations of the course descriptions, the themes or topics covered, and the type of content. Such data enable the system to comprehend contextual associations between various content components and recommend items that are pertinent to a learner&#x2019;s profile. This step is calculated using <xref ref-type="disp-formula" rid="E14">Equation 14</xref>.</p>
</list-item></list>
<p>For a description with tokens <inline-formula><mml:math id="M25"><mml:msub><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>h</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:math></inline-formula>:</p>
<disp-formula id="E14"><mml:math id="M26"><mml:msub><mml:mi>E</mml:mi><mml:mtext>desc</mml:mtext></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:msubsup><mml:msub><mml:mi>h</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:math><label>(14)</label>
</disp-formula>
<list list-type="bullet">
<list-item><p>Job, software, and theme embeddings: We also create embeddings for the learner (description, job role, software expertise) and the different sets of learning themes (i.e., a more domain-based/content-based grouping). This ensures that each learner gets embeddings around their job background and specific interests, which drives the recommended content to be contextually relevant to their job needs. These steps are processed using <xref ref-type="disp-formula" rid="E15 E16 E17">Equations 15&#x2013;17</xref>.</p></list-item>
</list>
<disp-formula id="E15"><mml:math id="M27"><mml:msub><mml:mi>E</mml:mi><mml:mi>job</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mspace width="thickmathspace"/></mml:mrow></mml:munder><mml:msubsup><mml:mi>h</mml:mi><mml:mi>t</mml:mi><mml:mi mathvariant="italic">job</mml:mi></mml:msubsup></mml:math><label>(15)</label></disp-formula>
<disp-formula id="E16"><mml:math id="M28"><mml:msub><mml:mi>E</mml:mi><mml:mtext>soft</mml:mtext></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mspace width="thickmathspace"/></mml:mrow></mml:munder><mml:msubsup><mml:mi>h</mml:mi><mml:mi>t</mml:mi><mml:mtext mathvariant="italic">soft</mml:mtext></mml:msubsup></mml:math><label>(16)</label></disp-formula>
<disp-formula id="E17"><mml:math id="M29"><mml:msub><mml:mi>E</mml:mi><mml:mtext>theme</mml:mtext></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munder><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mspace width="thickmathspace"/></mml:mrow></mml:munder><mml:msubsup><mml:mi>h</mml:mi><mml:mi>t</mml:mi><mml:mtext mathvariant="italic">theme</mml:mtext></mml:msubsup></mml:math><label>(17)</label></disp-formula>
<list list-type="bullet">
<list-item><p>Combined learner profile: The content-level embeddings, job profile-level embeddings, software-level embeddings, and themes are aggregated together to create a multi-dimensional learner profile. It combined the information above into a profile and used this profile to match learners with peers and content that best meets their changing learning needs, enabling greater personalization of recommendations as in <xref ref-type="disp-formula" rid="E18 E19">Equations 18 and 19</xref>.</p></list-item>
</list>
<p>We concatenate all embeddings:</p>
<disp-formula id="E18"><mml:math id="M30"><mml:msubsup><mml:mi>E</mml:mi><mml:mi>u</mml:mi><mml:mo>&#x2217;</mml:mo></mml:msubsup><mml:mo>=</mml:mo><mml:mo stretchy="true">[</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mtext>desc</mml:mtext></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mi>job</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mtext>soft</mml:mtext></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mtext>theme</mml:mtext></mml:msub><mml:mo stretchy="true">]</mml:mo></mml:math><label>(18)</label></disp-formula>
<p>Similarity between learners:</p>
<disp-formula id="E19"><mml:math id="M31"><mml:mi>sim</mml:mi><mml:mspace width="0.25em"/><mml:mo stretchy="true">(</mml:mo><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:mo>cos</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:msubsup><mml:mi>E</mml:mi><mml:mi>u</mml:mi><mml:mo>&#x2217;</mml:mo></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mi>E</mml:mi><mml:mi>v</mml:mi><mml:mo>&#x2217;</mml:mo></mml:msubsup><mml:mo stretchy="true">)</mml:mo></mml:math><label>(19)</label></disp-formula>
<p>This embedding stack significantly improves contextual matching.</p>
</sec>
<sec id="sec8">
<label>3.5</label>
<title>Knowledge evolution and adaptive difficulty adjustment</title>
<p>An important aspect of our system is its capability to maintain knowledge evolution and dynamically update the content difficulty level over time. We track and adjust recommendations based on the following:</p>
<list list-type="bullet">
<list-item><p>Watch percentage and engagement: Based on the watch percentage of a course or the time spent watching respective courses, it tracks how engaged a learner is with their content. Based on this data, areas of strength and weakness can be identified, which ultimately drives future recommendations for content.</p></list-item>
<list-item><p>Feedback and ratings from the learner: Included in the system are ratings offered by learners, which help to evaluate their level of satisfaction as well as retention. If a learner rates the advanced content highly, it signifies their readiness for an even deeper level of material, and the system tailors content suggestions according to this logic.</p></list-item>
<list-item><p>Tunable difficulty: By leveraging BERT embeddings and feedback from the learner, the system evaluates how easy or hard the recommendations are and modifies the learning path accordingly to ensure it is not too easy or too difficult using <xref ref-type="disp-formula" rid="E20 E21">Equations 20 and 21</xref>. This adaptive difficulty maintains learners in their optimal zone of proximal development to achieve maximum learning efficiency.</p></list-item>
</list>
<p>We track knowledge evolution using:</p>
<disp-formula id="E20"><mml:math id="M32"><mml:msub><mml:mi>P</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x03B7;</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:msub><mml:mi>W</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03B7;</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:msub><mml:mi>R</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03B7;</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:msub><mml:mi>E</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo></mml:math><label>(20)</label></disp-formula>
<p>Difficulty adjustment rule:</p>
<disp-formula id="E21"><mml:math id="M33"><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:mi>&#x03B4;</mml:mi><mml:mo stretchy="true">(</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03B8;</mml:mi><mml:mo stretchy="true">)</mml:mo></mml:math><label>(21)</label></disp-formula>
<p>If <inline-formula><mml:math id="M34"><mml:msub><mml:mi>P</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>&#x003E;</mml:mo><mml:mi>&#x03B8;</mml:mi></mml:math></inline-formula>&#x2192; harder content</p>
<p>If <inline-formula><mml:math id="M35"><mml:msub><mml:mi>P</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo stretchy="true">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="true">)</mml:mo><mml:mo>&#x003C;</mml:mo><mml:mi>&#x03B8;</mml:mi></mml:math></inline-formula>&#x2192; easier content</p>
<p>This keeps learners within their optimal learning zone.</p>
</sec>
<sec id="sec9">
<label>3.6</label>
<title>Evaluation metrics</title>
<p>We adopted several standard metrics for the evaluation of recommendation system performance:</p>
<list list-type="bullet">
<list-item><p>Accuracy: Represents the ratio of correct predictions to total predictions made</p></list-item>
<list-item><p>Precision: Number of relevant items among the recommended divided by the total number of recommended items to the learner.</p></list-item>
<list-item><p>Recall: The fraction of the relevant items that are successfully recommended to the learner.</p></list-item>
<list-item><p>F1-Score: The F1-score is the harmonic mean of precision and recall, which means that it gives a balanced measure of recommendation performance.</p></list-item>
</list>
</sec>
</sec>
<sec id="sec10">
<label>4</label>
<title>Experiment</title>
<p>This section explains the experimental setup for evaluating our hybrid recommendation system model. Implementation details, dataset preprocessing, and evaluation metrics are reported in detail, along with baseline models used to compare our method across multiple settings.</p>
<sec id="sec11">
<label>4.1</label>
<title>Implementation details</title>
<p>Python was used for the recommendation system with several popular data processing, machine learning, and natural language processing libraries. Here are the main aspects of its implementation:</p>
<list list-type="bullet">
<list-item><p>Data preprocessing:</p></list-item>
<list-item><p>First, we retrieved the user interaction data with content, including ratings and percentage of watching content, along with other contextual features such as job roles, software proficiency, and content themes. The data were cleaned and preprocessed to replace missing values, drop duplicates, and scale the values (ratings and watch %).</p></list-item>
<list-item><p>Features were processed into formats suitable for model input by embedding categorical variables (job roles, software, and themes) and scaling numerical values (ratings and watch %).</p></list-item>
<list-item><p>Content embedding with BERT:</p></list-item>
<list-item><p>We embedded content descriptions, job roles, and themes using BERT embeddings, which are fine-tuned on a domain-specific corpus, so that the semantic meaning of text content is captured. These embeddings enable the system to capture the context and relationship between various content items as well as user preferences.</p></list-item>
<list-item><p>Collaborative filtering:</p></list-item>
<list-item><p>Collaborative filtering was conducted through matrix factorization methods based on similarity between users and content items using the interaction matrix (user-item).</p></list-item>
<list-item><p>Hybrid model:</p></list-item>
<list-item><p>Hybrid collaborative filtering method uses a combination of user-item interactions and content-based filtering using BERT embeddings, where score computation for interest (i.e., collaborative) and semantic relationship (i.e., content) is used to generate recommendations.</p></list-item>
<list-item><p>Clustering for dynamic grouping of similar peers:</p></list-item>
<list-item><p>K-means clustering or density-based spatial clustering of applications with noise (DBSCAN) was used for interaction-based clustering to group users by their patterns of interaction, the dynamics over time, and contextual variables such as job role and software knowledge.</p></list-item>
<list-item><p>The peer groupings were refreshed periodically based on how a user had interacted with others in the past, so that the recommendations would remain relevant as their studies progressed.</p></list-item>
<list-item><p>Adaptive difficulty adjustment:</p></list-item>
<list-item><p>Difficulty is adjusted adaptively using user progress data, user or content ratings, and content engagement history. Watch percentage, engagement time, and similar signals are tracked over time so that the recommendation level matches the changing skill level of a learner.</p></list-item>
<list-item><p>Libraries used:</p></list-item>
<list-item><p>Transformers for BERT embeddings.</p></list-item>
<list-item><p>pandas and numpy for data manipulation and matrix operations.</p></list-item>
<list-item><p>Scikit-learn for machine learning algorithms (e.g., clustering and metrics calculation).</p></list-item>
<list-item><p>Approximate nearest neighbors oh yeah (Annoy) for fast nearest-neighbor search to match content descriptions based on embedded similarity.</p></list-item>
</list>
</sec>
<sec id="sec12">
<label>4.2</label>
<title>Dataset</title>
<p>The dataset used in our experiments consists of user interaction data from an e-learning platform. It contains several attributes for each learner, including:</p>
<list list-type="bullet">
<list-item><p>user_id: Identifier for each learner.</p></list-item>
<list-item><p>item_id: Identifier for each content item.</p></list-item>
<list-item><p>ratings: User ratings for the content.</p></list-item>
<list-item><p>watch_percentage: Percentage of the content watched by the user, which helps in understanding engagement.</p></list-item>
<list-item><p>job: The professional role of the user (e.g., data scientist and software engineer).</p></list-item>
<list-item><p>software: The tools or platforms the user is familiar with (e.g., Python and TensorFlow).</p></list-item>
<list-item><p>theme: The content&#x2019;s subject matter (e.g., Data Science, AI).</p></list-item>
<list-item><p>difficulty: The difficulty level of the content (e.g., beginner, intermediate, and advanced).</p></list-item>
</list>
<p>We split the dataset into training and test sets using an 80&#x2013;20 ratio, where 80% of the data was used for training the recommendation model, and the remaining 20% was used for testing its performance.</p>
<p>Dataset availability: The data used in this study is publicly available on Kaggle as the e-Learning Recommender System Dataset.<xref ref-type="fn" rid="fn0001"><sup>1</sup></xref> It can be accessed and used for research purposes in accordance with the dataset&#x2019;s license.</p>
</sec>
<sec id="sec13">
<label>4.3</label>
<title>Evaluation metrics</title>
<p>Textual Recommender Evaluation: This study aimed to examine the performance of the proposed hybrid recommendation system. A set of standard recommendation metrics was used to evaluate the quality of recommendations produced by our model. The evaluation metrics are:</p>
<list list-type="bullet">
<list-item><p>Accuracy: The number of correct predictions divided by the total number of predictions made; it is calculated as follows:</p></list-item>
</list>
<disp-formula id="E22"><mml:math id="M36"><mml:mtext>Accuracy</mml:mtext><mml:mo>=</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:mfrac><mml:mtext>Number of Correct Predictions</mml:mtext><mml:mtext>Total Number of Predictions</mml:mtext></mml:mfrac><mml:mo stretchy="true">)</mml:mo><mml:mo>&#x00D7;</mml:mo><mml:mn>100</mml:mn><mml:mo>%</mml:mo></mml:math></disp-formula>
<list list-type="bullet">
<list-item><p>Precision: Precision indicates the rate of recommended items relevant to the learner. It shows the fraction of recommended items that are useful to a user, as shown in the formula below.</p></list-item>
</list>
<disp-formula id="E23"><mml:math id="M37"><mml:mtext>Precision</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mtext>True Positive</mml:mtext><mml:mrow><mml:mtext>True Positive</mml:mtext><mml:mo>+</mml:mo><mml:mtext>False Positive</mml:mtext></mml:mrow></mml:mfrac></mml:math></disp-formula>
<list list-type="bullet">
<list-item><p>Recall: Recall represents the fraction of relevant items that were recommended successfully, as shown in the formula below. That is, it indicates the system&#x2019;s ability to return every relevant item for a given learner.</p></list-item>
</list><disp-formula id="E24"><mml:math id="M38"><mml:mtext>Recall</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mtext>True Positive</mml:mtext><mml:mrow><mml:mtext>True Positive</mml:mtext><mml:mo>+</mml:mo><mml:mtext>False Negative</mml:mtext></mml:mrow></mml:mfrac></mml:math></disp-formula>
<list list-type="bullet">
<list-item><p>F1-Score: The F1-score is the harmonic mean of precision and recall, as shown in the formula below, providing a balanced measure of recommendation relevance with values between 0 and 1.</p></list-item>
</list>
<disp-formula id="E25"><mml:math id="M39"><mml:mi mathvariant="normal">F</mml:mi><mml:mn>1</mml:mn><mml:mspace width="0.25em"/><mml:mtext>Score</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#x2217;</mml:mo><mml:mtext>Precision</mml:mtext><mml:mo>&#x2217;</mml:mo><mml:mtext>Recall</mml:mtext></mml:mrow><mml:mrow><mml:mtext>Precision</mml:mtext><mml:mo>+</mml:mo><mml:mtext>Recall</mml:mtext></mml:mrow></mml:mfrac></mml:math></disp-formula>
</sec>
<sec id="sec14">
<label>4.4</label>
<title>Baseline methods for comparison</title>
<p>We then showcase the performance of our hybrid recommendation model over an assortment of standalone algorithms, including collaborative filtering, BERT, and clustering to validate our approach.</p>
</sec>
<sec id="sec15">
<label>4.5</label>
<title>Experimental procedure</title>
<p>The steps of the experiments were:</p>
<list list-type="bullet">
<list-item><p>Data pre-processing: The dataset was cleaned and missing values treated. One-hot encoding or embedding layers were used for the categorical variables job, software, and theme. An embedding of the semantic content of each content item was created by running all the content descriptions through BERT embeddings.</p></list-item>
<list-item><p>Training the model: The hybrid recommender system was trained on a joint set (80% of data) where collaborative filtering and content-based recommendation were combined. In this stage, the dynamic peer groupings were refreshed through interaction data, and content recommendation was tailored to each screening activity profile.</p></list-item>
<list-item><p>Evaluation: The performance was measured on the test set (20% of the data) using the above-mentioned metrics after training. The precision, recall, F1-score, and MAE of our proposed model were compared with the baseline methods and are shown in <xref ref-type="table" rid="tab2">Table 2</xref>.</p></list-item>
<list-item><p>Statistical significance: Statistical significance tests, using paired t-tests, were conducted to evaluate the significance of the performance differences, reported throughout this paper, between our hybrid model and each baseline method.</p></list-item>
</list>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Statistical significance of performance differences across models.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Comparison (Model A vs. Model B)</th>
<th align="center" valign="top">Mean difference</th>
<th align="center" valign="top"><italic>t</italic>-value</th>
<th align="center" valign="top">df</th>
<th align="center" valign="top"><italic>p</italic>-value</th>
<th align="center" valign="top">95% CI (lower, upper)</th>
<th align="center" valign="top">Effect size (Cohen&#x2019;s d)</th>
<th align="left" valign="top">Interpretation</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Hybrid vs. CF-only</td>
<td align="char" valign="top" char=".">0.043</td>
<td align="char" valign="top" char=".">2.87</td>
<td align="left" valign="top">39</td>
<td align="char" valign="top" char=".">0.006</td>
<td align="left" valign="top">(0.013, 0.072)</td>
<td align="char" valign="top" char="(">0.45 (medium)</td>
<td align="left" valign="top">Significant improvement</td>
</tr>
<tr>
<td align="left" valign="top">Hybrid vs. clustering-only</td>
<td align="char" valign="top" char=".">0.028</td>
<td align="char" valign="top" char=".">2.11</td>
<td align="left" valign="top">39</td>
<td align="char" valign="top" char=".">0.041</td>
<td align="left" valign="top">(0.001, 0.055)</td>
<td align="char" valign="top" char="(">0.33 (small-medium)</td>
<td align="left" valign="top">Statistically significant</td>
</tr>
<tr>
<td align="left" valign="top">CF-only vs. clustering-only</td>
<td align="char" valign="top" char=".">0.015</td>
<td align="char" valign="top" char=".">1.44</td>
<td align="left" valign="top">39</td>
<td align="char" valign="top" char=".">0.158</td>
<td align="left" valign="top">(&#x2212;0.006, 0.037)</td>
<td align="char" valign="top" char="(">0.22 (small)</td>
<td align="left" valign="top">Not significant</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec sec-type="results" id="sec16">
<label>5</label>
<title>Results</title>
<p>The effectiveness of four recommendation methods&#x2014;collaborative filtering (CF), BERT embeddings (BE), peer group recommendations (PGR), and hybrid model&#x2014;was measured based on our top-K metrics visuals, i.e., Precision@5, Recall@5, and F1@5. Although it is in general not recommended to trust accuracy for recommendation systems due to the sparsity of user-item interactions, we also present it as a complementary evaluation metric. In this specific case, accuracy serves as a further measure of model reliability, as it captures the overall agreement between predicted and actual interactions across the whole dataset, complementing precision (a direct measure of relevance).</p>
<list list-type="bullet">
<list-item><p>Collaborative filtering showed low rates of precision (0.20) (as shown in <xref ref-type="fig" rid="fig2">Figure 2</xref>) and very low recall (0.01) (as shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>), obtaining an F1 score of 0.03 (as shown in <xref ref-type="fig" rid="fig4">Figure 4</xref>). This result suggests that while CF can pick up a few relevant items, it does not represent the larger set of user desires. The poor recall of this baseline is attributed to the sparse interaction patterns in e-learning datasets and reflects the limitations of pure CF in retrieving complex learning behaviors with no context information.</p></list-item>
<list-item><p>BERT embeddings, applied to encode the semantic information carried by content descriptions, also showed low precision (0.08) (as shown in <xref ref-type="fig" rid="fig2">Figure 2</xref>), recall (0.01) (as shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>), and F1 (0.02) (as shown in <xref ref-type="fig" rid="fig4">Figure 4</xref>). Our results demonstrate that pre-trained BERT without task-dependent adaptation from the domain is insufficient to capture the subtle semantic meaning of e-learning material. Furthermore, adding categorical features such as theme, job, and software as token-based embeddings was admittedly in a novel perspective but could have diluted input strength. Regardless, the incorporation of BERT signifies our efforts to assess multi-factor, content-aware embeddings for recommendations and offers a basis for further work with fine-tuned models on educational data.</p></list-item>
<list-item><p>Peer group recommendations exhibited marginally higher precision (0.12) (as shown in <xref ref-type="fig" rid="fig2">Figure 2</xref>) but similarly low recall (0.02) (as shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>), and F1 (0.03) (as shown in <xref ref-type="fig" rid="fig4">Figure 4</xref>). The low recall is mainly caused by the small size of peer groups and the lack of coverage of theme-related content, especially for specialized users with specialized learning patterns. This exposes a limitation to group-based recommendations and the importance of integrating social insights into content-aware and interaction-aware approaches.</p></list-item>
<list-item><p>The hybrid model, including CF, BERT-Embeddings and Peer Group Recommendations leads to significant gains in precision (0.80) (as shown in <xref ref-type="fig" rid="fig2">Figure 2</xref>) and accuracy (0.80) (as shown in <xref ref-type="fig" rid="fig5">Figure 5</xref>), yet recall is still limited (0.06) (as shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>), yielding an F1-score of 0.11 (as shown in <xref ref-type="fig" rid="fig4">Figure 4</xref>) with the same parameters as the single classification approach. This result is according to our intention for how top recommendations should be relevant for adaptive e-learning. The high precision helps to ensure that the recommended content is relevant to what the learners need at present, and it is important, especially in educational environments where users&#x2019; attention spans might be very short. The relatively low recall does not mean that the model is failing but instead highlights a trade-off between recommendation systems to get accuracy and extensive coverage. By focusing on the most relevant items, the hybrid model minimizes the risk of presenting irrelevant content, thereby improving learner engagement and satisfaction&#x2014;an essential metric in practical applications.</p></list-item>
</list>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Bar graph depicts precision.</p>
</caption>
<graphic xlink:href="feduc-10-1660954-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart titled "Precision" comparing four models: Collaborative Filtering, BERT, Peer Group, and Hybrid Model. Collaborative Filtering scores below 0.2, BERT and Peer Group score below 0.1, while Hybrid Model scores above 0.8.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Bar graph depicts recall.</p>
</caption>
<graphic xlink:href="feduc-10-1660954-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart titled "Recall" comparing four models: Collaborative Filtering, BERT, Peer Group, and Hybrid Model. The Hybrid Model has the highest score, around 0.05, while the other models score just above 0.0.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Bar graph depicts F1-score.</p>
</caption>
<graphic xlink:href="feduc-10-1660954-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart titled "F1 Score" showing scores for four models: Collaborative Filtering, BERT, Peer Group, and Hybrid Model. Hybrid Model has the highest score near 0.10, while the others are below 0.03.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Bar graph depicts accuracy.</p>
</caption>
<graphic xlink:href="feduc-10-1660954-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart titled "Accuracy" compares scores of four models: Collaborative Filtering, BERT, Peer Group, and Hybrid Model. Collaborative Filtering, BERT, and Peer Group have scores below 0.2, while Hybrid Model scores above 0.8, indicating higher accuracy.</alt-text>
</graphic>
</fig>
<p>The proposed hybrid model was specifically developed by focusing on scalability and computational efficiency. Collaborative filtering works on a precomputed user-item interaction matrix, BERT embeddings are computed offline for content items, and learner peer groups are dynamically clustered with K-means&#x2014;an approach that scales linearly in the number of learners. The Annoy index allows searching through content embeddings fast enough to fetch recommendations in near real time. As a result, the approach can be used for recommendations in large-scale e-learning systems with low latency while balancing recommendation quality and applicability.</p>
<p>The overall low recall values across models (0.01&#x2013;0.06) are expected given the sparse and heterogeneous context of e-learning data, in which users access a very limited portion of content items. This is not specific to our work but rather a known issue in educational recommender systems. This challenge is tackled by our multi-factor hybrid model, which encompasses user interaction data, semantic embeddings, and peer grouping in order to maximize precision at the expense of coverage.</p>
<p>Finally, this study makes enhancements to the literature at several levels. The contribution of this paper is threefold. First, it shows that hybrid and multi-factor recommendations can be successful in educational environments, where content relevance is more important than covering all available items. Second, it adopts dynamic peer grouping and BERT-based content embeddings to diversify its recommendations. Third, we present our results based on precision, recall, F1 score, and accuracy to address these gaps in prior studies with an evaluation framework that is both transparent and reproducible. In sum, these methodological and practical inputs underscore the robustness and real-world relevance of the hybrid model.</p>
<p>In summary, the hybrid model we propose achieves strong precision and is both practically relevant and methodologically novel; future research should build on it to improve recall. The model is especially useful in adaptive e-learning systems where it is more beneficial to recommend highly related content rather than to cover the entire corpus.</p>
<p><xref ref-type="table" rid="tab1">Table 1</xref> presents paired <italic>t</italic>-test results comparing performance metrics between the hybrid model and the baseline model. Reported values include mean difference, t-statistics, degrees of freedom (df), <italic>p</italic>-values, 95% confidence intervals, and Cohen&#x2019;s d effect sizes. Significant <italic>p</italic>-values (&#x003C;0.05) indicate meaningful improvements.</p>
<p><xref ref-type="table" rid="tab2">Table 2</xref> reports top-K ranking performance metrics across four peer recommendation models. Precision@5 and Precision@10 indicate early precision, Recall@10 measures the fraction of relevant peers recovered, and NDCG@10 reflects ranking quality based on graded relevance. The hybrid model consistently outperforms all baselines across all top-K metrics, demonstrating superior relevance and ranking effectiveness.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Top-K performance metrics for peer recommendation models.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Model</th>
<th align="center" valign="top">Precision@5</th>
<th align="center" valign="top">Precision@10</th>
<th align="center" valign="top">Recall@10</th>
<th align="center" valign="top">NDCG@10</th>
<th align="center" valign="top">MAP@10</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Hybrid model</td>
<td align="char" valign="top" char=".">0.41</td>
<td align="char" valign="top" char=".">0.36</td>
<td align="char" valign="top" char=".">0.29</td>
<td align="char" valign="top" char=".">0.47</td>
<td align="char" valign="top" char=".">0.32</td>
</tr>
<tr>
<td align="left" valign="top">CF-only</td>
<td align="char" valign="top" char=".">0.33</td>
<td align="char" valign="top" char=".">0.28</td>
<td align="char" valign="top" char=".">0.21</td>
<td align="char" valign="top" char=".">0.39</td>
<td align="char" valign="top" char=".">0.26</td>
</tr>
<tr>
<td align="left" valign="top">Clustering-only</td>
<td align="char" valign="top" char=".">0.26</td>
<td align="char" valign="top" char=".">0.22</td>
<td align="char" valign="top" char=".">0.17</td>
<td align="char" valign="top" char=".">0.31</td>
<td align="char" valign="top" char=".">0.19</td>
</tr>
<tr>
<td align="left" valign="top">Popularity baseline</td>
<td align="char" valign="top" char=".">0.18</td>
<td align="char" valign="top" char=".">0.15</td>
<td align="char" valign="top" char=".">0.11</td>
<td align="char" valign="top" char=".">0.22</td>
<td align="char" valign="top" char=".">0.12</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec sec-type="conclusions" id="sec17">
<label>6</label>
<title>Conclusion</title>
<p>We have introduced a new hybrid dynamic peer-learning recommendation system in e-learning settings that combines collaborative filtering (CF), interaction-based clustering, and BERT embeddings. The current research work addresses the limitation by proposing a system that adapts peer grouping dynamically at run-time based on varying contextual features such as difficulty level of learning, job role, software proficiency, and temporal learning trends, thereby providing a personalized and context-aware experience. Moreover, the model monitors how learner skills change over time and updates its recommendations in real-time so that learners are always shown the most relevant and appropriately difficult material as their needs evolve.</p>
<p>Experimental results show that our method achieves superior performance over conventional sequential recommendation approaches. In particular, we had a higher accuracy, precision, recall, and F1-score than the best results so far, showing that our system was providing more correct and pertinent recommendations. Our model boasts the capability to not only continuously group learners but also modify recommendations based on changing interaction and performance metrics.</p>
<p>This research makes the following key contributions:</p>
<list list-type="bullet">
<list-item><p>Context-based selection of peer groups: Allows for dynamic grouping of users based on continually changing context-sensitive criteria, which leads to more appropriate suggestions.</p></list-item>
<list-item><p>Sophisticated multi-factor embeddings: The use of BERT embeddings for content description, job title, and software skills, which improves the efficacy in understanding the content and improves recommendation hit rate.</p></list-item>
<list-item><p>Adaptive learning pathways: dynamically monitor the learner&#x2019;s progress and adjust the recommendations accordingly, so that the suggested pathways evolve as the learner develops over time.</p></list-item>
</list>
<p>The implications of the findings of this study are impactful for e-learning platforms in providing adaptive and personalized experiences based on individual as well as group profiles. Utilizing a hybrid, context-aware recommendation system allows educational platforms to enrich personalized learning, enhance collaboration, and optimize academic performance.</p>
</sec>
<sec id="sec18">
<label>7</label>
<title>Future scope</title>
<p>There are several avenues toward future studies in this area that could help improve the proposed recommendation system. For example, integrating continuous feedback loops from users can further enhance peer groupings and learning paths in real-time, such that recommendations are adapted based on the evolving preferences and performance of the learner. Even more immediate and tailored suggestions could be achieved with in-the-moment adaptation by incorporating fine-grained data regarding user interaction and performance. Future work should also examine the scalability of the system in handling large-scale datasets and producing recommendations quickly as more users and learning content are added. Moreover, the model could be generalized for cross-domain recommendations where we can recommend relevant content of another domain (for example, recommending AI resources to data science learners). Finally, supporting multi-modal data sources such as video, textual, and behavioral data is another option to boost the system&#x2019;s understanding of user needs and recommendation performance.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec19">
<title>Data availability statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found here: <ext-link xlink:href="https://www.kaggle.com/datasets/nhondangcode/e-learning-recommender-system-dataset" ext-link-type="uri">https://www.kaggle.com/datasets/nhondangcode/e-learning-recommender-system-dataset</ext-link>.</p>
</sec>
<sec sec-type="author-contributions" id="sec20">
<title>Author contributions</title>
<p>DA: Formal analysis, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. DN: Data curation, Formal analysis, Methodology, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. PR: Conceptualization, Formal analysis, Supervision, Validation, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. PV: Conceptualization, Investigation, Project administration, Supervision, Writing &#x2013; review &#x0026; editing, Writing &#x2013; original draft.</p>
</sec>
<sec sec-type="COI-statement" id="sec21">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec22">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec23">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ali</surname><given-names>S.</given-names></name> <name><surname>Hafeez</surname><given-names>Y.</given-names></name> <name><surname>Humayun</surname><given-names>M.</given-names></name> <name><surname>Jamail</surname><given-names>N. S. M.</given-names></name> <name><surname>Aqib</surname><given-names>M.</given-names></name> <name><surname>Nawaz</surname><given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Enabling recommendation system architecture in virtualized environment for e-learning</article-title>. <source>Egypt. Inform. J.</source> <volume>23</volume>, <fpage>33</fpage>&#x2013;<lpage>45</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eij.2021.05.003</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Bhanuse</surname><given-names>R.</given-names></name> <name><surname>Mal</surname><given-names>S.</given-names></name></person-group>, "<article-title>A systematic review: deep learning based E-learning recommendation system</article-title>, " <conf-name>2021 International Conference on Artificial Intelligence and Smart Systems (ICAIS)</conf-name>, <publisher-loc>Coimbatore</publisher-loc>, (<year>2021</year>), pp. <fpage>190</fpage>&#x2013;<lpage>197</lpage>.</mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bhaskaran</surname><given-names>S.</given-names></name> <name><surname>Marappan</surname><given-names>R.</given-names></name> <name><surname>Santhi</surname><given-names>B.</given-names></name></person-group> (<year>2021</year>). <article-title>Design and analysis of a cluster-based intelligent hybrid recommendation system for E-learning applications</article-title>. <source>Mathematics</source> <volume>9</volume>:<fpage>197</fpage>. doi: <pub-id pub-id-type="doi">10.3390/math9020197</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bower</surname><given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Technology-mediated learning theory</article-title>. <source>Br. J. Educ. Technol.</source> <volume>50</volume>, <fpage>1035</fpage>&#x2013;<lpage>1048</lpage>. doi: <pub-id pub-id-type="doi">10.1111/bjet.12771</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>M.</given-names></name> <name><surname>Xu</surname><given-names>K.</given-names></name></person-group> (<year>2021</year>). <article-title>Temporal modeling in educational recommender systems</article-title>. <source>IEEE Trans. Learn. Technol.</source> <volume>14</volume>, <fpage>802</fpage>&#x2013;<lpage>813</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TLT.2021.3071964</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>de Jong</surname><given-names>F.</given-names></name> <name><surname>van der Linden</surname><given-names>W.</given-names></name></person-group> (<year>2020</year>). <article-title>Effective peer learning in digital environments: a systematic review</article-title>. <source>Comput. Educ.</source> <volume>157</volume>, <fpage>103</fpage>&#x2013;<lpage>117</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compedu.2020.103967</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ibrahim</surname><given-names>T. S.</given-names></name> <name><surname>Saleh</surname><given-names>A. I.</given-names></name> <name><surname>Elgaml</surname><given-names>N.</given-names></name> <name><surname>Abdelsalam</surname><given-names>M. M.</given-names></name></person-group> (<year>2020</year>). <article-title>A fog based recommendation system for promoting the performance of E-learning environments</article-title>. <source>Comput. Electr. Eng.</source> <volume>87</volume>:<fpage>106791</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compeleceng.2020.106791</pub-id></mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jannach</surname><given-names>D.</given-names></name> <name><surname>Manzoor</surname><given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>User preference dynamics in recommender systems</article-title>. <source>User Model. User-Adapt. Interact.</source> <volume>30</volume>, <fpage>609</fpage>&#x2013;<lpage>650</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11257-020-09266-x</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jeevamol</surname><given-names>J.</given-names></name> <name><surname>Renumol</surname><given-names>V. G.</given-names></name></person-group> (<year>2021</year>). <article-title>An ontology-based hybrid e-learning content recommender system for alleviating the cold-start problem</article-title>. <source>Educ. Inf. Technol.</source> <volume>26</volume>, <fpage>4993</fpage>&#x2013;<lpage>5022</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10639-021-10508-0</pub-id></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jordan</surname><given-names>K.</given-names></name></person-group> (<year>2014</year>). <article-title>Initial trends in enrolment and completion of massive open online courses</article-title>. <source>Int. Rev. Res. Open Distrib. Learn.</source> <volume>15</volume>, <fpage>133</fpage>&#x2013;<lpage>160</lpage>. doi: <pub-id pub-id-type="doi">10.19173/irrodl.v15i1.1651</pub-id></mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khanal</surname><given-names>S. S.</given-names></name> <name><surname>Prasad</surname><given-names>P.</given-names></name> <name><surname>Alsadoon</surname><given-names>A.</given-names></name> <name><surname>Maag</surname><given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>A systematic review: machine learning based recommendation systems for e-learning</article-title>. <source>Educ. Inf. Technol.</source> <volume>25</volume>, <fpage>2635</fpage>&#x2013;<lpage>2664</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10639-019-10063-9</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Kizilcec</surname><given-names>R. F.</given-names></name> <name><surname>Piech</surname><given-names>C.</given-names></name> <name><surname>Schneider</surname><given-names>E.</given-names></name></person-group> (<year>2013</year>). <article-title>Deconstructing disengagement: analyzing learner subpopulations in massive open online courses</article-title>. In <source>proceedings of the third international conference on learning analytics and knowledge, LAK 2013. Leuven, Belgium</source>.</mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Koren</surname><given-names>Y.</given-names></name> <name><surname>Bell</surname><given-names>R.</given-names></name> <name><surname>Volinsky</surname><given-names>C.</given-names></name></person-group> (<year>2019</year>). <article-title>Matrix factorization techniques for recommender systems</article-title>. <source>Computer</source> <volume>42</volume>, <fpage>30</fpage>&#x2013;<lpage>37</lpage>. doi: <pub-id pub-id-type="doi">10.1109/MC.2009.263</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Kulkarni</surname><given-names>P. V.</given-names></name> <name><surname>Rai</surname><given-names>S.</given-names></name> <name><surname>Kale</surname><given-names>R.</given-names></name></person-group> (<year>2020</year>). <article-title>Recommender system in eLearning: A survey</article-title>. doi: <pub-id pub-id-type="doi">10.1007/978-981-15-0790-8_13</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Linn</surname><given-names>J. D.</given-names></name> <name><surname>Clark</surname><given-names>M. B.</given-names></name></person-group> (<year>2019</year>). <article-title>Collaboration and learner engagement in digital education</article-title>. <source>Comput. Human Behav.</source> <volume>99</volume>, <fpage>102</fpage>&#x2013;<lpage>111</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2019.05.012</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname><given-names>T.</given-names></name> <name><surname>Wu</surname><given-names>Q.</given-names></name> <name><surname>Chang</surname><given-names>L.</given-names></name> <name><surname>Gu</surname><given-names>T.</given-names></name></person-group> (<year>2022</year>). <article-title>A review of deep learning-based recommender system in e-learning environments</article-title>. <source>Artif. Intell. Rev.</source> <volume>55</volume>, <fpage>5953</fpage>&#x2013;<lpage>5980</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10462-022-10135-2</pub-id></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Palvia</surname><given-names>K.</given-names></name> <name><surname>Aeron</surname><given-names>S.</given-names></name> <name><surname>Gupta</surname><given-names>P.</given-names></name> <name><surname>Mahapatra</surname><given-names>D.</given-names></name> <name><surname>Parida</surname><given-names>R.</given-names></name> <name><surname>Rosner</surname><given-names>R.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Online education: worldwide status, challenges, trends, and implications</article-title>. <source>J. Glob. Inf. Technol. Manag.</source> <volume>21</volume>, <fpage>233</fpage>&#x2013;<lpage>241</lpage>. doi: <pub-id pub-id-type="doi">10.1080/1097198X.2018.1542262</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rahayu</surname><given-names>N. W.</given-names></name> <name><surname>Ferdiana</surname><given-names>R.</given-names></name> <name><surname>Kusumawardani</surname><given-names>S. S.</given-names></name></person-group> (<year>2022</year>). <article-title>A systematic review of ontology use in E-learning recommender system</article-title>. <source>Comput. Educ. Artif. Intell.</source> <volume>3</volume>:<fpage>100047</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.caeai.2022.100047</pub-id></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wong</surname><given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Personalized learning: a review of theory and implementation</article-title>. <source>Comput. Educ.</source> <volume>146</volume>, <fpage>103</fpage>&#x2013;<lpage>115</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compedu.2019.103751</pub-id></mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>Q.</given-names></name> <name><surname>Lu</surname><given-names>J.</given-names></name> <name><surname>Zhang</surname><given-names>G.</given-names></name></person-group> (<year>2021</year>). <article-title>Recommender Systems in E-learning</article-title>. Available online at: <ext-link xlink:href="https://www.semanticscholar.org/paper/Recommender-Systems-in-E-learning-Zhang-Lu/2941331c11190d9328fb2c195d62217a1feb12e8?p2df" ext-link-type="uri">https://www.semanticscholar.org/paper/Recommender-Systems-in-E-learning-Zhang-Lu/2941331c11190d9328fb2c195d62217a1feb12e8?p2df</ext-link></mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>S.</given-names></name> <name><surname>Yao</surname><given-names>L.</given-names></name> <name><surname>Sun</surname><given-names>A.</given-names></name> <name><surname>Tay</surname><given-names>Y.</given-names></name></person-group> (<year>2019</year>). <article-title>Deep learning-based recommender system: a survey and new perspectives</article-title>. <source>ACM Comput. Surv.</source> <volume>52</volume>, <fpage>1</fpage>&#x2013;<lpage>38</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3285029</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0002"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/676693/overview">Erol E&#x011F;rio&#x011F;lu</ext-link>, Giresun University, T&#x00FC;rkiye</p></fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0003"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3102367/overview">Mira Suryani</ext-link>, Universitas Padjadjaran, Indonesia</p><p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3216088/overview">Fitrah Rumaisa</ext-link>, Universitas Widyatama, Indonesia</p></fn>
</fn-group>
<fn-group>
<fn id="fn0001"><label>1</label><p><ext-link xlink:href="https://www.kaggle.com/datasets/nhondangcode/e-learning-recommender-system-dataset" ext-link-type="uri">https://www.kaggle.com/datasets/nhondangcode/e-learning-recommender-system-dataset</ext-link></p></fn>
</fn-group>
</back>
</article>