<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurorobot.</journal-id>
<journal-title>Frontiers in Neurorobotics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurorobot.</abbrev-journal-title>
<issn pub-type="epub">1662-5218</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnbot.2024.1481297</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>An improved graph factorization machine based on solving unbalanced game perception</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Xie</surname> <given-names>Xiaoxia</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2816516/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Jia</surname> <given-names>Yuan</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Ma</surname> <given-names>Tiande</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2818325/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>BYD Company Limited</institution>, <addr-line>Shenzhen</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>School of Statistics, Renmin University of China</institution>, <addr-line>Beijing</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>School of Computer Science and Technology, Xinjiang University</institution>, <addr-line>Urumqi</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0001">
<p>Edited by: Yu Zhang, Beihang University, China</p>
</fn>
<fn fn-type="edited-by" id="fn0002">
<p>Reviewed by: Yinsheng Li, Henan University of Technology, China</p>
<p>Yongzhi Zhai, Xi&#x2019;an University of Posts and Telecommunications, China</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Tiande Ma, <email>20201303225@stu.xju.edu.cn</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>04</day>
<month>12</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>18</volume>
<elocation-id>1481297</elocation-id>
<history>
<date date-type="received">
<day>15</day>
<month>08</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>22</day>
<month>11</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2024 Xie, Jia and Ma.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Xie, Jia and Ma</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>The user perception of mobile game is crucial for improving user experience and thus enhancing game profitability. The sparse data captured in the game can lead to sporadic performance of the model. This paper proposes a new method, the balanced graph factorization machine (BGFM), based on existing algorithms, considering the data imbalance and important high-dimensional features. The data categories are first balanced by Borderline-SMOTE oversampling, and then features are represented naturally in a graph-structured way. The highlight is that the BGFM contains interaction mechanisms for aggregating beneficial features. The results are represented as edges in the graph. Next, BGFM combines factorization machine (FM) and graph neural network strategies to concatenate any sequential feature interactions of features in the graph with an attention mechanism that assigns inter-feature weights. Experiments were conducted on the collected game perception dataset. The performance of proposed BGFM was compared with eight state-of-the-art models, significantly surpassing all of them by AUC, precision, recall, and F-measure indices.</p>
</abstract>
<kwd-group>
<kwd>machine learning</kwd>
<kwd>mobile game user evaluation</kwd>
<kwd>quality of experience</kwd>
<kwd>factorization machine</kwd>
<kwd>graph neural network</kwd>
</kwd-group>
<contract-num rid="cn1">2023TSYCTD</contract-num>
<contract-sponsor id="cn1">Tianshan Talent Training Project-Xinjiang Science and Technology Innovation Team Program</contract-sponsor>
<counts>
<fig-count count="1"/>
<table-count count="2"/>
<equation-count count="9"/>
<ref-count count="46"/>
<page-count count="7"/>
<word-count count="5911"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Mobile games gained a large share of global business, especially during the COVID-induced dead season for other entertainment businesses and activities. Game-related services, from run to finish, interact with each other in multiple directions. The complex functionality of the game user during play requires multiple services to reach together, which involve different functions. The application runs by invoking the most appropriate ones from many alternative services to be combined. In a real Internet environment, multiple service providers usually offer services with the required functionality. These services are distributed differently and hosted on servers in different user regions. These many services are combined through network selection and application invocation to realize the complex functionality the user requires. Therefore, how the customer can judge the most suitable quality service is the key to improving the gaming user&#x2019;s perceived experience.</p>
<p>When investigating how service quality affects user experience, it is necessary to consider the influence of user and environmental factors such as the user&#x2019;s level of play, game mechanics, game team, and individual performance. The main idea of existing studies on game user perception is to clarify multiple dimensions of interrelated game perceptions, then establish an objective and easy-to-measure correlation mapping between service quality indices and experience quality, fully consider the influence of other dimensions on the correlation, and finally assess or predict game perceptions using the objective features, to achieve the purpose of optimizing game perceptions. However, these studies need to pay more attention to the importance of features other than service quality features on the outcome of game perception.</p>
<p>This study aims to mitigate some deficiencies of existing algorithms by an alternative approach. To this end, a new method called Balanced Graph Factorization Machine (BGFM), which considers the data imbalance and the importance of high-dimensional features, is elaborated and tested. The data categories are first balanced by Borderline-SMOTE oversampling, and then features are represented naturally in a graph-structured way. The highlight is that the BGFM contains interaction mechanisms for aggregating beneficial features. The results are represented as edges in the graph. Next, BGFM combines factorization machine (FM) and graph neural network strategies to concatenate any sequential feature interactions of features in the graph with an attention mechanism that assigns inter-feature weights. The main highlights in this paper are listed as follows:</p>
<p>The strengths and weaknesses of FM and GNN in modeling feature interactions are analyzed. To solve their problems and take advantage of their strengths, a new model for feature interaction modeling, BGFM, is proposed, which bridges the gap between GNN and FM. The features in the graph are the nodes, and the two-by-two interactions between features are the edges connecting the nodes, making it possible to solve the FM problem by taking advantage of the strengths of GNN.</p>
<p>The similarity between the computed feature interactions of the attention mechanism is introduced to ensure the robustness of BGFM. This enhances the positive effects of effective features while reducing the negative effects due to biased features.</p>
<p>We conducted several experiments on the QoE dataset. The results show that the proposed BGFM performs well and outperforms the existing methods.</p>
</sec>
<sec id="sec2">
<label>2</label>
<title>Related work</title>
<p>Previous studies of game user perception have focused only on the correspondence between QoS parameters and game QoE (<xref ref-type="bibr" rid="ref39">Wattimena et al., 2006</xref>; <xref ref-type="bibr" rid="ref16">Koo et al., 2007</xref>; <xref ref-type="bibr" rid="ref6">Denieffe et al., 2007</xref>). This was initially done using linear models (e.g., logistic regression and generalized regression) to generate user game perception scores (<xref ref-type="bibr" rid="ref29">Pornpongtechavanich et al., 2022</xref>). User-perceived assessment models based on machine learning techniques, such as QoE modeling using SVM to construct prediction models (<xref ref-type="bibr" rid="ref35">Suznjevic et al., 2019</xref>), have become a research hotspot as they effectively predict user perception. These models ignore useful but unseen feature interactions in the data, as evidenced by the effectiveness of hidden variable models (<xref ref-type="bibr" rid="ref34">Sun et al., 2013</xref>). Factor decomposition machines (<xref ref-type="bibr" rid="ref30">Rendle, 2010</xref>) provide a general-purpose predictor to efficiently model higher-order interactions between interpreted features within linear time complexity.</p>
<p><xref ref-type="bibr" rid="ref42">Yang et al. (2021)</xref> transformed location information into neighborhood information and added it into a factor decomposition machine to propose the LBFM model. More recently, <xref ref-type="bibr" rid="ref38">Wang et al. (2022)</xref> proposed an LDFM model using information entropy and location projection of users and services. While the above algorithms extend the dataset somewhat, different cross-cutting features are not distinguished, making the model performance fluctuate. <xref ref-type="bibr" rid="ref12">He and Chua (2017)</xref> proposed neural factorization machines (NFM) for sparse predictive analytics. <xref ref-type="bibr" rid="ref40">Xiao and Ye (2017)</xref> thus introduced the neural network strategy on top of the previous ones and proposed the AFM model, which distinguishes between different second-order feature combinations through the attention mechanism. <xref ref-type="bibr" rid="ref13">Hong et al. (2019)</xref> proposed interaction-aware factorization machines for recommender systems, considering that perceived data sparsity can lead to fluctuations in model performance. To the best of the authors&#x2019; knowledge, the only data-driven study of game user perception that considered the effects of multiple factors has been reported in our previous paper (<xref ref-type="bibr" rid="ref41">Xie and Jia, 2022</xref>), which introduced the location-time-aware factorization machine based on fuzzy set theory for game perception (LTFM).</p>
<p>Despite some progress in the relevant research, two major aspects of the problem need further clarification. On the one hand, poor game user perceptions are a minority occurrence, similar to positive samples required for trade fraud risk prediction in banks. This inevitably runs into the problem of data imbalance. The collected game user perception data are categorized into three evaluation categories: excellent, good, and poor, with an approximate ratio of 5:1:1. In LTFM, the data are divided by tiers to reduce the impact of data imbalance on the overall performance of the algorithmic model. Although the model outperformed others, there is much room for its improvement in several aspects.</p>
<p>On the other hand, the factorization machine is a model for modeling interaction features. The core of FM is to learn the hidden vectors corresponding to the one-hot encoded features, and then the interaction between features is modeled by the inner product of vectors (<xref ref-type="bibr" rid="ref30">Rendle, 2010</xref>). FM has been used in <xref ref-type="bibr" rid="ref4">Cheng et al. (2016)</xref> and <xref ref-type="bibr" rid="ref10">Guo et al. (2017)</xref>, exhibiting at least two weak points: (i) it failed to capture higher-order feature interactions, and (ii) it assigned the same weights to all feature interactions, overfitting the model by useless interactions (<xref ref-type="bibr" rid="ref44">Zhang et al., 2016</xref>; <xref ref-type="bibr" rid="ref32">Su et al., 2021</xref>). Attempts have been made to transform FM to learn higher-order feature interactions by introducing deep neural networks (DNNs). Neural Factorization Machine (NFM) combines DNNs and dual interaction layers to obtain information about higher-order feature interactions (<xref ref-type="bibr" rid="ref12">He and Chua, 2017</xref>). Wide &#x0026; Deep learning model, and DeepFM model combine shallow and deep structures to achieve multi-order feature interactions (<xref ref-type="bibr" rid="ref4">Cheng et al., 2016</xref>; <xref ref-type="bibr" rid="ref10">Guo et al., 2017</xref>). However, implicit learning models introduced into DNNs are usually weakly interpretable, while Graph Neural Networks (GNNs) provide a compelling alternative for grasping higher-order interactions between features (<xref ref-type="bibr" rid="ref43">Zhang C. et al., 2021</xref>; <xref ref-type="bibr" rid="ref11">Hamilton et al., 2017</xref>). The core technical point of GNN is to achieve a higher learning rate by accumulating layer by layer and aggregating multidimensional relevant features. 
As a result, higher-order interactions between features can be explicitly encoded into the embedding, which inspired this study.</p>
<p>All in all, there is a great need to evaluate the perceived experience of game users. There are two main advantages of the proposed BGFM over previous studies:</p>
<p>Treating features as nodes and two-by-two interactions between features as edges mitigates the problem of comprehensively combining GNN and FM, making it possible to solve FM problems via GNN.</p>
<p>The attention mechanism assigns different weights to different features interactively to enhance the utilization of effective features and reduce the probability of deviant features.</p>
</sec>
<sec id="sec3">
<label>3</label>
<title>Proposed method</title>
<p>To address the above algorithmic pain points in game perception research, we propose the Balanced Graph Factorization Machine (BGFM) model. To this end, the overall framework of BGFM is decomposed, and the overall working principle of BGFM is summarized. The BGFM firstly chooses Borderline-SMOTE to solve the problem of unbalanced distribution of training data, which leads to fluctuation of model performance. Then, we focus on how to model higher-order beneficial feature interactions. For this purpose, we design a special mechanism in BGFM, which can be split into two main parts: the selection of beneficial interaction features and interaction aggregation. The implementation principles of these two parts are described in detail. Finally, the model-based predictions and the model optimization are discussed.</p>
<sec id="sec4">
<label>3.1</label>
<title>BGFM</title>
<p><xref ref-type="fig" rid="fig1">Figure 1</xref> shows the network structure of BGFM. The graph flexibly represents higher-order associations between features. Edges in BGFM are useful feature interactions obtained by model aggregation. After resolving the data imbalance, beneficial feature interactions are selected. After learning by the attention mechanism, different feature interactions are given different weights and jointly output for final prediction.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Network structure of BGFM.</p>
</caption>
<graphic xlink:href="fnbot-18-1481297-g001.tif"/>
</fig>
<p>The BGFM will update the network step by step. The input values are processed through feature embedding as the initial data for BGFM, as <inline-formula>
<mml:math id="M1">
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mn>1</mml:mn>
</mml:mfenced>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>, where <inline-formula>
<mml:math id="M2">
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula> is the latest feature embedding for the <inline-formula>
<mml:math id="M3">
<mml:mi>k</mml:mi>
</mml:math>
</inline-formula>-th layer. The model initially has no pre-input edge information, so edges are first obtained by the interaction selection component. The resulting edge information is then aggregated to update the feature embeddings in the remaining regions.</p>
<p>Existing methods for unbalanced data learning can be divided into three categories. BGFM uses a data-level solution, since the other categories lack flexibility and robustness. BGFM greatly simplifies the workload of model training, improves efficiency, and can be combined with various classifiers. Borderline-SMOTE is an algorithm extending SMOTE. It considers the effect of noisy samples: the algorithm uses only the minority-class samples marked with the attribute Danger on the border to generate new samples, yielding a balanced distribution of the training sample set. After solving the problem of perceived data imbalance, two main components exist in each layer of BGFM. Both of them are described in detail next.</p>
</sec>
<sec id="sec5">
<label>3.2</label>
<title>Interaction feature selection</title>
<p>We devised a mechanism to obtain favorable pairwise feature interactions in the paper. The mechanism is the inference of connections between perceptual features through the graph structure, which models higher-order connections between features. However, the edges connecting two nodes <inline-formula>
<mml:math id="M4">
<mml:mfenced open="(" close=")" separators=",">
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
<mml:mo>&#x2208;</mml:mo>
<mml:mi>E</mml:mi>
</mml:math>
</inline-formula> exist deterministically, greatly simplifying the selection process compared to the direct introduction of gradient descent-based optimization techniques.</p>
<p>This limitation is resolved by replacing the set of edges <inline-formula>
<mml:math id="M5">
<mml:mi>E</mml:mi>
</mml:math>
</inline-formula> by heightened neighbors <inline-formula>
<mml:math id="M6">
<mml:mi>P</mml:mi>
</mml:math>
</inline-formula>, which <inline-formula>
<mml:math id="M7">
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> is explained as the likelihoods of <inline-formula>
<mml:math id="M8">
<mml:mfenced open="(" close=")" separators=",">
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
<mml:mo>&#x2208;</mml:mo>
<mml:mi>E</mml:mi>
</mml:math>
</inline-formula>. It shows that the interaction between the features is very important. A different graph structure <inline-formula>
<mml:math id="M9">
<mml:msup>
<mml:mi>P</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>k</mml:mi>
</mml:mfenced>
</mml:msup>
</mml:math>
</inline-formula> needs to be learnt at each <inline-formula>
<mml:math id="M10">
<mml:mi>k</mml:mi>
</mml:math>
</inline-formula>-th layer and compared with the previously derived graph. These treatments provide higher performance. Specifically, in conventional models the graph structure of each layer is fixed, culminating in fixed-form outputs. In contrast, our model is characterized by adaptive learning and can model associations of beneficial features.</p>
<p>This section aims to design a metric function to obtain beneficial feature interactions. The <inline-formula>
<mml:math id="M11">
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")" separators=",">
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
</mml:math>
</inline-formula> metric function calculates <inline-formula>
<mml:math id="M12">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")" separators=",">
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
</mml:math>
</inline-formula> as the weights of the edges. NFM-based functions are used to evaluate the edge weights (<xref ref-type="bibr" rid="ref12">He and Chua, 2017</xref>). The product of elements of these feature vectors is converted into a scalar using a Multilayer Perceptron (MLP) with one hidden layer, which can be calculated as <xref ref-type="disp-formula" rid="EQ1">Equation 1</xref>.</p>
<disp-formula id="EQ1">
<label>(1)</label>
<mml:math id="M13">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")" separators=",">
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mi>&#x03C3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mn>2</mml:mn>
<mml:mi>s</mml:mi>
</mml:msubsup>
<mml:mi>&#x03B4;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mn>1</mml:mn>
<mml:mi>s</mml:mi>
</mml:msubsup>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>+</mml:mo>
<mml:msubsup>
<mml:mi>b</mml:mi>
<mml:mn>1</mml:mn>
<mml:mi>s</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
<mml:mo>+</mml:mo>
<mml:msubsup>
<mml:mi>b</mml:mi>
<mml:mn>2</mml:mn>
<mml:mi>s</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math id="M14">
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mn>1</mml:mn>
<mml:mi>s</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula>, <inline-formula>
<mml:math id="M15">
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mn>2</mml:mn>
<mml:mi>s</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula>, <inline-formula>
<mml:math id="M16">
<mml:msubsup>
<mml:mi>b</mml:mi>
<mml:mn>1</mml:mn>
<mml:mi>s</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M17">
<mml:msubsup>
<mml:mi>b</mml:mi>
<mml:mn>2</mml:mn>
<mml:mi>s</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula> are the weight matrices and bias vectors of the multilayer perceptron. <inline-formula>
<mml:math id="M18">
<mml:mi mathvariant="italic">ReLU</mml:mi>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M19">
<mml:mi mathvariant="italic">Sigmoid</mml:mi>
</mml:math>
</inline-formula> activation functions are represented by <inline-formula>
<mml:math id="M20">
<mml:mi>&#x03B4;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mo>&#x22C5;</mml:mo>
</mml:mfenced>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M21">
<mml:mi>&#x03C3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mo>&#x22C5;</mml:mo>
</mml:mfenced>
</mml:math>
</inline-formula>, respectively. It is worth noting the order of the inputs to <inline-formula>
<mml:math id="M22">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> is invariant, as <inline-formula>
<mml:math id="M23">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")" separators=",">
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")" separators=",">
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mfenced>
</mml:math>
</inline-formula>. The same pair of nodes have the same edge weights at this point. This successive graph structure modeling allows the gradient to backpropagate. Since there is no truth graph structure, the gradient here is defined by the deviation between the model&#x2019;s estimated and actual values. Feature interactions are treated as one, and the weights are estimated using MLP. Euclidean distance or other distance metrics can also be chosen (<xref ref-type="bibr" rid="ref45">Zhang W. et al., 2021</xref>).</p>
</sec>
<sec id="sec6">
<label>3.3</label>
<title>Interaction aggregation</title>
<p>After selecting the beneficial feature interactions, the feature representation is updated by performing an interaction aggregation operation. For the target feature node <inline-formula>
<mml:math id="M24">
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>, the attention coefficient of each feature interaction is measured while aggregating its beneficial interactions with its neighbors. The learnable projection <italic>a</italic> and nonlinear activation function <inline-formula>
<mml:math id="M25">
<mml:mi mathvariant="italic">LeakyReLU</mml:mi>
</mml:math>
</inline-formula> are applied to measure the attention coefficients as <xref ref-type="disp-formula" rid="EQ2">Equation 2</xref>.</p>
<disp-formula id="EQ2">
<label>(2)</label>
<mml:math id="M26">
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mi mathvariant="italic">LeakyReLU</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msup>
<mml:mi>a</mml:mi>
<mml:mi>T</mml:mi>
</mml:msup>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>This implies the significance of interactions between features <inline-formula>
<mml:math id="M27">
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M28">
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>. In this paper, we only compute <inline-formula>
<mml:math id="M29">
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> of the node <inline-formula>
<mml:math id="M30">
<mml:mi>j</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mi>N</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>. <inline-formula>
<mml:math id="M31">
<mml:msub>
<mml:mi>N</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> represents the neighbors of the node <inline-formula>
<mml:math id="M32">
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>, which is the set of features that are useful to interact with <inline-formula>
<mml:math id="M33">
<mml:msub>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>. In the paper, the following function <inline-formula>
<mml:math id="M34">
<mml:mi mathvariant="italic">Softmax</mml:mi>
</mml:math>
</inline-formula> is used to normalize them in all choices of <inline-formula>
<mml:math id="M35">
<mml:mi>j</mml:mi>
</mml:math>
</inline-formula>, as shown in <xref ref-type="disp-formula" rid="EQ3">Equation 3</xref>.</p>
<disp-formula id="EQ3">
<label>(3)</label>
<mml:math id="M36">
<mml:msub>
<mml:mi>a</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mi mathvariant="italic">Softmax</mml:mi>
<mml:mfenced open="(" close=")">
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mfenced>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:msup>
<mml:mrow>
<mml:msubsup>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>K</mml:mi>
</mml:msubsup>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math id="M37">
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> is the output value of the <inline-formula>
<mml:math id="M38">
<mml:mi>i</mml:mi>
</mml:math>
</inline-formula>-th node, and the output values for multi-classification range from 0 to 1; <inline-formula>
<mml:math id="M39">
<mml:mi>k</mml:mi>
</mml:math>
</inline-formula> is the number of nodes that the network finally outputs, that is, the number of categories that can be classified. This makes it easy to compare the coefficients obtained between different feature nodes. After obtaining the normalized attention coefficients, the linear and nonlinear combinations of links between features are computed as subsequent new feature inputs as <xref ref-type="disp-formula" rid="EQ4">Equation 4</xref>.</p>
<disp-formula id="EQ4">
<label>(4)</label>
<mml:math id="M40">
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
<mml:mn>1</mml:mn>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mi>&#x03C3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:munder>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mi>N</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:munder>
<mml:msub>
<mml:mi>a</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>a</mml:mi>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math id="M41">
<mml:msub>
<mml:mi>a</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> measures the attentional coefficient of feature <inline-formula>
<mml:math id="M42">
<mml:mi>i</mml:mi>
</mml:math>
</inline-formula> and feature <inline-formula>
<mml:math id="M43">
<mml:mi>j</mml:mi>
</mml:math>
</inline-formula> interactions, and <inline-formula>
<mml:math id="M44">
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> indicates the probability that such feature association is helpful. <inline-formula>
<mml:math id="M45">
<mml:msub>
<mml:mi>a</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> The attention coefficient is computed via the soft-attention mechanism and <inline-formula>
<mml:math id="M46">
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> is computed by the hard-attention mechanism. The information about the selected feature interactions is controlled by multiplying them and making the input values of the feature interaction selection mechanism learnable by gradient backpropagation.</p>
<p>To capture the diverse polysemy of feature interactions in different semantic subspaces and to stabilize the learning process, this paper extends our mechanism by applying multi-head attention (<xref ref-type="bibr" rid="ref25">Li et al., 2017</xref>; <xref ref-type="bibr" rid="ref28">Marcheggiani and Titov, 2017</xref>; <xref ref-type="bibr" rid="ref37">Wang et al., 2019</xref>). Specifically, <italic>H</italic> individual attention mechanisms perform the update of <xref ref-type="disp-formula" rid="EQ4">Equation 4</xref> and then concatenate these features to produce an output feature representation as <xref ref-type="disp-formula" rid="EQ5">Equation 5</xref>.</p>
<disp-formula id="EQ5">
<label>(5)</label>
<mml:math id="M47">
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:munderover>
<mml:mo>&#x2225;</mml:mo>
<mml:mi>H</mml:mi>
<mml:mi>h</mml:mi>
</mml:munderover>
<mml:mi>&#x03C3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:munder>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mi>N</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:munder>
<mml:msub>
<mml:mi>a</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>h</mml:mi>
</mml:msubsup>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math id="M48">
<mml:mo stretchy="true">|</mml:mo>
<mml:mo stretchy="true">|</mml:mo>
</mml:math>
</inline-formula> denotes the concatenation operation, <inline-formula>
<mml:math id="M49">
<mml:msub>
<mml:mi>a</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> is the normalized value obtained through the <inline-formula>
<mml:math id="M50">
<mml:mi>h</mml:mi>
</mml:math>
</inline-formula>-th attention mechanism, and <inline-formula>
<mml:math id="M51">
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>h</mml:mi>
</mml:msubsup>
</mml:math>
</inline-formula> is the linear transformation matrix of the former. Optionally, the feature representation can be updated using average pooling, as shown in <xref ref-type="disp-formula" rid="EQ6">Equation 6</xref>.</p>
<disp-formula id="EQ6">
<label>(6)</label>
<mml:math id="M52">
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
<mml:mn>3</mml:mn>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mi>&#x03C3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>H</mml:mi>
</mml:mfrac>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>h</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>H</mml:mi>
</mml:munderover>
<mml:munder>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mi>N</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:munder>
<mml:msub>
<mml:mi>a</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msubsup>
<mml:mi>W</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>h</mml:mi>
</mml:msubsup>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2299;</mml:mo>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
</sec>
<sec id="sec7">
<label>3.4</label>
<title>Forecasting and improvement</title>
<p>The result of the <inline-formula>
<mml:math id="M53">
<mml:mi>k</mml:mi>
</mml:math>
</inline-formula>-th layer is <inline-formula>
<mml:math id="M54">
<mml:mfenced open="{" close="}" separators=",,,">
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mn>1</mml:mn>
<mml:mfenced open="(" close=")">
<mml:mi>k</mml:mi>
</mml:mfenced>
</mml:msubsup>
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mn>2</mml:mn>
<mml:mfenced open="(" close=")">
<mml:mi>k</mml:mi>
</mml:mfenced>
</mml:msubsup>
<mml:mo>&#x2026;</mml:mo>
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mi>n</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mi>k</mml:mi>
</mml:mfenced>
</mml:msubsup>
</mml:mfenced>
</mml:math>
</inline-formula>, which is a collection of <inline-formula>
<mml:math id="M55">
<mml:mi>n</mml:mi>
</mml:math>
</inline-formula> feature representation vectors. Because the representations acquired in multiple layers model different orders of interactions, they play different roles in the ultimate result. Thus, they are concatenated to obtain the final representation of every feature (<xref ref-type="bibr" rid="ref2">Beck et al., 2018</xref>) as <xref ref-type="disp-formula" rid="EQ7">Equation 7</xref>.</p>
<disp-formula id="EQ7">
<label>(7)</label>
<mml:math id="M56">
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
<mml:mn>1</mml:mn>
</mml:msubsup>
<mml:mo>&#x2225;</mml:mo>
<mml:mo>&#x22EF;</mml:mo>
<mml:mo>&#x2225;</mml:mo>
<mml:msubsup>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
</mml:msubsup>
</mml:math>
</disp-formula>
<p>Finally, all the feature vectors are pooled equally to get the result at the graph level, and the final prediction is made using the projection vector <inline-formula>
<mml:math id="M57">
<mml:mi>p</mml:mi>
</mml:math>
</inline-formula>. The obtained results are computed using <xref ref-type="disp-formula" rid="EQ8 EQ9">Equations 8, 9</xref>:</p>
<disp-formula id="EQ8">
<label>(8)</label>
<mml:math id="M58">
<mml:mi>e</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:munderover>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:munderover>
<mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</disp-formula>
<disp-formula id="EQ9">
<label>(9)</label>
<mml:math id="M59">
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo stretchy="true">&#x0302;</mml:mo>
</mml:mover>
<mml:mo>=</mml:mo>
<mml:msup>
<mml:mi>p</mml:mi>
<mml:mi>T</mml:mi>
</mml:msup>
<mml:mi>e</mml:mi>
</mml:math>
</disp-formula>
</sec>
</sec>
<sec sec-type="results" id="sec8">
<label>4</label>
<title>Results and discussion</title>
<sec id="sec9">
<label>4.1</label>
<title>Research data</title>
<p>The research focuses on exploring the effects of multiple influencing factors (including user, system, and contextual ones) on the perceived QoE of game users. A general taxonomy of the various factors in the literature is drawn upon, and further references are made to the taxonomy of existing game-related studies in terms of game QoE. Finally, an empirical test method is derived (<xref ref-type="bibr" rid="ref29">Pornpongtechavanich et al., 2022</xref>; <xref ref-type="bibr" rid="ref15">Jiang et al., 2019</xref>). Specifically, this gaming dataset considers the effects of three different system factors (latency, packet loss rate, jitter, and additional network parameters), user skills (user-personal factors in terms of gaming experience), and context (in terms of action categories and social context). The game entity under study is Glory of Kings, a game in which the interaction is mainly based on the UDP protocol, which requires a high level of real-time and user engagement. Due to the lack of a dataset of user game perception, the testing process in the study&#x2019;s laboratory environment was determined after reviewing the relevant literature.</p>
<p>Through the joint efforts of team members and participants, data from 789 games were collected, with each piece of data representing three dimensions of user, service, and environmental data. Each dataset has 21 features, consisting of 4 pieces of user data (player ID, age, gender, and skill level), 16 pieces of in-game and post-game service data, and a user-perceived score for the last one.</p>
</sec>
<sec id="sec10">
<label>4.2</label>
<title>Comparison algorithms</title>
<p>In this paper, to demonstrate the effectiveness of the proposed algorithm, we compared it with algorithms from four categories: (A) linear methods, (B) FM-based methods, (C) DNN-based methods, and (D) aggregation-based methods. The specific eight comparison algorithms include LR (A), Standard FM (<xref ref-type="bibr" rid="ref30">Rendle, 2010</xref>) (B), NFM (<xref ref-type="bibr" rid="ref12">He and Chua, 2017</xref>) (C), AFM (<xref ref-type="bibr" rid="ref40">Xiao and Ye, 2017</xref>) (B), AutoInt (<xref ref-type="bibr" rid="ref31">Song et al., 2020</xref>) (D), Fi-GNN (<xref ref-type="bibr" rid="ref5">Cui et al., 2019</xref>) (D), InterHat (<xref ref-type="bibr" rid="ref17">Li et al., 2020</xref>) (D), and LTFM (B).</p>
<p>LR is a linear regression, modelled using only a single feature; Standard FM is able to model second-order interaction links of features; NFM designed a dual interaction layer and DNN to handle nonlinear features and model higher order feature interactions; AFM introduces the attention mechanism to give weight to the interaction of different features; AutoInt improves the efficiency of the model in learning higher-order feature interactions through self-attentive networks; Fi-GNN uses gated graph neural networks to model higher-order feature connections as fully connected graphs; InterHat uses the attention mechanism to select features, and raw feature multiplication produces higher-order feature interactions; LTFM is an extended FM-based model that considers the effects of temporal and spatial information projections and feature interactions on the final game perception results.</p>
</sec>
<sec id="sec11">
<label>4.3</label>
<title>Evaluation of performance indices</title>
<p>The following four assessment metrics are used in the experiments of game user perception evaluation: AUC (<xref ref-type="bibr" rid="ref7">Gospodinova et al., 2023</xref>; <xref ref-type="bibr" rid="ref21">Li et al., 2022</xref>), Precision (<xref ref-type="bibr" rid="ref1">Annadurai et al., 2024</xref>; <xref ref-type="bibr" rid="ref3">Chan et al., 2024</xref>), Recall (<xref ref-type="bibr" rid="ref36">Wang et al., 2024</xref>; <xref ref-type="bibr" rid="ref46">Zheng et al., 2024</xref>; <xref ref-type="bibr" rid="ref14">Hong et al., 2024</xref>), and F-measure (<xref ref-type="bibr" rid="ref23">Li et al., 2024a</xref>; <xref ref-type="bibr" rid="ref20">Li et al., 2021</xref>; <xref ref-type="bibr" rid="ref22">Li et al., 2023a</xref>; <xref ref-type="bibr" rid="ref8">Guo et al., 2024a</xref>; <xref ref-type="bibr" rid="ref9">Guo et al., 2024b</xref>; <xref ref-type="bibr" rid="ref27">Ma and Tong, 2024</xref>; <xref ref-type="bibr" rid="ref33">Sultan et al., 2024</xref>; <xref ref-type="bibr" rid="ref24">Li et al., 2024b</xref>; <xref ref-type="bibr" rid="ref26">Li et al., 2024c</xref>; <xref ref-type="bibr" rid="ref18">Li et al., 2023b</xref>; <xref ref-type="bibr" rid="ref19">Li et al., 2023c</xref>).</p>
<p>The AUC curve is taken as the area under the ROC curve. The larger the value, the better the model performance. Precision is used to calculate the proportion of correct predictions among all samples with positive predictions. Recall is the ratio of positive class samples correctly judged by the classifier to the total number of positive class samples. Usually, precision is inversely proportional to recall. A composite metric, F-measure, is introduced to balance the effects of precision and recall and to evaluate a classifier more fully. When both precision and recall are high, the value of F-measure is high.</p>
</sec>
<sec id="sec12">
<label>4.4</label>
<title>Results and analysis</title>
<p>The performance of the BGFM model after balancing the data was first analyzed experimentally, as shown in <xref ref-type="table" rid="tab1">Table 1</xref>. It is concluded that there is an improvement in the BGFM model performance compared to the standard FM.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Quantitative performance comparison of standard FM and balanced model.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Model</th>
<th align="center" valign="top">AUC</th>
<th align="center" valign="top">Precision</th>
<th align="center" valign="top">Recall</th>
<th align="center" valign="top">F-measure</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Standard FM</td>
<td align="center" valign="middle">0.7549</td>
<td align="center" valign="middle">0.7189</td>
<td align="center" valign="middle">0.7215</td>
<td align="center" valign="middle">0.7008</td>
</tr>
<tr>
<td align="left" valign="middle">BGFM (balanced data)</td>
<td align="center" valign="middle"><bold>0.7760</bold></td>
<td align="center" valign="middle"><bold>0.7575</bold></td>
<td align="center" valign="middle"><bold>0.7402</bold></td>
<td align="center" valign="middle"><bold>0.7334</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The bold values are meant to be the best performing performance indicators in the table.</p>
</table-wrap-foot>
</table-wrap>
<p>It can be deduced from <xref ref-type="table" rid="tab1">Table 1</xref> that FM performs well for sparse feature data. However, since poor game perception is a minority class occurrence, FM impairs the correctness of the final judgment by classifying the minority class as the majority class. Due to the data volume limitation, we preprocess the data and rationally generate new data from the existing data so that the classes to be judged are basically balanced. The balanced data is then imported into our model. The results show that using Borderline-SMOTE oversampling to balance the data category distribution is beneficial for the final perceptual evaluation.</p>
<p>The performance comparison of these methods on the game-aware dataset is shown in <xref ref-type="table" rid="tab2">Table 2</xref>, from which the following observations are obtained: the BGFM proposed in this chapter achieves the best performance on the game perception dataset. The enhanced efficiency of the BGFM compared to the four classes (A, B, C, and D) of methods is particularly significant. BGFM employs a mechanism for choosing and aggregating beneficial feature interactions, and its performance is superior and easy to manage. Taking all aspects together, BGFM is superior to existing algorithms. In the following, the four types of algorithms, A, B, C, and D, will be specifically analyzed.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Quantitative comparison of different algorithms.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Model</th>
<th align="center" valign="top">AUC</th>
<th align="center" valign="top">Precision</th>
<th align="center" valign="top">Recall</th>
<th align="center" valign="top">F-measure</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">LR</td>
<td align="center" valign="top">0.7288</td>
<td align="center" valign="top">0.7960</td>
<td align="center" valign="top">0.7975</td>
<td align="center" valign="top">0.7802</td>
</tr>
<tr>
<td align="left" valign="top">Standard FM</td>
<td align="center" valign="top">0.7549</td>
<td align="center" valign="top">0.7189</td>
<td align="center" valign="top">0.7215</td>
<td align="center" valign="top">0.7008</td>
</tr>
<tr>
<td align="left" valign="top">NFM</td>
<td align="center" valign="top">0.7721</td>
<td align="center" valign="top">0.7789</td>
<td align="center" valign="top">0.7595</td>
<td align="center" valign="top">0.7778</td>
</tr>
<tr>
<td align="left" valign="top">AFM</td>
<td align="center" valign="top">0.7862</td>
<td align="center" valign="top">0.8052</td>
<td align="center" valign="top">0.7848</td>
<td align="center" valign="top">0.7972</td>
</tr>
<tr>
<td align="left" valign="top">AutoInt</td>
<td align="center" valign="top">0.7908</td>
<td align="center" valign="top">0.8158</td>
<td align="center" valign="top">0.7975</td>
<td align="center" valign="top">0.8012</td>
</tr>
<tr>
<td align="left" valign="top">Fi-GNN</td>
<td align="center" valign="top">0.8014</td>
<td align="center" valign="top">0.8207</td>
<td align="center" valign="top">0.7975</td>
<td align="center" valign="top">0.8010</td>
</tr>
<tr>
<td align="left" valign="top">InterHat</td>
<td align="center" valign="top">0.8017</td>
<td align="center" valign="top">0.8259</td>
<td align="center" valign="top">0.8101</td>
<td align="center" valign="top">0.8069</td>
</tr>
<tr>
<td align="left" valign="top">LTFM</td>
<td align="center" valign="top">0.8093</td>
<td align="center" valign="top">0.8328</td>
<td align="center" valign="top">0.8228</td>
<td align="center" valign="top">0.8178</td>
</tr>
<tr>
<td align="left" valign="top">BGFM</td>
<td align="center" valign="top"><bold>0.8305</bold></td>
<td align="center" valign="top"><bold>0.8760</bold></td>
<td align="center" valign="top"><bold>0.8360</bold></td>
<td align="center" valign="top"><bold>0.8453</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The bold values are meant to be the best performing performance indicators in the table.</p>
</table-wrap-foot>
</table-wrap>
<p>Aggregation-based methods outperform the other three classes of models, demonstrating the advantages of selection strategies in capturing higher-order relationships. However, the LTFM model still performs well, suggesting that projection information that jointly considers temporal and spatial information when interacting with and capturing features is favorable for the final perceptual evaluation. Nevertheless, this model only expands the data dimensions and captures hidden feature interactions based on the properties of FM for sparse data; it fails to address the performance fluctuations caused by the imbalance of data categories or to capture higher-order beneficial feature interactions. The BGFM solves these problems well.</p>
<p>Compared to the powerful aggregation-based baseline AutoInt and Fi-GNN, BGFM still offers a significant performance improvement and can be considered important for game-aware prediction tasks. This enhancement is due to the combination of GNN with FM. Treating features as nodes, two-by-two interactions between features as edges, and each input as a graph, GNN&#x2019;s aggregation strategy solves two of FM&#x2019;s problems: suboptimal feature interactions that lead to model overfitting and the difficulty of modeling higher-order feature interactions. GNN introduces the concept of feature interaction and a beneficial interaction selection method that greatly improves the model&#x2019;s performance.</p>
<p>The attention mechanism assigns different weights to interactions. AFM outperforms FM, demonstrating the necessity of considering feature interaction weights. Although NFM uses DNNs to model higher-order interactions, they do not ensure an improvement over the base model and the improved model with the addition of an attention mechanism, possibly because of their implicit feature interaction learning approach. AutoInt performs better than AFM because the multi-head attention mechanism in the model takes into account the richness of feature interactions in multiple spaces.</p>
</sec>
</sec>
<sec sec-type="conclusions" id="sec13">
<label>5</label>
<title>Conclusion</title>
<p>This study bridges FM and GNN approaches, yielding a new BGFM model. It exploits the respective strengths of FM and GNN, attempting to compensate for their individual deficiencies. Beneficial feature interactions are selected at each layer of BGFM and considered edges in the graph. The interactions are then encoded as feature representations using the neighborhood interaction aggregation operation. The model adds higher-order feature learning at each layer, and the layer depth determines the highest order of feature interactions that can be captured. This leads to the conclusion that our model can learn the highest-order feature interactions. The BGFM learns higher-order interactions between features and provides high interpretability of model results. The experimental results prove that the proposed BGFM outperforms eight state-of-the-art models to a large extent.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec14">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="author-contributions" id="sec15">
<title>Author contributions</title>
<p>XX: Conceptualization, Data curation, Investigation, Methodology, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. YJ: Methodology, Resources, Software, Supervision, Validation, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. TM: Funding acquisition, Project administration, Resources, Software, Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="funding-information" id="sec16">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. This research was funded by the Tianshan Talent Training Project-Xinjiang Science and Technology Innovation Team Program (2023TSYCTD).</p>
</sec>
<sec sec-type="COI-statement" id="sec17">
<title>Conflict of interest</title>
<p>XX was employed by BYD Company Limited.</p>
<p>The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="sec18">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Annadurai</surname> <given-names>A.</given-names></name> <name><surname>Sureshkumar</surname> <given-names>V.</given-names></name> <name><surname>Jaganathan</surname> <given-names>D.</given-names></name> <name><surname>Dhanasekaran</surname> <given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>Enhancing medical image quality using fractional order denoising integrated with transfer learning</article-title>. <source>Fractal Fract.</source> <volume>8</volume>:<fpage>511</fpage>. doi: <pub-id pub-id-type="doi">10.3390/fractalfract8090511</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Beck</surname> <given-names>D.</given-names></name> <name><surname>Haffari</surname> <given-names>G.</given-names></name> <name><surname>Cohn</surname> <given-names>T.</given-names></name></person-group> (<year>2018</year>). Graph-to-sequence learning using gated graph neural networks. Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics. 1, 273&#x2013;283.</citation></ref>
<ref id="ref3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chan</surname> <given-names>H.</given-names></name> <name><surname>Qiu</surname> <given-names>X.</given-names></name> <name><surname>Gao</surname> <given-names>X.</given-names></name> <name><surname>Lu</surname> <given-names>D.</given-names></name></person-group> (<year>2024</year>). <article-title>A complex background SAR ship target detection method based on fusion tensor and cross-domain adversarial learning</article-title>. <source>Remote Sens.</source> <volume>16</volume>:<fpage>3492</fpage>. doi: <pub-id pub-id-type="doi">10.3390/rs16183492</pub-id></citation></ref>
<ref id="ref4"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Cheng</surname> <given-names>H.</given-names></name> <name><surname>Koc</surname> <given-names>L.</given-names></name> <name><surname>Harmsen</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). Wide &#x0026; deep learning for recommender systems. In Proceedings of the 1st Workshop on Deep Learning for Recommender Systems. pp. 7&#x2013;10.</citation></ref>
<ref id="ref5"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Cui</surname> <given-names>Z.</given-names></name> <name><surname>Li</surname> <given-names>Z.</given-names></name> <name><surname>Wu</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). Dressing as a whole: outfit compatibility learning based on node-wise graph neural networks. Proceedings of the World Wide Web Conference. pp. 307&#x2013;317.</citation></ref>
<ref id="ref6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Denieffe</surname> <given-names>D.</given-names></name> <name><surname>Carrig</surname> <given-names>B.</given-names></name> <name><surname>Marshall</surname> <given-names>D.</given-names></name></person-group> (<year>2007</year>). <article-title>A game assessment metric for the online gamer</article-title>. <source>Adv. Elect. Comput. Eng.</source> <volume>7</volume>, <fpage>3</fpage>&#x2013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.4316/aece.2007.02001</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gospodinova</surname> <given-names>E.</given-names></name> <name><surname>Lebamovski</surname> <given-names>P.</given-names></name> <name><surname>Georgieva-Tsaneva</surname> <given-names>G.</given-names></name> <name><surname>Negreva</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Evaluation of the methods for nonlinear analysis of heart rate variability</article-title>. <source>Fractal Fract.</source> <volume>7</volume>:<fpage>388</fpage>. doi: <pub-id pub-id-type="doi">10.3390/fractalfract7050388</pub-id></citation></ref>
<ref id="ref8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guo</surname> <given-names>F.</given-names></name> <name><surname>Ma</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Lv</surname> <given-names>M.</given-names></name> <name><surname>Jia</surname> <given-names>Z.</given-names></name></person-group> (<year>2024a</year>). <article-title>FCNet: flexible convolution network for infrared small ship detection</article-title>. <source>Remote Sens.</source> <volume>16</volume>:<fpage>2218</fpage>. doi: <pub-id pub-id-type="doi">10.3390/rs16122218</pub-id></citation></ref>
<ref id="ref9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guo</surname> <given-names>F.</given-names></name> <name><surname>Ma</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Lv</surname> <given-names>M.</given-names></name> <name><surname>Jia</surname> <given-names>Z.</given-names></name></person-group> (<year>2024b</year>). <article-title>Multi-attention pyramid context network for infrared small ship detection</article-title>. <source>J. Mar. Sci. Eng.</source> <volume>12</volume>:<fpage>345</fpage>. doi: <pub-id pub-id-type="doi">10.3390/jmse12020345</pub-id></citation></ref>
<ref id="ref10"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Guo</surname> <given-names>H.</given-names></name> <name><surname>Tang</surname> <given-names>R.</given-names></name> <name><surname>Ye</surname> <given-names>Y.</given-names></name></person-group> (<year>2017</year>). DeepFM: a factorization-machine based neural network for ctr prediction. In Proceedings of the 26th International Joint Conference on Artificial Intelligence. pp. 1725&#x2013;1731.</citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hamilton</surname> <given-names>W.</given-names></name> <name><surname>Ying</surname> <given-names>R.</given-names></name> <name><surname>Leskovec</surname> <given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>Inductive representation learning on large graphs</article-title>. <source>Adv. Neural Inf. Proces. Syst.</source> <volume>30</volume>, <fpage>1025</fpage>&#x2013;<lpage>1035</lpage>.</citation></ref>
<ref id="ref12"><citation citation-type="other"><person-group person-group-type="author"><name><surname>He</surname> <given-names>X.</given-names></name> <name><surname>Chua</surname> <given-names>T.</given-names></name></person-group> (<year>2017</year>). Neural factorization machines for sparse predictive analytics. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval. pp. 355&#x2013;364.</citation></ref>
<ref id="ref13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hong</surname> <given-names>F.</given-names></name> <name><surname>Huang</surname> <given-names>D.</given-names></name> <name><surname>Chen</surname> <given-names>G.</given-names></name></person-group> (<year>2019</year>). <article-title>Interaction-aware factorization machines for recommender systems</article-title>. <source>AAAI Conf. Artif. Intell.</source> <volume>33</volume>, <fpage>3804</fpage>&#x2013;<lpage>3811</lpage>. doi: <pub-id pub-id-type="doi">10.1609/aaai.v33i01.33013804</pub-id></citation></ref>
<ref id="ref14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hong</surname> <given-names>L.</given-names></name> <name><surname>Lee</surname> <given-names>S.</given-names></name> <name><surname>Song</surname> <given-names>G.</given-names></name></person-group> (<year>2024</year>). <article-title>CAM-Vtrans: real-time sports training utilizing multi-modal robot data</article-title>. <source>Front. Neurorobot.</source> <volume>18</volume>:<fpage>1453571</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnbot.2024.1453571</pub-id>, PMID: <pub-id pub-id-type="pmid">39463860</pub-id></citation></ref>
<ref id="ref15"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Jiang</surname> <given-names>B.</given-names></name> <name><surname>Zhang</surname> <given-names>Z.</given-names></name> <name><surname>Lin</surname> <given-names>D.</given-names></name></person-group> (<year>2019</year>). Semi-supervised learning with graph learning-convolutional networks. IEEE Conference on Computer Vision and Pattern Recognition. pp. 11305&#x2013;11312.</citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koo</surname> <given-names>D.</given-names></name> <name><surname>Lee</surname> <given-names>S.</given-names></name> <name><surname>Chang</surname> <given-names>H.</given-names></name></person-group> (<year>2007</year>). <article-title>Experiential motives for playing online games</article-title>. <source>J. Cyber Psychol. Behav.</source> <volume>2</volume>, <fpage>772</fpage>&#x2013;<lpage>775</lpage>.</citation></ref>
<ref id="ref17"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Z.</given-names></name> <name><surname>Cheng</surname> <given-names>W.</given-names></name> <name><surname>Chen</surname> <given-names>Y.</given-names></name></person-group> (<year>2020</year>). Interpretable click-through rate prediction through hierarchical attention. Proceedings of the 13th International Conference on Web Search and Data Mining. pp. 313&#x2013;321.</citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Lv</surname> <given-names>M.</given-names></name> <name><surname>Jia</surname> <given-names>Z.</given-names></name> <name><surname>Jin</surname> <given-names>Q.</given-names></name> <name><surname>Liu</surname> <given-names>M.</given-names></name> <name><surname>Chen</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2023b</year>). <article-title>An effective infrared and visible image fusion approach via rolling guidance filtering and gradient saliency map</article-title>. <source>Remote Sens.</source> <volume>15</volume>:<fpage>2486</fpage>. doi: <pub-id pub-id-type="doi">10.3390/rs15102486</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Lv</surname> <given-names>M.</given-names></name> <name><surname>Jia</surname> <given-names>Z.</given-names></name> <name><surname>Ma</surname> <given-names>H.</given-names></name></person-group> (<year>2023c</year>). <article-title>Sparse representation-based multi-focus image fusion method via local energy in shearlet domain</article-title>. <source>Sensors</source> <volume>23</volume>:<fpage>2888</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s23062888</pub-id>, PMID: <pub-id pub-id-type="pmid">36991598</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Ma</surname> <given-names>H.</given-names></name> <name><surname>Jia</surname> <given-names>Z.</given-names></name></person-group> (<year>2021</year>). <article-title>Change detection from SAR images based on convolutional neural networks guided by saliency enhancement</article-title>. <source>Remote Sens.</source> <volume>13</volume>:<fpage>3697</fpage>. doi: <pub-id pub-id-type="doi">10.3390/rs13183697</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Ma</surname> <given-names>H.</given-names></name> <name><surname>Jia</surname> <given-names>Z.</given-names></name></person-group> (<year>2022</year>). <article-title>Multiscale geometric analysis fusion-based unsupervised change detection in remote sensing images via FLICM model</article-title>. <source>Entropy</source> <volume>24</volume>:<fpage>291</fpage>. doi: <pub-id pub-id-type="doi">10.3390/e24020291</pub-id>, PMID: <pub-id pub-id-type="pmid">35205585</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Ma</surname> <given-names>H.</given-names></name> <name><surname>Jia</surname> <given-names>Z.</given-names></name></person-group> (<year>2023a</year>). <article-title>Gamma correction-based automatic unsupervised change detection in SAR images via FLICM model</article-title>. <source>J. Indian Soc. Remote Sens.</source> <volume>51</volume>, <fpage>1077</fpage>&#x2013;<lpage>1088</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12524-023-01674-4</pub-id></citation></ref>
<ref id="ref23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Ma</surname> <given-names>H.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Zhao</surname> <given-names>X.</given-names></name></person-group> (<year>2024a</year>). <article-title>Synthetic aperture radar image change detection based on principal component analysis and two-level clustering</article-title>. <source>Remote Sens.</source> <volume>16</volume>:<fpage>1861</fpage>. doi: <pub-id pub-id-type="doi">10.3390/rs16111861</pub-id></citation></ref>
<ref id="ref24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Shi</surname> <given-names>Y.</given-names></name> <name><surname>Lv</surname> <given-names>M.</given-names></name> <name><surname>Jia</surname> <given-names>Z.</given-names></name></person-group> (<year>2024b</year>). <article-title>Infrared and visible image fusion via sparse representation and guided filtering in Laplacian pyramid domain</article-title>. <source>Remote Sens.</source> <volume>16</volume>:<fpage>3804</fpage>. doi: <pub-id pub-id-type="doi">10.3390/rs16203804</pub-id></citation></ref>
<ref id="ref25"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>R.</given-names></name> <name><surname>Tapaswi</surname> <given-names>M.</given-names></name> <name><surname>Liao</surname> <given-names>R.</given-names></name></person-group> (<year>2017</year>). Situation recognition with graph neural networks. In Proceedings of the 2017 IEEE International Conference on Computer Vision. pp. 4183&#x2013;4192.</citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Zhao</surname> <given-names>X.</given-names></name> <name><surname>Hou</surname> <given-names>H.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name></person-group> (<year>2024c</year>). <article-title>Fractal dimension-based multi-focus image fusion via coupled neural P systems in NSCT domain</article-title>. <source>Fractal Fract.</source> <volume>8</volume>:<fpage>554</fpage>. doi: <pub-id pub-id-type="doi">10.3390/fractalfract8100554</pub-id></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ma</surname> <given-names>L.</given-names></name> <name><surname>Tong</surname> <given-names>Y.</given-names></name></person-group> (<year>2024</year>). <article-title>TL-CStrans net: a vision robot for table tennis player action recognition driven via CS-transformer</article-title>. <source>Front. Neurorobot.</source> <volume>18</volume>:<fpage>1443177</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnbot.2024.1443177</pub-id>, PMID: <pub-id pub-id-type="pmid">39498235</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Marcheggiani</surname> <given-names>D.</given-names></name> <name><surname>Titov</surname> <given-names>I.</given-names></name></person-group> (<year>2017</year>). Encoding sentences with graph convolutional networks for semantic role labeling. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing. pp. 1506&#x2013;1515.</citation></ref>
<ref id="ref29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pornpongtechavanich</surname> <given-names>P.</given-names></name> <name><surname>Wuttidittachotti</surname> <given-names>P.</given-names></name> <name><surname>Daengsi</surname> <given-names>T.</given-names></name></person-group> (<year>2022</year>). <article-title>QoE modeling for audiovisual associated with MOBA game using subjective approach</article-title>. <source>Multimed. Tools Appl.</source> <volume>81</volume>, <fpage>37763</fpage>&#x2013;<lpage>37779</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11042-022-12807-1</pub-id></citation></ref>
<ref id="ref30"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Rendle</surname> <given-names>S.</given-names></name></person-group> Factorization Machines. (<year>2010</year>). In Proceedings of the 2010 IEEE international conference on data mining. Sydney. pp. 995&#x2013;1000.</citation></ref>
<ref id="ref31"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Song</surname> <given-names>W.</given-names></name> <name><surname>Shi</surname> <given-names>C.</given-names></name> <name><surname>Xiao</surname> <given-names>Z.</given-names></name></person-group> (<year>2020</year>). AutoInt: automatic feature interaction learning via self-attentive neural networks. Proceedings of the 28th ACM International Conference on Information and Knowledge Management. pp. 1161&#x2013;1170.</citation></ref>
<ref id="ref32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Su</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>R.</given-names></name> <name><surname>Erfani</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Detecting beneficial feature interactions for recommender systems</article-title>. <source>AAAI Conf. Artif. Intell.</source> <volume>35</volume>, <fpage>4357</fpage>&#x2013;<lpage>4365</lpage>. doi: <pub-id pub-id-type="doi">10.1609/aaai.v35i5.16561</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sultan</surname> <given-names>H.</given-names></name> <name><surname>Ullah</surname> <given-names>N.</given-names></name> <name><surname>Hong</surname> <given-names>J. S.</given-names></name> <name><surname>Kim</surname> <given-names>S. G.</given-names></name></person-group> (<year>2024</year>). <article-title>Estimation of fractal dimension and segmentation of brain tumor with parallel features aggregation network</article-title>. <source>Fractal Fract.</source> <volume>8</volume>:<fpage>357</fpage>. doi: <pub-id pub-id-type="doi">10.3390/fractalfract8060357</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sun</surname> <given-names>H.</given-names></name> <name><surname>Zheng</surname> <given-names>Z.</given-names></name> <name><surname>Chen</surname> <given-names>J.</given-names></name></person-group> (<year>2013</year>). <article-title>Personalized web service recommendation via normal recovery collaborative filtering</article-title>. <source>IEEE Trans. Serv. Comput.</source> <volume>6</volume>, <fpage>573</fpage>&#x2013;<lpage>579</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TSC.2012.31</pub-id></citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Suznjevic</surname> <given-names>M.</given-names></name> <name><surname>Skorin-Kapov</surname> <given-names>L.</given-names></name> <name><surname>Cerekovic</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>How to measure and model QoE for networked games?</article-title> <source>Multimedia Systems</source> <volume>25</volume>, <fpage>395</fpage>&#x2013;<lpage>420</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00530-019-00615-x</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>S.</given-names></name> <name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Yuan</surname> <given-names>Y.</given-names></name></person-group> (<year>2024</year>). <article-title>TSAE-UNet: a novel network for multi-scene and multi-temporal water body detection based on spatiotemporal feature extraction</article-title>. <source>Remote Sens.</source> <volume>16</volume>:<fpage>3829</fpage>. doi: <pub-id pub-id-type="doi">10.3390/rs16203829</pub-id></citation></ref>
<ref id="ref37"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>He</surname> <given-names>X.</given-names></name> <name><surname>Wang</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). Neural graph collaborative filtering. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval. pp. 165&#x2013;174.</citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Q.</given-names></name> <name><surname>Zhang</surname> <given-names>M.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Location-based deep factorization machine model for service recommendation</article-title>. <source>Appl. Intell.</source> <volume>52</volume>, <fpage>9899</fpage>&#x2013;<lpage>9918</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10489-021-02998-9</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Wattimena</surname> <given-names>A.</given-names></name> <name><surname>Kooij</surname> <given-names>R.</given-names></name> <name><surname>Van</surname> <given-names>V.</given-names></name></person-group> (<year>2006</year>). Predicting the perceived quality of a first person shooter: the quake iv G-model. In Proceedings of 5th ACM SIGCOMM workshop on network and system support for games, NetGames '06, pp. 42&#x2013;45.</citation></ref>
<ref id="ref40"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Xiao</surname> <given-names>J.</given-names></name> <name><surname>Ye</surname> <given-names>H.</given-names></name></person-group> (<year>2017</year>). Attentional factorization machines: learning the weight of feature interactions via attention networks. In Proceedings of the 26th international joint conference on artificial intelligence. pp. 3119&#x2013;3125.</citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xie</surname> <given-names>X.</given-names></name> <name><surname>Jia</surname> <given-names>Z.</given-names></name></person-group> (<year>2022</year>). <article-title>A location-time-aware factorization machine based on fuzzy set theory for game perception</article-title>. <source>Appl. Sci.</source> <volume>12</volume>:<fpage>12819</fpage>. doi: <pub-id pub-id-type="doi">10.3390/app122412819</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Y.</given-names></name> <name><surname>Zheng</surname> <given-names>Z.</given-names></name> <name><surname>Niu</surname> <given-names>X.</given-names></name></person-group> (<year>2021</year>). <article-title>A location-based factorization machine model for web service QoS prediction</article-title>. <source>IEEE Trans. Serv. Comput.</source> <volume>14</volume>, <fpage>1264</fpage>&#x2013;<lpage>1277</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TSC.2018.2876532</pub-id></citation></ref>
<ref id="ref43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>C.</given-names></name> <name><surname>Bengio</surname> <given-names>S.</given-names></name> <name><surname>Hardt</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Understanding deep learning (still) requires rethinking generalization</article-title>. <source>Commun. ACM</source> <volume>64</volume>, <fpage>107</fpage>&#x2013;<lpage>115</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3446776</pub-id></citation></ref>
<ref id="ref44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>W.</given-names></name> <name><surname>Du</surname> <given-names>T.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>Deep learning over multi-field categorical data&#x2014;A case study on user response prediction</article-title>. <source>Lect. Notes Comput. Sci</source> <volume>9626</volume>, <fpage>45</fpage>&#x2013;<lpage>57</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-319-30671-1_4</pub-id></citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Lu</surname> <given-names>G.</given-names></name> <name><surname>Zhan</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Semi-supervised classification of graph convolutional networks with Laplacian rank constraints</article-title>. <source>Neural. Process. Lett.</source> <volume>54</volume>, <fpage>2645</fpage>&#x2013;<lpage>2656</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11063-020-10404-7</pub-id></citation></ref>
<ref id="ref46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zheng</surname> <given-names>J.</given-names></name> <name><surname>Xiang</surname> <given-names>M.</given-names></name> <name><surname>Zhang</surname> <given-names>T.</given-names></name> <name><surname>Zhou</surname> <given-names>J.</given-names></name></person-group> (<year>2024</year>). <article-title>An improved adaptive grid-based progressive triangulated irregular network densification algorithm for filtering airborne LiDAR data</article-title>. <source>Remote Sens.</source> <volume>16</volume>:<fpage>3846</fpage>. doi: <pub-id pub-id-type="doi">10.3390/rs16203846</pub-id></citation></ref>
</ref-list>
</back>
</article>