<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2025.1499629</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Knowledge concept recognition in the learning brain via fMRI classification</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Zhang</surname> <given-names>Wenxin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhang</surname> <given-names>Yiping</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2733239/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Sun</surname> <given-names>Liqian</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zhang</surname> <given-names>Yupei</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1310548/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Shang</surname> <given-names>Xuequn</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/898708/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>School of Computer Science, Northwestern Polytechnical University</institution>, <addr-line>Xi&#x00027;an</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Big Data Storage and Management MIIT Lab</institution>, <addr-line>Xi&#x00027;an</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Shuqiang Wang, Chinese Academy of Sciences (CAS), China</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Qiankun Zuo, Hubei University of Economics, China</p>
<p>Weiheng Yao, Chinese Academy of Sciences (CAS), China</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Yupei Zhang <email>ypzhaang&#x00040;nwpu.edu.cn</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>21</day>
<month>03</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>19</volume>
<elocation-id>1499629</elocation-id>
<history>
<date date-type="received">
<day>09</day>
<month>10</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>25</day>
<month>02</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2025 Zhang, Zhang, Sun, Zhang and Shang.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Zhang, Zhang, Sun, Zhang and Shang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>Knowledge concept recognition (KCR) aims to identify the concepts learned in the brain, which has been a longstanding area of interest for learning science and education. While many studies have investigated object recognition using brain fMRIs, there is limited research on identifying specific knowledge points within the classroom. In this paper, we propose to recognize the knowledge concepts in computer science by classifying the brain fMRIs taken when students are learning the concepts. More specifically, this study made attempts at two representation strategies, i.e., voxel and time difference. Based on the representations, we evaluated traditional classifiers and the combination of CNN and LSTM for KCR. Experiments are conducted on a public dataset collected from 25 students and teachers in a computer science course. The evaluations of classifying fMRI segments show that the classifiers used can all attain good performance when using the time-difference representation, where the CNN-LSTM model reaches the highest accuracy. This research contributes to the understanding of human learning and supports the development of personalized learning.</p></abstract>
<kwd-group>
<kwd>knowledge concept recognition</kwd>
<kwd>deep learning</kwd>
<kwd>fMRI classification</kwd>
<kwd>brain identification</kwd>
<kwd>learning science</kwd>
</kwd-group>
<counts>
<fig-count count="5"/>
<table-count count="5"/>
<equation-count count="4"/>
<ref-count count="49"/>
<page-count count="10"/>
<word-count count="6435"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Brain Imaging Methods</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1 Introduction</title>
<p>The recognition of knowledge concepts in the brain aims to identify the contexts that are learning or thinking, which is critical for human learning understanding (Seguin et al., <xref ref-type="bibr" rid="B28">2023</xref>). It is useful in many current research fields, including the neural pattern in education (Meshulam et al., <xref ref-type="bibr" rid="B23">2021</xref>), intelligent human-machine interface (Allen et al., <xref ref-type="bibr" rid="B1">2022</xref>), and brain disorder treatment (Chianese et al., <xref ref-type="bibr" rid="B6">2021</xref>). Hence, knowledge concept recognition (KCR) has become an emerging direction in recent years due to the quick development of brain science and its applications.</p>
<p>In general, KCR involves scanning the active brain to acquire imaging data during different task states and using classification techniques to identify the corresponding brain images for various task states (Zhang et al., <xref ref-type="bibr" rid="B40">2023</xref>). The techniques of brain imaging acquisition can be invasive or non-invasive, such as functional magnetic resonance imaging (fMRI) and positron emission tomography (PET) (Chang et al., <xref ref-type="bibr" rid="B5">2022</xref>). Wherein, fMRI is widely used in investigations of brain functions due to its high spatial resolution and non-invasive acquisition. The fMRI images are often acquired by brain scanning for many timestamps, where one picture is obtained at each timestamp (Allen et al., <xref ref-type="bibr" rid="B1">2022</xref>; Meshulam et al., <xref ref-type="bibr" rid="B23">2021</xref>). Hence, fMRI data usually involves a sequence of images, where each pixel in the image measures the Blood Oxygenation Level Dependent (BOLD) signal at a brain location. With the time-series fMRI, we acquire the brain activity using the changes of BOLD values in the brain (Allen et al., <xref ref-type="bibr" rid="B1">2022</xref>).</p>
<p>With the assumption of different BOLD patterns in fMRI for different cognitive concepts, the focused KCR task is usually formulated as fMRI classification (Feng et al., <xref ref-type="bibr" rid="B9">2022</xref>). In recent years, many approaches have been developed to identify fMRI images. Zhang et al. (<xref ref-type="bibr" rid="B39">2022b</xref>) proposed a multi-instance model with contrastive learning to identify non-math students and Alzheimer&#x00027;s disease. Qiang et al. (<xref ref-type="bibr" rid="B25">2023</xref>) knitted the VAE-GAN method by integrating variational auto-encoder(VAE) and generative adversarial net(GAN) to implement fMRI augmentation for Attention Deficit Hyperactivity Disorder(ADHD) classification. Zhang et al. (<xref ref-type="bibr" rid="B36">2022a</xref>) used a novel feature selection method by the difference between step distribution curves and utilized a multilayer perceptron pre-trained by a VAE for identifying the Autism spectrum disorder (ASD). However, the current studies of fMRI classification rarely consider the problems in the classroom (Mason and Just, <xref ref-type="bibr" rid="B22">2016</xref>). Li et al. (<xref ref-type="bibr" rid="B18">2023</xref>) used spatio-temporal graph neural networks to identify the learning disability from brain graphs, while the identification of learned concepts has rarely been touched (Lei et al., <xref ref-type="bibr" rid="B16">2023</xref>; Mason and Just, <xref ref-type="bibr" rid="B22">2016</xref>). Wang et al. (<xref ref-type="bibr" rid="B30">2013</xref>) developed a multi-voxel fMRI pattern analysis to identify the difference between abstract and concrete concepts by using a logistic regression classifier, where the fMRIs are yielded by asking different words. Mason and Just (<xref ref-type="bibr" rid="B22">2016</xref>) used the naive Bayes classifiers to identify the physics concepts from fMRI, showing the discriminability of the brain activation signature.</p>
<p>The recent study shows neural representation could predict learning outcomes in students taking a computer science (CS) course (Meshulam et al., <xref ref-type="bibr" rid="B23">2021</xref>). To explore the process, in this paper, we use machine-learning-based fMRI classification methods for KCR in the CS classroom. Our KCR tasks are focused on identifying those concepts learned in student learning, where the concepts involve basic knowledge points of programming (Meshulam et al., <xref ref-type="bibr" rid="B23">2021</xref>). Our study framework is shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. The contributions are 3-folds:</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>The framework of our study. From courses, the brain receives stimulation of concepts, resulting in fMRIs. This study explores creating the mapping <italic>f</italic> to identify which concept is being learned in the brain.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1499629-g0001.tif"/>
</fig>
<p>(1) This paper contributes to the topic of KCR from fMRI in education, which aims to discover the cognitive pattern in the learning brain. It can help trace the knowledge in the brain (Zhang et al., <xref ref-type="bibr" rid="B37">2020</xref>) and make a personalized learning plan.</p>
<p>(2) Two strategies of fMRI classification are discussed, including voxel-based and temporal difference-based methods. For the two methods, traditional machine learning models and deep neural networks are evaluated, respectively.</p>
<p>(3) The CNN-LSTM model integrating convolutional neural networks (CNN) and long short-term memory (LSTM) is utilized to extract the spatial and temporal features from the BOLD variances, resulting in a better performance than other methods.</p>
<p>The remainder of this paper is organized as follows. Section 2 investigates related works of the KCR and fMRI classification. The used dataset is introduced in Section 3. The traditional classifiers and CNN-LSTM are introduced in Section 4, where both the voxel-based and difference-based representations are also given, respectively. Experimental results are presented and analyzed in Section 5. Finally, Section 6 concludes this study.</p>
</sec>
<sec id="s2">
<title>2 Related work</title>
<sec>
<title>2.1 Concept recognition</title>
<p>Recognition of knowledge concepts in brain imaging data involves scanning the brain under different learning task states to obtain brain imaging data. Then, utilizing classification techniques to identify the corresponding brain images for different task states. This is crucial for understanding human learning (Br&#x000E9;chet et al., <xref ref-type="bibr" rid="B4">2019</xref>) and represents a new direction emerging in recent years. According to different types of brain imaging data, KCR can be implemented via different techniques, including functional magnetic resonance imaging (fMRI), structural magnetic resonance imaging (sMRI), and electroencephalography (EEG). In recent years, with the development of fMRI and sMRI (Khvostikov et al., <xref ref-type="bibr" rid="B13">2018</xref>) technologies, researchers have been able to obtain detailed information about brain function and structure, leading to significant advances in the field of cognitive neuroscience. Zeithamova et al. (<xref ref-type="bibr" rid="B35">2019</xref>) proposed a geometric deep learning framework for cross-modal brain anatomy and functional mapping, which is important for understanding the relationship between brain structure and function, as well as studying neurological disorders. Additionally, in the identification of EEG images, Li et al. (<xref ref-type="bibr" rid="B17">2016</xref>) encapsulated multi-channel neurophysiological signals into grid-like frames through wavelet transform and spectrogram transform. They further designed a hybrid deep learning model, combining Convolutional Neural Networks (CNN) and Recurrent Neural Networks (RNN), to extract task-related features, explore inter-channel correlations, and incorporate contextual information from these frames. However, there is few studies of recognizing the KC from fMRIs in a classroom (Zhang et al., <xref ref-type="bibr" rid="B42">2022c</xref>). 
In general, KCR can be cast as a fMRI classification problem simply (Zhang et al., <xref ref-type="bibr" rid="B40">2023</xref>).</p>
</sec>
<sec>
<title>2.2 FMRI classification</title>
<p>Currently, there are many pathological analyses (Wang et al., <xref ref-type="bibr" rid="B32">2021</xref>), neuro-disease diagnoses (Ronicko et al., <xref ref-type="bibr" rid="B26">2020</xref>), and pattern recognition (Wang et al., <xref ref-type="bibr" rid="B31">2019</xref>) methods based on fMRI data. They are mainly divided into traditional machine learning-based methods and deep learning-based methods, where deep learning methods can be further categorized into voxel-based classification methods based on convolutional neural networks and graph-based representation classification methods based on functional connectivity.</p>
<p>Traditional machine learning methods were initially applied to the correlation analysis of brain regions and cognitive functions in fMRI data. By examining the response of each voxel in the fMRI data of subjects under different stimuli (whether the voxel is &#x0201C;activated,&#x0201D; measured by the change in neural metrics at that point), mapping the voxels to cognitive stimuli was attempted. However, this approach ignored the correlation between voxels in different locations. Multi-voxel pattern analysis (MVPA) (Weaverdyck et al., <xref ref-type="bibr" rid="B33">2020</xref>) applies multivariate analysis to multiple voxels in fMRI data to improve the representation of voxel relationships. Therein, linear discriminant analysis and support vector machines are also used in the comparisons. Kuncheva et al. (<xref ref-type="bibr" rid="B15">2010</xref>) proposed a support vector machine model based on random subspaces and compared its performance with other machine learning classifiers for fMRI classification. Ryali et al. (<xref ref-type="bibr" rid="B27">2010</xref>) achieved the dual objectives of discriminating brain regions and classifying fMRI data using logistic regression combined with L1 and L2 regularization and other machine learning techniques. However, the traditional machine learning-based fMRI classification is insufficient in representation learning.</p>
<p>Deep learning algorithms, benefiting from the power of neural networks, show the better performance in the fMRI classification. On the one hand, voxel-based deep learning methods have achieved research results in various fields. Feng et al. (<xref ref-type="bibr" rid="B9">2022</xref>) proposed a method that combines Deep Feature Selection (DFS) and Graph Convolutional Networks (GCN), to classify ASD and developing control groups, significantly improving the prediction performance. Researchers then focused on the temporal nature of fMRI data, integrating sequence learning ideas such as LSTM and Markov processes into fMRI data classification. These deep learning methods have shown improvement compared to traditional machine learning methods, but generally ignored the structure information, such as the interregional correlations of the brain (Li et al., <xref ref-type="bibr" rid="B18">2023</xref>). On the other hand, functional connectivity (FC) is achieved based on the voxel-wise time series of fMRI images, reflecting the functional spatio-temporal relationships between brain regions (Lurie et al., <xref ref-type="bibr" rid="B20">2020</xref>). Dynamic connectivity analyses (Zarghami and Friston, <xref ref-type="bibr" rid="B34">2020</xref>) are investigated the neuronal basis of metastability. Generally, two main methods are often used to calculate FC: correlation analysis (Li&#x000E9;geois et al., <xref ref-type="bibr" rid="B19">2020</xref>) and clustering decomposition (Cribben and Yu, <xref ref-type="bibr" rid="B8">2017</xref>). In the former, brain regions with strong correlations are generally considered to be functionally connected, such as Pearson product-moment correlation and Spearman&#x00027;s rank correlation (Lei et al., <xref ref-type="bibr" rid="B16">2023</xref>). The latter clusters brain regions to be functionally connected. 
FC has been utilized for diagnosing Autism Spectrum Disorder (ASD) (Shao et al., <xref ref-type="bibr" rid="B29">2021</xref>), Alzheimer&#x00027;s disease (AD) (Zuo et al., <xref ref-type="bibr" rid="B48">2024b</xref>, <xref ref-type="bibr" rid="B47">2023a</xref>), and cognitive impairment (Zuo et al., <xref ref-type="bibr" rid="B46">2024a</xref>, <xref ref-type="bibr" rid="B49">2023b</xref>).</p>
<p>Besides, graph-based learning methods have been developed to identify the brain status, since the brain graph can be conducted by functional connectivity matrix (Bessadok et al., <xref ref-type="bibr" rid="B2">2022</xref>; Zong et al., <xref ref-type="bibr" rid="B45">2024</xref>). Kim and Ye (<xref ref-type="bibr" rid="B14">2020</xref>) develop an approach for graph analysis based on resting-state fMRI to diagnose the spectrum disorders. To consider the sequential features, Lei et al. (<xref ref-type="bibr" rid="B16">2023</xref>) developed a Spatio-Temporal Graph Convolutional Network (ST-GCN) for brain representation.</p>
<p>However, there are few studies that diagnose whether a KC from a computer course has been mastered by a student (Zhang et al., <xref ref-type="bibr" rid="B42">2022c</xref>). Inspired by this, this study aims to learn and encode both temporal and spatial information into the graph structure for subsequent spectral graph convolution methods to learn concepts from the graph structure. With the different representations, this study aims to recognize the KC from brain fMRI, exploring a novel possibility of knowledge diagnoses for education (Zhang et al., <xref ref-type="bibr" rid="B37">2020</xref>).</p>
</sec>
</sec>
<sec id="s3">
<title>3 Problem definition and the used public dataset</title>
<p>To be more clear, we define the problem of KCR here. Let <italic>X</italic> be an input fMRI and <italic>y</italic> be the knowledge concept (KC) learned in the brain. The KCR problem finds a function <italic>f</italic> to:</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M1"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>m</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>m</mml:mi><mml:mi>i</mml:mi><mml:mi>z</mml:mi><mml:mi>e</mml:mi><mml:mo>&#x02016;</mml:mo><mml:mi>f</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mi>y</mml:mi><mml:msubsup><mml:mrow><mml:mo>&#x02016;</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>such that <italic>f</italic> can identify the KC in the brain. The studies on data analysis have been developed in the work of Meshulam et al. (<xref ref-type="bibr" rid="B23">2021</xref>). But they did not develop the KCR in their works while focusing on analyzing the relationship between students and experts.</p>
<p>Inspired by the research work of Meshulam et al. (<xref ref-type="bibr" rid="B23">2021</xref>), this study uses their published dataset, which collected brain images from 25 participants,<xref ref-type="fn" rid="fn0001"><sup>1</sup></xref> as shown in <xref ref-type="fig" rid="F2">Figure 2</xref>. The participants in this study consisted of 20 students and 5 teachers. The short description is as follows: The students underwent six scans, while the teachers participated in a single fMRI scan. During the first five scans, students viewed lectures from the course &#x0201C;An Introduction to Computer Science,&#x0201D; which covered topics such as conditions and loops, libraries and functions, abstract data types, performance, and the theory of computing. In the sixth scan, both students and teachers watched a knowledge review video summarizing the material from the previous weeks and then took an exam. NOTE THAT there are many limitations in data collection, such as student&#x00027;s requirements and the number of students in the classroom, leading to the small size of the dataset. More details about this dataset can be found in the paper published by Meshulam et al. (<xref ref-type="bibr" rid="B23">2021</xref>).</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>An example fMRI <bold>(left)</bold>, MRI <bold>(right upper)</bold>, and hippocampus <bold>(right down)</bold>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1499629-g0002.tif"/>
</fig>
<p>Due to the complexity, this study focuses on the hippocampus. Learning in the brain is a complex system, which is affected by many brain regions, such as the hippocampus, the prefrontal cortex, and the parietal lobe (Gavazzi et al., <xref ref-type="bibr" rid="B10">2023</xref>). However, this study aims to recognize the abstract concepts learned in the brain. Since the hippocampus is one of the most important brain regions in concept abstraction (Courellis et al., <xref ref-type="bibr" rid="B7">2024</xref>), we here focused on the hippocampus for learning KC from videos and extracted the corresponding fMRI patches. For the convenience in model training, we extracted 668 fMRI voxels for the hippocampus. To mitigate the small sample issue, we divided the fMRI sequence into short fragments along time with about 15 time stamps per segment. The category labels correspond to five computer-science concepts.</p>
</sec>
<sec id="s4">
<title>4 The used fMRI classification methods</title>
<sec>
<title>4.1 Traditional voxel-based machine learning methods</title>
<sec>
<title>4.1.1 Data pre-processing</title>
<p>The fMRI images were here pre-processed as follows. First, motion correction was applied using MCFLIRT (Jenkinson et al., <xref ref-type="bibr" rid="B12">2002</xref>) to address rearrangements between images, correcting for motion both within and across questions. Next, a joint registration of functional and anatomical images was performed for each participant using a 12-degree linear transformation. The anatomical images were then normalized to the standard brain template defined by the Montreal Neurological Institute&#x00027;s 152-brain average, followed by a 6-degree-of-freedom non-linear registration from structural to standard space. Finally, slice timing correction was conducted.</p>
<p>Given the complex spatial structure of the hippocampus examined in this study, the 668 voxels were flattened into a one-dimensional format. The entire dataset was divided into different time steps, usually with a time step of 15. This process generated the preliminary data required for the model, where each sample is represented as a tensor of size 668 &#x000D7; 15, where the 668 voxels represent the hippocampus and 15 represents the data fragment having 15 time steps. To mitigate the dimensional impact between metrics and enhance the comparability of data indicators, data standardization is essential. This paper employs the following normalization formula:</p>
<disp-formula id="E2"><label>(2)</label><mml:math id="M2"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>x</mml:mi><mml:mo>-</mml:mo><mml:mi>&#x003BC;</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>x</italic> indicates the raw data, &#x003BC; and &#x003C3; are the mean and standard deviation of <italic>x</italic>, and <italic>x</italic>&#x02032; is the normalized data. In addition, we used two normalizations for individual samples and the entire category.</p>
</sec>
<sec>
<title>4.1.2 Traditional machine learning models</title>
<p>The traditional classification models utilized in this research are the Support Vector Machine (SVM) and the k-Nearest Neighbors (KNN) algorithm (Bhutta et al., <xref ref-type="bibr" rid="B3">2023</xref>). Both methods are often employed for classification (Zhang et al., <xref ref-type="bibr" rid="B43">2021</xref>). The parameters used in experiments are introduced in the specific evaluations.</p>
</sec>
</sec>
<sec>
<title>4.2 BOLD differences-based spatio-temporal deep neural networks</title>
<sec>
<title>4.2.1 Data processing for difference computation</title>
<p>In the preceding experiments, the flattening of all voxels into a one-dimensional format resulted in the loss of their overall spatial characteristics. To account for spatial features, this study employed a two-dimensional representation of fMRI images. To account for temporal features, this paper implements differential operations on the original data within the temporal dimension, expressed mathematically by <italic>X</italic><sub>diff</sub> &#x0003D; <italic>X</italic><sub><italic>t</italic></sub> &#x02212; <italic>X</italic><sub><italic>t</italic>&#x02212;1</sub> where <italic>X</italic><sub><italic>t</italic></sub> is the <italic>t</italic>-slice in a fMRI. Then, normalization is performed on the time-difference.</p>
</sec>
<sec>
<title>4.2.2 The CNN-LSTM model</title>
<p>To extract robust spatio-temporal features, we develop a spatio-temporal network model, CNN-LSTM, as illustrated in <xref ref-type="fig" rid="F3">Figure 3</xref>. This model comprises five distinct components: data enhancement, convolutional neural network (CNN), long short-term memory neural network (LSTM), feature fusion, and fully connected neural network. In CNN-LSTM, Convolutional neural networks (CNN) can extract spatial features of the model; long short-term memory neural networks (LSTM) can extract the temporal sequence features of the data; combining CNN and LSTM together can achieve the extraction of model spatio-temporal features.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>The used CNN-LSTM framework, where the input size is depended on the classification tasks in hand, while the loss function is the cross entropy (Mao et al., <xref ref-type="bibr" rid="B21">2023</xref>). The dashed arrows in the figure indicate the direction of back-propagating.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1499629-g0003.tif"/>
</fig>
<p>Given a set of fMRI data <italic>X</italic> &#x0003D; {<italic>X</italic><sub>1</sub>, <italic>X</italic><sub>2</sub>, <italic>X</italic><sub>3</sub>, ..., <italic>X</italic><sub><italic>n</italic></sub>}, each <italic>X</italic><sub><italic>i</italic></sub> = [<italic>X</italic><sub><italic>i,j</italic></sub>] represents the BOLD value of the <italic>j</italic>-th voxel in the hippocampus at the time step <italic>t</italic><sub><italic>i</italic></sub>. The model we proposed tries to learn an optimal model <italic>F</italic> so that <italic>F</italic>(<italic>X</italic><sub><italic>i</italic></sub>) is directly mapped to its corresponding label <italic>y</italic><sub><italic>i</italic></sub>. There are five concept types of <italic>y</italic><sub><italic>i</italic></sub>, which correspond to the fMRI images under specific computer course tasks. The CNN-LSTM model described in this paper can be outlined through the following steps:</p>
<list list-type="order">
<list-item><p>Process the fMRI images using a time difference operation to obtain the data <italic>X</italic>.</p></list-item>
<list-item><p>Apply a convolution to the enhanced <italic>X</italic> through the CNN layer to yield <italic>X</italic><sup><italic>C</italic></sup>.</p></list-item>
<list-item><p>Pass the enhanced <italic>X</italic> through the LSTM layer with <italic>k</italic> modules to obtain <italic>X</italic><sup><italic>L</italic></sup>.</p></list-item>
<list-item><p>Fuse <italic>X</italic><sup><italic>C</italic></sup> and <italic>X</italic><sup><italic>L</italic></sup> to obtain <italic>X</italic><sup><italic>M</italic></sup>.</p></list-item>
<list-item><p>Feed the obtained <italic>X</italic><sup><italic>M</italic></sup> into a fully connected neural network (FCN) for classification.</p></list-item>
</list>
<p>In summary, the prediction results of this framework are achieved by</p>
<disp-formula id="E3"><label>(3)</label><mml:math id="M3"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>M</mml:mi><mml:mi>L</mml:mi><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>C</mml:mi><mml:mi>N</mml:mi><mml:mi>N</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mi>L</mml:mi><mml:mi>S</mml:mi><mml:mi>T</mml:mi><mml:mi>M</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>y</italic><sub><italic>i</italic></sub> is the label; MLP indicates the Multi-Layer Perceptron, while LSTM is Long Short-Term Memory. Note that LSTM is a traditional approach to handle time sequence data, while the Transformer has been well known for its strong capabilities in parallel computation, global context modeling, and adaptability to sequence data (Han et al., <xref ref-type="bibr" rid="B11">2021</xref>). However, the fMRI dataset used here is too limited to train a Transformer due to its complexity. Hence, in this study, we just explored the KCR by using LSTM (Zhang et al., <xref ref-type="bibr" rid="B42">2022c</xref>).</p>
</sec>
</sec>
</sec>
<sec id="s5">
<title>5 Experiment results</title>
<sec>
<title>5.1 Evaluation metrics</title>
<p>We evaluated the classification methods used for KCR by common metrics. For this multi-class classification, we calculated each metric by considering the samples belonging to the target class as positive samples. Let <italic>TP</italic>, <italic>TN</italic>, <italic>FP</italic>, and <italic>FN</italic> be the true positive, true negative, false positive, and false negative counts of the classification results, respectively. The following metrics evaluate model performance on the test set:</p>
<disp-formula id="E4"><mml:math id="M4"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mtable style="text-align:axis;" equalrows="false" columnlines="none" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="italic"><mml:mtext class="textit">Precision</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:msub><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:msub><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:msub><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="italic"><mml:mtext class="textit">Recall</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:msub><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:msub><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mstyle mathvariant="italic"><mml:mtext class="textit">F_1 
Score</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>2</mml:mn><mml:mo>&#x000B7;</mml:mo><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:msub><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x000B7;</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:msub><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:mfrac></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mstyle mathvariant="italic"><mml:mtext class="textit">Accuracy</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mstyle mathvariant="italic"><mml:mtext class="textit">Cohen&#x00027;s Kappa 
Coefficient</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>i</italic> indicates the <italic>i</italic>-th class, <italic>P</italic><sub><italic>o</italic></sub> is the observed agreement, and <italic>P</italic><sub><italic>e</italic></sub> is the expected agreement by chance. The 10-fold cross-validation is adopted to achieve the classification accuracy. The process is as follows: we randomly divided the datasets into 10 folds, where 9 folds are used for training classifiers and the remaining fold is used for computing test accuracy. Finally, we reported the average values over the 10 folds. To evaluate the data imbalance issue, we also computed the Micro-average and Macro-average as in the previous work (Zhang et al., <xref ref-type="bibr" rid="B44">2022d</xref>). In this study, we tried to ensure that the number of samples across classes was approximately balanced, leaving the issue of data imbalance for future consideration.</p>
</sec>
<sec>
<title>5.2 Voxel-based classification evaluation</title>
<sec>
<title>5.2.1 Evaluations on traditional classifiers</title>
<p>We classified fMRI data under five concept categories using traditional classification models, i.e., SVM and KNN. The classification results are shown in <xref ref-type="table" rid="T1">Table 1</xref>, together with their parameter settings. From the results, it can be seen that the SVM model performs well, achieving an accuracy of 78%. In contrast, the performance of KNN is relatively poor, with only 42% accuracy. This might be because the KNN model is relatively simple and considers only the &#x0201C;distance&#x0201D; factor, leading to lower accuracy.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Results of using SVM and KNN algorithms with different normalizations, where the empirical parameters are set for high prediction accuracy.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Classifier</bold></th>
<th valign="top" align="center"><bold>Parameter setting</bold></th>
<th valign="top" align="center" colspan="2"><bold>Accuracy</bold></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#919498;color:#ffffff">
<td/>
<td/>
<td valign="top" align="center"><bold>Region normalization</bold></td>
<td valign="top" align="center"><bold>Category normalization</bold></td>
</tr> <tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">decision_function_shape = &#x0201C;ovr,&#x0201D; kernel = &#x0201C;linear,&#x0201D; C = 10, max_iter = 1,000</td>
<td valign="top" align="center">0.76</td>
<td valign="top" align="center">0.78</td>
</tr> <tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="center">n_neighbors = 30, weights = &#x0201C;uniform,&#x0201D; p = 2</td>
<td valign="top" align="center">0.39</td>
<td valign="top" align="center">0.42</td>
</tr></tbody>
</table>
</table-wrap>
<p><xref ref-type="fig" rid="F4">Figure 4</xref> presents the confusion matrices of SVM and KNN obtained from the experiments. The results indicate that the SVM achieves relatively high classification accuracy across all classes. On Category &#x0201C;2,&#x0201D; i.e., abstract data types, SVM has relatively lower performance. The classification accuracy for the remaining categories exceeds 80%. In contrast, KNN&#x00027;s overall performance is relatively poor, as shown in the left matrix. Category &#x0201C;1&#x0201D; is significantly misclassified into Category &#x0201C;2.&#x0201D;</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Confusion matrices of SVM <bold>(right)</bold> and KNN <bold>(left)</bold> with category normalization. The vertical axis is the actual labels, while the horizontal axis is the predicted labels.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1499629-g0004.tif"/>
</fig>
<p><xref ref-type="table" rid="T2">Table 2</xref> presents the experimental results investigating the impact of the time step {1, 3, 5}. It is observed that reducing the time step can result in an increase in training accuracy for KNN and SVM. This is due to the increased number of training samples.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Comparison of SVM and KNN with different time steps, where the empirical parameters are set to obtain higher classification accuracy.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Classifier</bold></th>
<th valign="top" align="center"><bold>Parameter setting</bold></th>
<th valign="top" align="center" colspan="3"><bold>Category normalization</bold></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#919498;color:#ffffff">
<td/>
<td/>
<td valign="top" align="center" colspan="3"><bold>Time step</bold></td>
</tr>
 <tr>
<td/>
<td/>
<td valign="top" align="center"><bold>5</bold></td>
<td valign="top" align="center"><bold>3</bold></td>
<td valign="top" align="center"><bold>1</bold></td>
</tr> <tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">decision_function_shape = &#x0201C;ovr,&#x0201D; kernel = &#x0201C;linear,&#x0201D; C = 10, max_iter = 1,000</td>
<td valign="top" align="center">0.65</td>
<td valign="top" align="center">0.68</td>
<td valign="top" align="center">0.69</td>
</tr> <tr>
<td valign="top" align="left">KNN</td>
<td valign="top" align="center">n_neighbors = 30, weights = &#x0201C;uniform,&#x0201D; p = 2</td>
<td valign="top" align="center">0.53</td>
<td valign="top" align="center">0.61</td>
<td valign="top" align="center">0.71</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec>
<title>5.2.2 Evaluations on deep learning models</title>
<p>We conducted experiments by using MLP and (CNN1D&#x0002B;LSTM) &#x000D7; MLP to show the prediction performance. The results are shown in <xref ref-type="table" rid="T3">Table 3</xref> together with their parameters. As shown, we can make the following observations and conclusions. (1) An MLP consisting of a 5-layer neural network was trained to directly classify the flattened data vector, achieving an accuracy of 74%. (2) Considering the temporal features, we evaluated the model (CNN1D&#x0002B;LSTM) &#x000D7; MLP, which combines CNN1D and LSTM on flattened vectors and then connects to an MLP, achieving 81%.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Parameters and comparison of MLP and (CNN&#x0002B;LSTM) &#x000D7; MLP, where the empirical parameters are set to obtain higher classification accuracy.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Classifier</bold></th>
<th valign="top" align="center"><bold>Parameter setting</bold></th>
<th valign="top" align="center"><bold>Accuracy</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">(10,020,512,256,128,64,5), optimizer = &#x0201C;adam,&#x0201D; Epoch = 3,000</td>
<td valign="top" align="center">0.74 &#x000B1; 0.023</td>
</tr> <tr>
<td valign="top" align="left">(CNN1D&#x0002B;LSTM) &#x000D7; MLP</td>
<td valign="top" align="center">Conv1d(in_channels = 668, out_channels = 100, kernel_size = 1) LSTM(input_size = 668, hidden_size = 100, num_layers = 5, bias = True, batch_first = True, dropout = 0.25, bidirectional = False) (3,000,512,256,64,5), Dropout = 0.2, Epoch = 3,000</td>
<td valign="top" align="center">0.81 &#x000B1; 0.031</td>
</tr></tbody>
</table>
</table-wrap>
<p><xref ref-type="fig" rid="F5">Figure 5</xref> displays the ROC curves obtained from the experiment. The results show that (CNN1D&#x0002B;LSTM) &#x000D7; MLP achieves better performance than MLP in terms of ROC and AUC. Besides, we aggregate the per-class metrics into the micro/macro-average ROC curve and AUC. Their results show that (CNN1D&#x0002B;LSTM) &#x000D7; MLP is better than MLP. All results imply the effectiveness of integrating spatial and temporal features.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>ROC curves of MLP <bold>(left)</bold> and (CNN&#x0002B;LSTM) &#x000D7; MLP <bold>(right)</bold>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1499629-g0005.tif"/>
</fig>
</sec>
</sec>
<sec>
<title>5.3 Time-difference based classification evaluation</title>
<p>This subsection evaluates the time difference based methods. To check the effectiveness of each component, we employed four different models for comparisons, shown in <xref ref-type="table" rid="T4">Table 4</xref>, together with their used parameters. Note that we adjusted the models in the training process to seek their best performance in our experiments.</p>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Parameters and accuracy of the combinations of MLP, CNN, and LSTM.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Classifier</bold></th>
<th valign="top" align="center"><bold>Parameter setting</bold></th>
<th valign="top" align="center"><bold>Accuracy</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">(10,020,512,256,128,64,5), optimizer = &#x0201C;adam,&#x0201D; Epoch = 3,000</td>
<td valign="top" align="center">0.88 &#x000B1; 0.016</td>
</tr> <tr>
<td valign="top" align="left">CNN&#x0002B;MLP</td>
<td valign="top" align="center">Conv[1(3,3), strides=1, padding = &#x0201C;same,&#x0201D; use_bias = False] Conv2D[1,(3,3), strides = 1, padding = &#x0201C;same,&#x0201D; use_bias = False] (20,040,512,256,64,5), Dropout = 0.2, Epoch = 500</td>
<td valign="top" align="center">0.91 &#x000B1; 0.024</td>
</tr> <tr>
<td valign="top" align="left">LSTM&#x0002B;MLP</td>
<td valign="top" align="center">LSTM(input_size = 668, hidden_size = 100, num_layers = 5, bias = True, batch_first = True, dropout = 0.25, bidirectional = False) (11,520,512,256,64,5) Dropout = 0.2, Epoch = 500</td>
<td valign="top" align="center">0.92 &#x000B1; 0.021</td>
</tr> <tr>
<td valign="top" align="left">(CNN&#x0002B;LSTM) &#x000D7; MLP</td>
<td valign="top" align="center">Conv(in_channels = 668, out_channels = 100, kernel_size = 1) LSTM(input_size = 668, hidden_size = 100, num_layers = 5, bias = True, batch_first = True, dropout = 0.25, bidirectional = False) (3,000,512,256,64,5), Dropout = 0.2, Epoch = 500</td>
<td valign="top" align="center">0.94 &#x000B1; 0.029</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>The standard deviations are obtained from the 10 folds.</p>
</table-wrap-foot>
</table-wrap>
<p>From <xref ref-type="table" rid="T4">Table 4</xref>, these observations reveal that both CNN&#x0002B;MLP and LSTM&#x0002B;MLP reach better performance than MLP, while (CNN&#x0002B;LSTM) &#x000D7; MLP achieves the best performance. Besides, the MLP model attained good results after 3,000 training iterations, while the other models achieved comparable performance after only 500 training iterations. This implies that more powerful feature representations not only enhance the classification accuracy of the models but also accelerate their convergence. However, fewer training iterations likely incur higher standard deviations.</p>
<p>In <xref ref-type="table" rid="T5">Table 5</xref>, we computed the performance evaluation results of each model utilizing time difference in terms of precision, accuracy, recall, F1 score, and Cohen&#x00027;s Kappa coefficient. The results indicate that considering either spatial or temporal features enhances classification performance. Moreover, the integration of both types of features appears to yield superior results compared to the use of either feature type alone. This implies that concept learning in the brain is not only a structural activity but also a temporal activity.</p>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Evaluation metrics of compared methods, where the accuracy is from <xref ref-type="table" rid="T4">Table 4</xref>.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th valign="top" align="left"><bold>Classifier</bold></th>
<th valign="top" align="center"><bold>Precision</bold></th>
<th valign="top" align="center"><bold>Accuracy</bold></th>
<th valign="top" align="center"><bold>Recall</bold></th>
<th valign="top" align="center"><bold>F1 Score</bold></th>
<th valign="top" align="center"><bold>Cohen&#x00027;s Kappa coefficient</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MLP</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">0.88</td>
<td valign="top" align="center">0.84</td>
<td valign="top" align="center">0.867</td>
<td valign="top" align="center">0.67</td>
</tr> <tr>
<td valign="top" align="left">CNN&#x0002B;MLP</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">0.91</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.875</td>
<td valign="top" align="center">0.76</td>
</tr> <tr>
<td valign="top" align="left">LSTM&#x0002B;MLP</td>
<td valign="top" align="center">0.91</td>
<td valign="top" align="center">0.92</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">0.905</td>
<td valign="top" align="center">0.74</td>
</tr> <tr>
<td valign="top" align="left">(CNN&#x0002B;LSTM) &#x000D7; MLP</td>
<td valign="top" align="center"><bold>0.93</bold></td>
<td valign="top" align="center"><bold>0.94</bold></td>
<td valign="top" align="center"><bold>0.91</bold></td>
<td valign="top" align="center"><bold>0.935</bold></td>
<td valign="top" align="center"><bold>0.81</bold></td>
</tr></tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec sec-type="conclusions" id="s6">
<title>6 Conclusion</title>
<p>In this study, we proposed utilizing learning methods for knowledge concept recognition (KCR), a compelling problem in brain decoding. We implemented two approaches to data preprocessing: raw voxel sequences and time-difference sequences. When utilizing time-difference sequences, the results show significantly improved performance compared to using the raw voxel sequences. Experimental results show that the consideration of both spatial and temporal features is particularly effective in fMRI classification for KCR.</p>
<p>In future work, we will consider bigger fMRI datasets for learning science and use more explainable feature extraction models and deep models (Ning et al., <xref ref-type="bibr" rid="B24">2023</xref>). To address the small data-size problem, we will adopt the federated learning framework to conduct fMRI analyses with many other institutes (Zhang et al., <xref ref-type="bibr" rid="B41">2025</xref>, <xref ref-type="bibr" rid="B38">2024</xref>). Finally, toward a personalized learning plan, the variability between students will be worth considering in the future.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s7">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="ethics-statement" id="s8">
<title>Ethics statement</title>
<p>Ethical review and approval was not required for the study on human participants in accordance with the local legislation and institutional requirements. Written informed consent from the patients/participants or patients/participants&#x00027; legal guardian/next of kin was not required to participate in this study in accordance with the national legislation and the institutional requirements.</p>
</sec>
<sec sec-type="author-contributions" id="s9">
<title>Author contributions</title>
<p>WZ: Data curation, Methodology, Software, Validation, Visualization, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing, Conceptualization, Investigation. YiZ: Conceptualization, Formal analysis, Methodology, Validation, Writing &#x02013; original draft. LS: Conceptualization, Formal analysis, Investigation, Validation, Writing &#x02013; original draft. YuZ: Conceptualization, Funding acquisition, Project administration, Resources, Supervision, Visualization, Writing &#x02013; review &#x00026; editing, Writing &#x02013; original draft. XS: Conceptualization, Funding acquisition, Resources, Validation, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<sec sec-type="funding-information" id="s10">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. This work was supported in part by the Key Research and Development Program of Higher Education Institution Science Research and Development Center under the Ministry of Education (2022IT222).</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declare that no Gen AI was used in the creation of this manuscript.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn id="fn0001"><p><sup>1</sup><ext-link ext-link-type="uri" xlink:href="https://openneuro.org/datasets/ds003233/versions/1.2.0">https://openneuro.org/datasets/ds003233/versions/1.2.0</ext-link></p></fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Allen</surname> <given-names>E. J.</given-names></name> <name><surname>St-Yves</surname> <given-names>G.</given-names></name> <name><surname>Wu</surname> <given-names>Y.</given-names></name> <name><surname>Breedlove</surname> <given-names>J. L.</given-names></name> <name><surname>Prince</surname> <given-names>J. S.</given-names></name> <name><surname>Dowdle</surname> <given-names>L. T.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>A massive 7t fMRI dataset to bridge cognitive neuroscience and artificial intelligence</article-title>. <source>Nat. Neurosci</source>. <volume>25</volume>, <fpage>116</fpage>&#x02013;<lpage>126</lpage>. <pub-id pub-id-type="doi">10.1038/s41593-021-00962-x</pub-id><pub-id pub-id-type="pmid">34916659</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bessadok</surname> <given-names>A.</given-names></name> <name><surname>Mahjoub</surname> <given-names>M. A.</given-names></name> <name><surname>Rekik</surname> <given-names>I.</given-names></name></person-group> (<year>2022</year>). <article-title>Graph neural networks in network neuroscience</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell</source>. <volume>45</volume>, <fpage>5833</fpage>&#x02013;<lpage>5848</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2022.3209686</pub-id><pub-id pub-id-type="pmid">36155474</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bhutta</surname> <given-names>M. R.</given-names></name> <name><surname>Ali</surname> <given-names>M. U.</given-names></name> <name><surname>Zafar</surname> <given-names>A.</given-names></name> <name><surname>Kim</surname> <given-names>K. S.</given-names></name> <name><surname>Byun</surname> <given-names>J. H.</given-names></name> <name><surname>Lee</surname> <given-names>S. W.</given-names></name></person-group> (<year>2023</year>). <article-title>Artificial neural network models: implementation of functional near-infrared spectroscopy-based spontaneous lie detection in an interactive scenario</article-title>. <source>Front. Comput. Neurosci</source>. <volume>17</volume>:<fpage>1286664</fpage>. <pub-id pub-id-type="doi">10.3389/fncom.2023.1286664</pub-id><pub-id pub-id-type="pmid">38328471</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Br&#x000E9;chet</surname> <given-names>L.</given-names></name> <name><surname>Brunet</surname> <given-names>D.</given-names></name> <name><surname>Birot</surname> <given-names>G.</given-names></name> <name><surname>Gruetter</surname> <given-names>R.</given-names></name> <name><surname>Michel</surname> <given-names>C. M.</given-names></name> <name><surname>Jorge</surname> <given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>Capturing the spatiotemporal dynamics of self-generated, task-initiated thoughts with EEG and fMRI</article-title>. <source>Neuroimage</source> <volume>194</volume>, <fpage>82</fpage>&#x02013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.03.029</pub-id><pub-id pub-id-type="pmid">30902640</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chang</surname> <given-names>H.</given-names></name> <name><surname>Sheng</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Yang</surname> <given-names>H.</given-names></name> <name><surname>Pan</surname> <given-names>X.</given-names></name> <name><surname>Liu</surname> <given-names>H.</given-names></name></person-group> (<year>2022</year>). <article-title>Non-invasive brain imaging and stimulation in post-stroke motor rehabilitation: a review</article-title>. <source>IEEE Trans. Cogn. Dev. Syst</source>. <volume>15</volume>, <fpage>1085</fpage>&#x02013;<lpage>1101</lpage>. <pub-id pub-id-type="doi">10.1109/TCDS.2022.3232581</pub-id></citation>
</ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chianese</surname> <given-names>A. A.</given-names></name> <name><surname>Jackson</surname> <given-names>S. Z.</given-names></name> <name><surname>Souders</surname> <given-names>M. C.</given-names></name></person-group> (<year>2021</year>). <article-title>Psychosexual knowledge and education in autism spectrum disorder individuals</article-title>. <source>J. Am. Assoc. Nurse Pract</source>. <volume>33</volume>, <fpage>776</fpage>&#x02013;<lpage>784</lpage>. <pub-id pub-id-type="doi">10.1097/JXX.0000000000000508</pub-id><pub-id pub-id-type="pmid">33273264</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Courellis</surname> <given-names>H. S.</given-names></name> <name><surname>Minxha</surname> <given-names>J.</given-names></name> <name><surname>Cardenas</surname> <given-names>A. R.</given-names></name> <name><surname>Kimmel</surname> <given-names>D. L.</given-names></name> <name><surname>Reed</surname> <given-names>C. M.</given-names></name> <name><surname>Valiante</surname> <given-names>T. A.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Abstract representations emerge in human hippocampal neurons during inference</article-title>. <source>Nature</source> <volume>632</volume>, <fpage>841</fpage>&#x02013;<lpage>849</lpage>. <pub-id pub-id-type="doi">10.1038/s41586-024-07799-x</pub-id><pub-id pub-id-type="pmid">39143207</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cribben</surname> <given-names>I.</given-names></name> <name><surname>Yu</surname> <given-names>Y.</given-names></name></person-group> (<year>2017</year>). <article-title>Estimating whole-brain dynamics by using spectral clustering</article-title>. <source>J. R. Stat. Soc. Series C</source> <volume>66</volume>, <fpage>607</fpage>&#x02013;<lpage>627</lpage>. <pub-id pub-id-type="doi">10.1111/rssc.12169</pub-id><pub-id pub-id-type="pmid">26162552</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Feng</surname> <given-names>W.</given-names></name> <name><surname>Liu</surname> <given-names>G.</given-names></name> <name><surname>Zeng</surname> <given-names>K.</given-names></name> <name><surname>Zeng</surname> <given-names>M.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>A review of methods for classification and recognition of asd using fMRI data</article-title>. <source>J. Neurosci. Methods</source> <volume>368</volume>:<fpage>109456</fpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2021.109456</pub-id><pub-id pub-id-type="pmid">34954253</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gavazzi</surname> <given-names>G.</given-names></name> <name><surname>Giovannelli</surname> <given-names>F.</given-names></name> <name><surname>Noferini</surname> <given-names>C.</given-names></name> <name><surname>Cincotta</surname> <given-names>M.</given-names></name> <name><surname>Cavaliere</surname> <given-names>C.</given-names></name> <name><surname>Salvatore</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Subregional prefrontal cortex recruitment as a function of inhibitory demand: an fMRI metanalysis</article-title>. <source>Neurosci. Biobehav. Rev</source>. <volume>152</volume>:<fpage>105285</fpage>. <pub-id pub-id-type="doi">10.1016/j.neubiorev.2023.105285</pub-id><pub-id pub-id-type="pmid">37327836</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Han</surname> <given-names>K.</given-names></name> <name><surname>Xiao</surname> <given-names>A.</given-names></name> <name><surname>Wu</surname> <given-names>E.</given-names></name> <name><surname>Guo</surname> <given-names>J.</given-names></name> <name><surname>Xu</surname> <given-names>C.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name></person-group> (<year>2021</year>). <article-title>Transformer in transformer</article-title>. <source>Adv. Neural Inf. Process. Syst</source>. <volume>34</volume>, <fpage>15908</fpage>&#x02013;<lpage>15919</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://proceedings.neurips.cc/paper_files/paper/2021/file/854d9fca60b4bd07f9bb215d59ef5561-Paper.pdf">https://proceedings.neurips.cc/paper_files/paper/2021/file/854d9fca60b4bd07f9bb215d59ef5561-Paper.pdf</ext-link></citation>
</ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jenkinson</surname> <given-names>M.</given-names></name> <name><surname>Bannister</surname> <given-names>P.</given-names></name> <name><surname>Brady</surname> <given-names>M.</given-names></name> <name><surname>Smith</surname> <given-names>S.</given-names></name></person-group> (<year>2002</year>). <article-title>Improved optimization for the robust and accurate linear registration and motion correction of brain images</article-title>. <source>Neuroimage</source> <volume>17</volume>, <fpage>825</fpage>&#x02013;<lpage>841</lpage>. <pub-id pub-id-type="doi">10.1006/nimg.2002.1132</pub-id><pub-id pub-id-type="pmid">12377157</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khvostikov</surname> <given-names>A.</given-names></name> <name><surname>Aderghal</surname> <given-names>K.</given-names></name> <name><surname>Benois-Pineau</surname> <given-names>J.</given-names></name> <name><surname>Krylov</surname> <given-names>A.</given-names></name> <name><surname>Catheline</surname> <given-names>G.</given-names></name></person-group> (<year>2018</year>). <article-title>3D cnn-based classification using smri and md-dti images for alzheimer disease studies</article-title>. <italic>arXiv preprint arXiv:1801.05968</italic>.</citation>
</ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>B.-H.</given-names></name> <name><surname>Ye</surname> <given-names>J. C.</given-names></name></person-group> (<year>2020</year>). <article-title>Understanding graph isomorphism network for rs-fMRI functional connectivity analysis</article-title>. <source>Front. Neurosci</source>. <volume>14</volume>:<fpage>630</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2020.00630</pub-id><pub-id pub-id-type="pmid">32714130</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kuncheva</surname> <given-names>L. I.</given-names></name> <name><surname>Rodr&#x000ED;guez</surname> <given-names>J. J.</given-names></name> <name><surname>Plumpton</surname> <given-names>C. O.</given-names></name> <name><surname>Linden</surname> <given-names>D. E.</given-names></name> <name><surname>Johnston</surname> <given-names>S. J.</given-names></name></person-group> (<year>2010</year>). <article-title>Random subspace ensembles for fMRI classification</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>29</volume>, <fpage>531</fpage>&#x02013;<lpage>542</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2009.2037756</pub-id><pub-id pub-id-type="pmid">20129853</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lei</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Lin</surname> <given-names>Y.</given-names></name> <name><surname>Shang</surname> <given-names>X.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Knowledge-concept diagnosis from fMRIs by using a space-time embedding graph convolutional network,&#x0201D;</article-title> in <source>International Conference on Web Information Systems and Applications</source> (<publisher-loc>Springer</publisher-loc>), <fpage>100</fpage>&#x02013;<lpage>111</lpage>. <pub-id pub-id-type="doi">10.1007/978-981-99-6222-8_9</pub-id></citation>
</ref>
<ref id="B17">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Song</surname> <given-names>D.</given-names></name> <name><surname>Zhang</surname> <given-names>P.</given-names></name> <name><surname>Yu</surname> <given-names>G.</given-names></name> <name><surname>Hou</surname> <given-names>Y.</given-names></name> <name><surname>Hu</surname> <given-names>B.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x0201C;Emotion recognition from multi-channel EEG data through convolutional recurrent neural network,&#x0201D;</article-title> in <source>2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>352</fpage>&#x02013;<lpage>359</lpage>. <pub-id pub-id-type="doi">10.1109/BIBM.2016.7822545</pub-id></citation>
</ref>
<ref id="B18">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Shang</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Learning-disability recognition by using sparse spatio-temporal graph neural networks,&#x0201D;</article-title> in <source>2023 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>3521</fpage>&#x02013;<lpage>3528</lpage>. <pub-id pub-id-type="doi">10.1109/BIBM58861.2023.10385405</pub-id></citation>
</ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li&#x000E9;geois</surname> <given-names>R.</given-names></name> <name><surname>Santos</surname> <given-names>A.</given-names></name> <name><surname>Matta</surname> <given-names>V.</given-names></name> <name><surname>Van De Ville</surname> <given-names>D.</given-names></name> <name><surname>Sayed</surname> <given-names>A. H.</given-names></name></person-group> (<year>2020</year>). <article-title>Revisiting correlation-based functional connectivity and its relationship with structural connectivity</article-title>. <source>Netw. Neurosci</source>. <volume>4</volume>, <fpage>1235</fpage>&#x02013;<lpage>1251</lpage>. <pub-id pub-id-type="doi">10.1162/netn_a_00166</pub-id><pub-id pub-id-type="pmid">33409438</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lurie</surname> <given-names>D. J.</given-names></name> <name><surname>Kessler</surname> <given-names>D.</given-names></name> <name><surname>Bassett</surname> <given-names>D. S.</given-names></name> <name><surname>Betzel</surname> <given-names>R. F.</given-names></name> <name><surname>Breakspear</surname> <given-names>M.</given-names></name> <name><surname>Kheilholz</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Questions and controversies in the study of time-varying functional connectivity in resting fMRI</article-title>. <source>Netw. Neurosci</source>. <volume>4</volume>, <fpage>30</fpage>&#x02013;<lpage>69</lpage>. <pub-id pub-id-type="doi">10.1162/netn_a_00116</pub-id><pub-id pub-id-type="pmid">32043043</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Mao</surname> <given-names>A.</given-names></name> <name><surname>Mohri</surname> <given-names>M.</given-names></name> <name><surname>Zhong</surname> <given-names>Y.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Cross-entropy loss functions: theoretical analysis and applications,&#x0201D;</article-title> in <source>International Conference on Machine Learning</source> (<publisher-loc>PMLR</publisher-loc>), <fpage>23803</fpage>&#x02013;<lpage>23828</lpage>. <pub-id pub-id-type="pmid">37918314</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mason</surname> <given-names>R. A.</given-names></name> <name><surname>Just</surname> <given-names>M. A.</given-names></name></person-group> (<year>2016</year>). <article-title>Neural representations of physics concepts</article-title>. <source>Psychol. Sci</source>. <volume>27</volume>, <fpage>904</fpage>&#x02013;<lpage>913</lpage>. <pub-id pub-id-type="doi">10.1177/0956797616641941</pub-id><pub-id pub-id-type="pmid">27113732</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Meshulam</surname> <given-names>M.</given-names></name> <name><surname>Hasenfratz</surname> <given-names>L.</given-names></name> <name><surname>Hillman</surname> <given-names>H.</given-names></name> <name><surname>Liu</surname> <given-names>Y.-F.</given-names></name> <name><surname>Nguyen</surname> <given-names>M.</given-names></name> <name><surname>Norman</surname> <given-names>K. A.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Neural alignment predicts learning outcomes in students taking an introduction to computer science course</article-title>. <source>Nat. Commun</source>. <volume>12</volume>:<fpage>1922</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-021-22202-3</pub-id><pub-id pub-id-type="pmid">33771999</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ning</surname> <given-names>E.</given-names></name> <name><surname>Wang</surname> <given-names>C.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Ning</surname> <given-names>X.</given-names></name> <name><surname>Tiwari</surname> <given-names>P.</given-names></name></person-group> (<year>2023</year>). <article-title>Occluded person re-identification with deep learning: a survey and perspectives</article-title>. <source>Expert Syst. Applic</source>. <volume>239</volume>:<fpage>122419</fpage>. <pub-id pub-id-type="doi">10.1016/j.eswa.2023.122419</pub-id></citation>
</ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Qiang</surname> <given-names>N.</given-names></name> <name><surname>Gao</surname> <given-names>J.</given-names></name> <name><surname>Dong</surname> <given-names>Q.</given-names></name> <name><surname>Yue</surname> <given-names>H.</given-names></name> <name><surname>Liang</surname> <given-names>H.</given-names></name> <name><surname>Liu</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Functional brain network identification and fMRI augmentation using a vae-gan framework</article-title>. <source>Comput. Biol. Med</source>. <volume>165</volume>:<fpage>107395</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.107395</pub-id><pub-id pub-id-type="pmid">37669583</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ronicko</surname> <given-names>J. F. A.</given-names></name> <name><surname>Thomas</surname> <given-names>J.</given-names></name> <name><surname>Thangavel</surname> <given-names>P.</given-names></name> <name><surname>Koneru</surname> <given-names>V.</given-names></name> <name><surname>Langs</surname> <given-names>G.</given-names></name> <name><surname>Dauwels</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Diagnostic classification of autism using resting-state fMRI data improves with full correlation functional brain connectivity compared to partial correlation</article-title>. <source>J. Neurosci. Methods</source> <volume>345</volume>:<fpage>108884</fpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2020.108884</pub-id><pub-id pub-id-type="pmid">32730918</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ryali</surname> <given-names>S.</given-names></name> <name><surname>Supekar</surname> <given-names>K.</given-names></name> <name><surname>Abrams</surname> <given-names>D. A.</given-names></name> <name><surname>Menon</surname> <given-names>V.</given-names></name></person-group> (<year>2010</year>). <article-title>Sparse logistic regression for whole-brain classification of fMRI data</article-title>. <source>Neuroimage</source> <volume>51</volume>, <fpage>752</fpage>&#x02013;<lpage>764</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2010.02.040</pub-id><pub-id pub-id-type="pmid">20188193</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seguin</surname> <given-names>C.</given-names></name> <name><surname>Sporns</surname> <given-names>O.</given-names></name> <name><surname>Zalesky</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Brain network communication: concepts, models and applications</article-title>. <source>Nat. Rev. Neurosci</source>. <volume>24</volume>, <fpage>557</fpage>&#x02013;<lpage>574</lpage>. <pub-id pub-id-type="doi">10.1038/s41583-023-00718-5</pub-id><pub-id pub-id-type="pmid">37438433</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shao</surname> <given-names>L.</given-names></name> <name><surname>Fu</surname> <given-names>C.</given-names></name> <name><surname>You</surname> <given-names>Y.</given-names></name> <name><surname>Fu</surname> <given-names>D.</given-names></name></person-group> (<year>2021</year>). <article-title>Classification of asd based on fMRI data with deep learning</article-title>. <source>Cogn. Neurodyn</source>. <volume>15</volume>, <fpage>961</fpage>&#x02013;<lpage>974</lpage>. <pub-id pub-id-type="doi">10.1007/s11571-021-09683-0</pub-id><pub-id pub-id-type="pmid">34790264</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Baucom</surname> <given-names>L. B.</given-names></name> <name><surname>Shinkareva</surname> <given-names>S. V.</given-names></name></person-group> (<year>2013</year>). <article-title>Decoding abstract and concrete concept representations based on single-trial fMRI data</article-title>. <source>Hum. Brain Mapp</source>. <volume>34</volume>, <fpage>1133</fpage>&#x02013;<lpage>1147</lpage>. <pub-id pub-id-type="doi">10.1002/hbm.21498</pub-id><pub-id pub-id-type="pmid">23568269</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Li</surname> <given-names>K.</given-names></name> <name><surname>Chen</surname> <given-names>X.</given-names></name> <name><surname>Hu</surname> <given-names>X. P.</given-names></name></person-group> (<year>2019</year>). <article-title>Application of convolutional recurrent neural network for individual recognition based on resting state fMRI data</article-title>. <source>Front. Neurosci</source>. <volume>13</volume>:<fpage>441809</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2019.00434</pub-id><pub-id pub-id-type="pmid">31118882</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Xin</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Yao</surname> <given-names>Y.</given-names></name> <name><surname>Zhao</surname> <given-names>Y.</given-names></name> <name><surname>Qian</surname> <given-names>W.</given-names></name></person-group> (<year>2021</year>). <article-title>Brain functional network modeling and analysis based on fMRI: a systematic review</article-title>. <source>Cogn. Neurodyn</source>. <volume>15</volume>, <fpage>389</fpage>&#x02013;<lpage>403</lpage>. <pub-id pub-id-type="doi">10.1007/s11571-020-09630-5</pub-id><pub-id pub-id-type="pmid">34040667</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Weaverdyck</surname> <given-names>M. E.</given-names></name> <name><surname>Lieberman</surname> <given-names>M. D.</given-names></name> <name><surname>Parkinson</surname> <given-names>C.</given-names></name></person-group> (<year>2020</year>). <article-title>Tools of the trade multivoxel pattern analysis in fMRI: a practical introduction for social and affective neuroscientists</article-title>. <source>Soc. Cogn. Affect. Neurosci</source>. <volume>15</volume>, <fpage>487</fpage>&#x02013;<lpage>509</lpage>. <pub-id pub-id-type="doi">10.1093/scan/nsaa057</pub-id><pub-id pub-id-type="pmid">32364607</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zarghami</surname> <given-names>T. S.</given-names></name> <name><surname>Friston</surname> <given-names>K. J.</given-names></name></person-group> (<year>2020</year>). <article-title>Dynamic effective connectivity</article-title>. <source>Neuroimage</source> <volume>207</volume>:<fpage>116453</fpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.116453</pub-id><pub-id pub-id-type="pmid">31821868</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeithamova</surname> <given-names>D.</given-names></name> <name><surname>Mack</surname> <given-names>M. L.</given-names></name> <name><surname>Braunlich</surname> <given-names>K.</given-names></name> <name><surname>Davis</surname> <given-names>T.</given-names></name> <name><surname>Seger</surname> <given-names>C. A.</given-names></name> <name><surname>Van Kesteren</surname> <given-names>M. T.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Brain mechanisms of concept learning</article-title>. <source>J. Neurosci</source>. <volume>39</volume>, <fpage>8259</fpage>&#x02013;<lpage>8266</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.1166-19.2019</pub-id><pub-id pub-id-type="pmid">31619495</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Wei</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Xi</surname> <given-names>W.</given-names></name> <name><surname>Pan</surname> <given-names>Y.</given-names></name></person-group> (<year>2022a</year>). <article-title>Identification of autism spectrum disorder based on a novel feature selection method and variational autoencoder</article-title>. <source>Comput. Biol. Med</source>. <volume>148</volume>:<fpage>105854</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.105854</pub-id><pub-id pub-id-type="pmid">35863246</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Dai</surname> <given-names>H.</given-names></name> <name><surname>Yun</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>S.</given-names></name> <name><surname>Lan</surname> <given-names>A.</given-names></name> <name><surname>Shang</surname> <given-names>X.</given-names></name></person-group> (<year>2020</year>). <article-title>Meta-knowledge dictionary learning on 1-bit response data for student knowledge diagnosis</article-title>. <source>Knowl. Based Syst</source>. <volume>205</volume>:<fpage>106290</fpage>. <pub-id pub-id-type="doi">10.1016/j.knosys.2020.106290</pub-id></citation>
</ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Wei</surname> <given-names>S.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>Shang</surname> <given-names>X.</given-names></name></person-group> (<year>2024</year>). <article-title>Federated learning-outcome prediction with multi-layer privacy protection</article-title>. <source>Front. Comput. Sci</source>. <volume>18</volume>:<fpage>186604</fpage>. <pub-id pub-id-type="doi">10.1007/s11704-023-2791-8</pub-id></citation>
</ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>S.</given-names></name> <name><surname>Qu</surname> <given-names>X.</given-names></name> <name><surname>Shang</surname> <given-names>X.</given-names></name></person-group> (<year>2022b</year>). <article-title>Multi-instance discriminative contrastive learning for brain image representation</article-title>. <source>Neural Comput. Applic</source>. <volume>2022</volume>, <fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-022-07524-7</pub-id></citation>
</ref>
<ref id="B40">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Sun</surname> <given-names>L.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Shen</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>S.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Concept-level recognition from neuroimages for understanding learning in the brain,&#x0201D;</article-title> in <source>2023 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>3984</fpage>&#x02013;<lpage>3990</lpage>. <pub-id pub-id-type="doi">10.1109/BIBM58861.2023.10385947</pub-id></citation>
</ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>Wei</surname> <given-names>S.</given-names></name> <name><surname>Liu</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Federated discriminative representation learning for image classification</article-title>. <source>IEEE Trans. Neural Netw. Learn. Syst.</source> <volume>36</volume>, <fpage>3204</fpage>&#x02013;<lpage>3217</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2023.3336957</pub-id><pub-id pub-id-type="pmid">38055356</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>An</surname> <given-names>R.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>S.</given-names></name> <name><surname>Shang</surname> <given-names>X.</given-names></name></person-group> (<year>2022c</year>). <article-title>&#x0201C;Markov guided spatio-temporal networks for brain image classification,&#x0201D;</article-title> in <source>2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>2035</fpage>&#x02013;<lpage>2041</lpage>. <pub-id pub-id-type="doi">10.1109/BIBM55620.2022.9995528</pub-id></citation>
</ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Yun</surname> <given-names>Y.</given-names></name> <name><surname>An</surname> <given-names>R.</given-names></name> <name><surname>Cui</surname> <given-names>J.</given-names></name> <name><surname>Dai</surname> <given-names>H.</given-names></name> <name><surname>Shang</surname> <given-names>X.</given-names></name></person-group> (<year>2021</year>). <article-title>Educational data mining techniques for student performance prediction: method review and comparison analysis</article-title>. <source>Front. Psychol</source>. <volume>12</volume>:<fpage>698490</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2021.698490</pub-id><pub-id pub-id-type="pmid">34950079</pub-id></citation></ref>
<ref id="B44">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Zhou</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>S.</given-names></name> <name><surname>Zhang</surname> <given-names>W.</given-names></name> <name><surname>Xiao</surname> <given-names>M.</given-names></name> <name><surname>Shang</surname> <given-names>X.</given-names></name></person-group> (<year>2022d</year>). <article-title>&#x0201C;Westcoin: weakly-supervised contextualized text classification with imbalance and noisy labels,&#x0201D;</article-title> in <source>2022 26th International Conference on Pattern Recognition (ICPR)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>2451</fpage>&#x02013;<lpage>2457</lpage>. <pub-id pub-id-type="doi">10.1109/ICPR56361.2022.9956110</pub-id></citation>
</ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zong</surname> <given-names>Y.</given-names></name> <name><surname>Zuo</surname> <given-names>Q.</given-names></name> <name><surname>Ng</surname> <given-names>M. K.-P.</given-names></name> <name><surname>Lei</surname> <given-names>B.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>A new brain network construction paradigm for brain disorder via diffusion-based graph contrastive learning</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell</source>. <volume>46</volume>, <fpage>10389</fpage>&#x02013;<lpage>10403</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2024.3442811</pub-id><pub-id pub-id-type="pmid">39137077</pub-id></citation></ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zuo</surname> <given-names>Q.</given-names></name> <name><surname>Chen</surname> <given-names>L.</given-names></name> <name><surname>Shen</surname> <given-names>Y.</given-names></name> <name><surname>Ng</surname> <given-names>M. K.-P.</given-names></name> <name><surname>Lei</surname> <given-names>B.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name></person-group> (<year>2024a</year>). <article-title>&#x0201C;BDHT: generative AI enables causality analysis for mild cognitive impairment,&#x0201D;</article-title> in <source>IEEE Transactions on Automation Science and Engineering</source>, <fpage>1</fpage>&#x02013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1109/TASE.2024.3425949</pub-id><pub-id pub-id-type="pmid">38168455</pub-id></citation></ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zuo</surname> <given-names>Q.</given-names></name> <name><surname>Shen</surname> <given-names>Y.</given-names></name> <name><surname>Zhong</surname> <given-names>N.</given-names></name> <name><surname>Chen</surname> <given-names>C. P.</given-names></name> <name><surname>Lei</surname> <given-names>B.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name></person-group> (<year>2023a</year>). <article-title>Alzheimer&#x00027;s disease prediction via brain structural-functional deep fusing network</article-title>. <source>IEEE Trans. Neural Syst. Rehab. Eng</source>. <volume>31</volume>, <fpage>4601</fpage>&#x02013;<lpage>4612</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2023.3333952</pub-id><pub-id pub-id-type="pmid">37971911</pub-id></citation></ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zuo</surname> <given-names>Q.</given-names></name> <name><surname>Wu</surname> <given-names>H.</given-names></name> <name><surname>Chen</surname> <given-names>C. P.</given-names></name> <name><surname>Lei</surname> <given-names>B.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name></person-group> (<year>2024b</year>). <article-title>Prior-guided adversarial learning with hypergraph for predicting abnormal connections in Alzheimer&#x00027;s disease</article-title>. <source>IEEE Trans. Cybern</source>. <volume>54</volume>, <fpage>3652</fpage>&#x02013;<lpage>3665</lpage>. <pub-id pub-id-type="doi">10.1109/TCYB.2023.3344641</pub-id><pub-id pub-id-type="pmid">38236677</pub-id></citation></ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zuo</surname> <given-names>Q.</given-names></name> <name><surname>Zhong</surname> <given-names>N.</given-names></name> <name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Wu</surname> <given-names>H.</given-names></name> <name><surname>Lei</surname> <given-names>B.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name></person-group> (<year>2023b</year>). <article-title>Brain structure-function fusing representation learning using adversarial decomposed-vae for analyzing MCI</article-title>. <source>IEEE Trans. Neural Syst. Rehabilit. Eng</source>. <volume>31</volume>, <fpage>4017</fpage>&#x02013;<lpage>4028</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2023.3323432</pub-id><pub-id pub-id-type="pmid">37815971</pub-id></citation></ref>
</ref-list>
</back>
</article>