<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2023.1137567</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Broad learning for early diagnosis of Alzheimer&#x00027;s disease using FDG-PET of the brain</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Duan</surname> <given-names>Junwei</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1999018/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname> <given-names>Yang</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2206339/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Wu</surname> <given-names>Huanhua</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1366808/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Wang</surname> <given-names>Jing</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x0002A;</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Chen</surname> <given-names>Long</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/776894/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Chen</surname> <given-names>C. L. Philip</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
</contrib>
<on-behalf-of>Alzheimer&#x00027;s Disease Neuroimaging Initiative</on-behalf-of>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>College of Information Science and Technology, Jinan University</institution>, <addr-line>Guangzhou</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Guangdong Provincial Key Laboratory of Traditional Chinese Medicine Informatization, Jinan University</institution>, <addr-line>Guangzhou</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Nuclear Medicine and PET/CT-MRI Centre, The First Affiliated Hospital of Jinan University</institution>, <addr-line>Guangzhou</addr-line>, <country>China</country></aff>
<aff id="aff4"><sup>4</sup><institution>School of Computer Science, Guangdong Polytechnic Normal University</institution>, <addr-line>Guangzhou</addr-line>, <country>China</country></aff>
<aff id="aff5"><sup>5</sup><institution>Department of Computer and Information Science, Faculty of Science and Technology, University of Macau, Taipa</institution>, <addr-line>Macau SAR</addr-line>, <country>China</country></aff>
<aff id="aff6"><sup>6</sup><institution>School of Computer Science and Engineering, South China University of Technology</institution>, <addr-line>Guangzhou</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Ahmed Shalaby, University of Texas Southwestern Medical Center, United States</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Peixin Lu, Wuhan University, China; Hamed Ghaffari, Iran University of Medical Sciences, Iran</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Junwei Duan <email>jwduan&#x00040;jnu.edu.cn</email></corresp>
<corresp id="c002">Jing Wang <email>wj_adr&#x00040;163.com</email></corresp>
<fn fn-type="other" id="fn001"><p>This article was submitted to Brain Imaging Methods, a section of the journal Frontiers in Neuroscience</p></fn></author-notes>
<pub-date pub-type="epub">
<day>13</day>
<month>03</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>17</volume>
<elocation-id>1137567</elocation-id>
<history>
<date date-type="received">
<day>04</day>
<month>01</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>13</day>
<month>02</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2023 Duan, Liu, Wu, Wang, Chen and Chen.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Duan, Liu, Wu, Wang, Chen and Chen</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license> </permissions>
<abstract>
<p>Alzheimer&#x00027;s disease (AD) is a progressive neurodegenerative disease, and the development of AD is irreversible. However, preventive measures in the presymptomatic stage of AD can effectively slow down deterioration. Fluorodeoxyglucose positron emission tomography (FDG-PET) can detect the metabolism of glucose in patients&#x00027; brains, which can help to identify changes related to AD before brain damage occurs. Machine learning is useful for early diagnosis of patients with AD using FDG-PET, but it requires a sufficiently large dataset, and it is easy for overfitting to occur in small datasets. Previous studies using machine learning for early diagnosis with FDG-PET have either involved the extraction of elaborately handcrafted features or validation on a small dataset, and few studies have explored the refined classification of early mild cognitive impairment (EMCI) and late mild cognitive impairment (LMCI). This article presents a broad network-based model for early diagnosis of AD (BLADNet) through PET imaging of the brain; this method employs a novel broad neural network to enhance the features of FDG-PET extracted <italic>via</italic> 2D CNN. BLADNet can search for information over a broad space through the addition of new BLS blocks without retraining of the whole network, thus improving the accuracy of AD classification. Experiments conducted on a dataset containing 2,298 FDG-PET images of 1,045 subjects from the ADNI database demonstrate that our methods are superior to those used in previous studies on early diagnosis of AD with FDG-PET. In particular, our methods achieved state-of-the-art results in EMCI and LMCI classification with FDG-PET.</p></abstract>
<kwd-group>
<kwd>Alzheimer&#x00027;s disease</kwd>
<kwd>PET</kwd>
<kwd>broad learning system</kwd>
<kwd>neural network</kwd>
<kwd>computer-aided diagnosis</kwd>
</kwd-group>
<contract-num rid="cn001">2021A151501199</contract-num>
<contract-sponsor id="cn001">Basic and Applied Basic Research Foundation of Guangdong Province<named-content content-type="fundref-id">10.13039/501100021171</named-content></contract-sponsor>
<contract-sponsor id="cn002">National Key Research and Development Program of China<named-content content-type="fundref-id">10.13039/501100012166</named-content></contract-sponsor>
<counts>
<fig-count count="5"/>
<table-count count="5"/>
<equation-count count="0"/>
<ref-count count="34"/>
<page-count count="10"/>
<word-count count="6280"/>
</counts>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1. Introduction</title>
<p>Alzheimer&#x00027;s disease (AD) cannot be diagnosed until obvious symptoms appear in the patient, but studies have found that patients with AD show abnormalities in regional metabolism before brain structure changes occur (Jagust et al., <xref ref-type="bibr" rid="B18">2006</xref>). Fluorine 18 (18F) fluorodeoxyglucose (FDG) positron emission tomography (PET) is a non-invasive nuclear medicine imaging technique that can indicate the metabolic activity of tissues and organs (Marcus et al., <xref ref-type="bibr" rid="B25">2014</xref>; Bouter et al., <xref ref-type="bibr" rid="B2">2019</xref>; Levin et al., <xref ref-type="bibr" rid="B22">2021</xref>). FDG-PET may detect the onset of certain diseases earlier than other imaging tests (Brown et al., <xref ref-type="bibr" rid="B3">2014</xref>). FDG-PET is regarded as an effective biomarker for earlier diagnosis of AD (Ch&#x000E9;telat et al., <xref ref-type="bibr" rid="B6">2020</xref>). The onset of Alzheimer&#x00027;s disease is insidious and slow, and it can be divided into three stages: cognitively normal (CN), mild cognitive impairment (MCI), and Alzheimer&#x00027;s disease (AD). Patients with AD tend to show hypometabolism on 18F-FDG-PET scan in the regions of the posterior cingulate, parietotemporal cortices, and frontal lobes, while patients with MCI often show posterior cingulate and parietotemporal hypometabolism with variable frontal lobe involvement (Mosconi et al., <xref ref-type="bibr" rid="B26">2008</xref>; Kobylecki et al., <xref ref-type="bibr" rid="B21">2015</xref>). However, the difference between the two stages in FDG-PET is difficult to distinguish with the naked eye or through pattern recognition-based decisions made <italic>via</italic> qualitative readings. Because the disease involves a wide continuous spectrum, from normal cognition to MCI to AD, MCI can also be subdivided into early MCI (EMCI) and late MCI (LMCI) (Jessen et al., <xref ref-type="bibr" rid="B20">2014</xref>).</p>
<p>Machine learning approaches can effectively extract features that are difficult to find with the naked eye and can outperform professional clinicians in certain imaging diagnosis problems (Zhang et al., <xref ref-type="bibr" rid="B34">2020</xref>). A number of studies have already experimented with unsupervised learning (Suk and Shen, <xref ref-type="bibr" rid="B32">2013</xref>), adversarial learning (Baydargil et al., <xref ref-type="bibr" rid="B1">2021</xref>), and multi-scale learning (Lu et al., <xref ref-type="bibr" rid="B24">2018</xref>) techniques in AD-related PET image analysis. These methods have achieved good results in classification of CN, MCI, and AD, but few studies have explored the refined classification of early EMCI and late LMCI.</p>
<p>Currently, deep learning-based approaches have been applied in early diagnosis of AD (Suk and Shen, <xref ref-type="bibr" rid="B32">2013</xref>; Lu et al., <xref ref-type="bibr" rid="B24">2018</xref>). Nevertheless, there are still many issues remaining in deep learning, such as gradient explosion and vanishing gradients, which limit the depth in terms of number of layers in the network or its fitting ability; some researchers have proposed residual learning (He et al., <xref ref-type="bibr" rid="B14">2016</xref>) as a way to alleviate this problem. The broad learning system (BLS) is one kind of neural network without deep structure. BLS provides better fitting ability by increasing the number of network nodes horizontally and obtains solutions <italic>via</italic> pseudoinverse, with no need for an iterative backpropagation process. However, BLS obtains a feature representation of input data through random projection, which may result in too much redundant information that could influence the performance of the BLS model. Some researchers have experimented with variations of BLS that use other models as feature extractors in the feature mapping layer (Feng and Chen, <xref ref-type="bibr" rid="B10">2018</xref>; Du et al., <xref ref-type="bibr" rid="B9">2020</xref>; Jara-Maldonado et al., <xref ref-type="bibr" rid="B19">2022</xref>; Wu and Duan, <xref ref-type="bibr" rid="B33">2022</xref>). In this article, we propose a novel BLS-based method, in which we use grouped convolution layers to extract the features from slice groups in the first stage, and then these features are fed into a broad learning model for further feature enhancement.</p>
<p>This study proposes a machine learning model based on BLS to predict the clinical diagnosis in patients using 18F-FDG-PET of the brain. We attempted to predict patients&#x00027; classifications as AD, MCI, or CN, and (within the category of MCI) as EMCI or LMCI. The hypothesis was that the broad learning-based model would be able to detect regional metabolic abnormalities caused by pathology, which are difficult to observe on clinical review, and improve the accuracy of individual diagnosis.</p></sec>
<sec id="s2">
<title>2. Materials and methods</title>
<sec>
<title>2.1. Data acquisition</title>
<p>Data used in the preparation of this article were obtained from the Alzheimer&#x00027;s Disease Neuroimaging Initiative (ADNI) database (adni.loni.usc.edu). ADNI was launched in 2003 as a public&#x02013;private partnership, led by Principal Investigator Michael W. Weiner, MD. The primary goal of ADNI has been to test whether serial magnetic resonance imaging (MRI), PET, other biological markers, and clinical and neuropsychological assessment can be combined to measure the progression of MCI and early AD.</p>
<p>In our study, we analyzed a total of 2,298 FDG-PET imaging studies of 1,045 patients obtained from ADNI. The datasets contained images of subjects of different ages. In ADNI 1, the subjects were grouped into three classes: CN, MCI, and AD. However, in ADNI 2/GO, the MCI stage was subdivided into EMCI and LMCI. To be classified as CN, subjects must have no memory complaints and be non-demented. To be classified as having MCI, subjects must have a Mini-Mental State Examination (MMSE) score between 24 and 30; the activities of daily living must be preserved, and dementia must be absent. Finally, to be classified as having AD, subjects must be clinically diagnosed as such, with an MMSE score between 20 and 26 (Jack Jr et al., <xref ref-type="bibr" rid="B17">2008</xref>). Demographic information on our dataset is presented in <xref ref-type="table" rid="T1">Table 1</xref>. A total of 80% of the data (1,851 imaging studies, 598 patients) were used for model training. The remaining 20% (447 imaging studies; no repeat studies of the same subjects in the test set) were used for model testing, from which an additional test set (74 imaging studies for AD vs. MCI vs. CN classification and 45 imaging studies for EMCI vs. LMCI classification) was selected for validation by professional radiologists.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Demographics of datasets.</p></caption>
<table frame="box" rules="all">
<thead><tr style="background-color:#919497;color:#ffffff">
<th valign="top" align="left" colspan="3"></th>
<th valign="top" align="center" colspan="2" style="border-bottom: thin solid #000000;"><bold>Average age</bold><xref ref-type="table-fn" rid="TN1"><sup><bold>&#x0002A;</bold></sup></xref></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#919497;color:#ffffff">
<td valign="top" align="left"><bold>Clinical diagnosis</bold></td>
<td valign="top" align="center"><bold>No. of Patients</bold></td>
<td valign="top" align="center"><bold>No. of imaging studies</bold></td>
<td valign="top" align="center"><bold>Men</bold></td>
<td valign="top" align="center"><bold>Women</bold></td>
</tr> <tr>
<td valign="top" align="left">AD</td>
<td valign="top" align="center">297</td>
<td valign="top" align="center">541</td>
<td valign="top" align="center">76.47 &#x000B1; 7.57 (56&#x02013;92)</td>
<td valign="top" align="center">75.11 &#x000B1; 7.63 (55&#x02013;92)</td>
</tr> <tr>
<td valign="top" align="left">MCI</td>
<td valign="top" align="center">196</td>
<td valign="top" align="center">616</td>
<td valign="top" align="center">77.79 &#x000B1; 7.01 (57&#x02013;92)</td>
<td valign="top" align="center">74.82 &#x000B1; 7.95 (57&#x02013;96)</td>
</tr> <tr>
<td valign="top" align="left">CN</td>
<td valign="top" align="center">242</td>
<td valign="top" align="center">627</td>
<td valign="top" align="center">77.12 &#x000B1; 5.41 (62&#x02013;91)</td>
<td valign="top" align="center">76.93 &#x000B1; 6.37 (60&#x02013;96)</td>
</tr> <tr>
<td valign="top" align="left">Total</td>
<td valign="top" align="center">735</td>
<td valign="top" align="center">1,784</td>
<td valign="top" align="center">77.18 &#x000B1; 6.75 (56&#x02013;92)</td>
<td valign="top" align="center">75.76 &#x000B1; 7.31 (55&#x02013;96)</td>
</tr> <tr>
<td valign="top" align="left">EMCI</td>
<td valign="top" align="center">152</td>
<td valign="top" align="center">265</td>
<td valign="top" align="center">73.89 &#x000B1; 6.85 (56&#x02013;90)</td>
<td valign="top" align="center">72.40 &#x000B1; 8.40 (55&#x02013;92)</td>
</tr> <tr>
<td valign="top" align="left">LMCI</td>
<td valign="top" align="center">158</td>
<td valign="top" align="center">249</td>
<td valign="top" align="center">74.70 &#x000B1; 7.37 (56&#x02013;94)</td>
<td valign="top" align="center">71.80 &#x000B1; 7.80 (55&#x02013;91)</td>
</tr> <tr>
<td valign="top" align="left">Total</td>
<td valign="top" align="center">310</td>
<td valign="top" align="center">514</td>
<td valign="top" align="center">74.27 &#x000B1; 7.10 (56&#x02013;94)</td>
<td valign="top" align="center">72.09 &#x000B1; 8.11 (55&#x02013;91)</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>AD, Alzheimer&#x00027;s disease; MCI, mild cognitive impairment; CN, cognitively normal.</p>
<fn id="TN1"><label>&#x0002A;</label><p>Data in parentheses are the range.</p></fn>
</table-wrap-foot>
</table-wrap></sec>
<sec>
<title>2.2. Data processing</title>
<p>For the purpose of eliminating differences between images acquired from various systems, FDG-PET images in ADNI have undergone a series of preprocessing steps, including intensity normalization and conversion to a uniform isotropic resolution of 8 mm full width at half maximum. We selected the processed images from ADNI; our method does not require any specific pre-defined ROI or VOI as traditional machine learning methods do. All 3D images were resampled to a size of 160 &#x000D7; 160 &#x000D7; 96; we treated the images as a series of 2D slices and removed slices with all-zero intensity on both sides, then divided the image into four groups of slices at equal intervals, with each group containing 23 slices. All processing steps were conducted in Python (version 3.8) using the packages scipy (<ext-link ext-link-type="uri" xlink:href="http://www.scipy.org">http://www.scipy.org</ext-link>) and numpy (<ext-link ext-link-type="uri" xlink:href="https://numpy.org/">https://numpy.org/</ext-link>). <xref ref-type="fig" rid="F1">Figure 1</xref> shows a single slice, viewed on three planes.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Example of FDG-PET imaging from ADNI. Each row represents a PET imaging slice on a three-plane view. The three rows are: a 73-year-old man with AD, an 81-year-old woman with MCI, and a 71-year-old man without MCI/AD. The difference between them is difficult to identify with the naked eye.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-17-1137567-g0001.tif"/>
</fig></sec>
<sec>
<title>2.3. Model development</title>
<p>Despite the good learning ability of deep neural networks, they easily overfit on small datasets and their training is also time-consuming. The BLS is a lightweight network with a broad structure proposed by Chen and Liu (<xref ref-type="bibr" rid="B4">2017</xref>). The inspiration for its design comes from a random vector functional link neural network (RVFLNN) (Pao and Takefuji, <xref ref-type="bibr" rid="B30">1992</xref>; Chu et al., <xref ref-type="bibr" rid="B7">2019</xref>; Gong et al., <xref ref-type="bibr" rid="B12">2021</xref>). It can obtain a globally optimal solution using a ridge regression algorithm during training, without an iterative backpropagation process, meaning that its training is fast and efficient. The detailed description of the BLS is illustrated in the <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>. Based on the BLS, we propose a broad network-based model for early diagnosis of AD (BLADNet) through PET imaging of the brain.</p>
<p><xref ref-type="fig" rid="F2">Figure 2</xref> illustrates the overall architecture of BLADNet. The framework consists of two stages. In the first stage, we use a 2D CNN for automated feature learning from each group of slices rather than directly using a 3D CNN, which reduces the number of parameters to be learned. In the second stage, the features extracted from each group in the previous step are concatenated to form a compact sequence feature; then, the Extreme Broad Learning System (EBLS), based on a broad neural network, is used to enhance the features from 2D CNN and carry out the final classification. A detailed description of the EBLS is provided in the <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref><xref ref-type="fn" rid="fn0001"><sup>1</sup></xref>. Our model was developed in Python using the packages numpy and pytorch (<ext-link ext-link-type="uri" xlink:href="https://pytorch.org/">https://pytorch.org/</ext-link>, version 1.7.1). All experiments were conducted using a computer with a Linux operating system (Ubuntu 18.04). The computer was equipped with a CPU (Intel(R) Core (TM) i9-9980XE, 3.00 GHz), 64 GB of DDR4 SDRAM, and GPU (GeForce RTX 3080) with CUDA Version 11.2 and cuDNN Version 9.1.85.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Each 3D image is decomposed into groups of 2D slices at equal intervals. In the first stage, deep convolutional features are extracted from each group by 2D CNN. In the second stage, all features from each slice group are concatenated to form a compact feature vector, and fed to EBLS for final prediction. <bold>(A)</bold> The overall architecture of BLADNet. <bold>(B)</bold> The detailed structure of EBLS.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-17-1137567-g0002.tif"/>
</fig></sec>
<sec>
<title>2.4. Model evaluation and statistical analysis</title>
<p>We performed the experiments of AD vs. MCI vs. CN classification as in previous studies on data from ADNI 1, and also performed additional experiments of refined classification between EMCI and LMCI on data from ADNI 2/GO. All data were randomly shuffled before being split into the training set and test set. In all experiments, we used 80% of the samples for training and 20% of the samples for testing. In each experiment, we regarded each category in turn as the positive class and the remaining categories as the negative class, and then calculated the metrics. We used accuracy, sensitivity, and specificity as metrics to evaluate classification performance. All metrics were calculated under a default threshold value of 0.5. We also plotted the ROC curve of all experiments and calculated the corresponding AUC.</p>
<p>Two board-certified professional radiologists working in a department of brain imaging and nuclear medicine (radiologist 1: HLZ, with 8 years of experience in brain imaging reading for AD diagnosis; radiologist 2: HHW, with 6 years of experience in brain imaging reading for AD diagnosis) were asked to give their diagnostic impressions of a dataset that was not used for model training. For each case, the radiologists were provided with the patient&#x00027;s age, gender, and MMSE score as additional information for validation. To validate the performance of the proposed model and the professional readings of radiologists, we compared the performance of our proposed model with that of the radiologists&#x00027; interpretations. The main steps of the experiment are shown in <xref ref-type="fig" rid="F3">Figure 3</xref>.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Main steps of the experiment. The data set was split into training and test sets at a ratio of 8:2. In the validation step, the radiologists were provided with demographic information to aid their readings.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-17-1137567-g0003.tif"/>
</fig></sec></sec>
<sec id="s3">
<title>3. Experimental results</title>
<sec>
<title>3.1. Results of model training</title>
<p>The prediction results of the broad network-based model are shown in <xref ref-type="table" rid="T2">Table 2</xref>. For classification of AD, MCI, and CN samples, sensitivity was 92.16 (94 of 102), 89.34 (109 of 122), and 95.16% (118 of 124), respectively; specificity was 97.56 (240 of 246), 95.58 (216 of 226), and 95.09% (213 of 224), respectively; and precision was 94.00 (94 of 100), 91.60 (109 of 119), and 91.47% (118 of 129), respectively. The ROC curves of our model, trained on 80% of the ADNI data and tested on the remaining 20%, are shown in <xref ref-type="fig" rid="F4">Figure 4</xref>. The AUC in prediction of AD, MCI, and CN was 0.97, 0.98, and 0.99, respectively. The AUC for CN was the highest, indicating that our model can distinguish healthy subjects from patients with AD/MCI.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Comparison of performance between our model and radiology readers in prediction of AD, MCI, and CN.</p></caption>
<table frame="box" rules="all">
<thead><tr style="background-color:#919497;color:#ffffff">
<th valign="top" align="left"><bold>Our method on ADNI test set</bold></th>
<th valign="top" align="center"><bold>Sensitivity (%)<xref ref-type="table-fn" rid="TN2"><sup>&#x0002A;</sup></xref></bold></th>
<th valign="top" align="center"><bold>Specificity (%)<xref ref-type="table-fn" rid="TN2"><sup>&#x0002A;</sup></xref></bold></th>
<th valign="top" align="center"><bold>Precision (%)<xref ref-type="table-fn" rid="TN2"><sup>&#x0002A;</sup></xref></bold></th>
<th valign="top" align="center"><bold>F1 score (%)</bold></th>
<th valign="top" align="center"><bold>No. of imaging studies</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">AD</td>
<td valign="top" align="center">92.16 (94/102)</td>
<td valign="top" align="center">97.56 (240/246)</td>
<td valign="top" align="center">94.00 (94/100)</td>
<td valign="top" align="center">93.06</td>
<td valign="top" align="center">102</td>
</tr> <tr>
<td valign="top" align="left">MCI</td>
<td valign="top" align="center">89.34 (109/122)</td>
<td valign="top" align="center">95.58 (216/226)</td>
<td valign="top" align="center">91.60 (109/119)</td>
<td valign="top" align="center">90.46</td>
<td valign="top" align="center">122</td>
</tr> <tr>
<td valign="top" align="left">CN</td>
<td valign="top" align="center">95.16 (118/124)</td>
<td valign="top" align="center">95.09 (213/224)</td>
<td valign="top" align="center">91.47 (118/129)</td>
<td valign="top" align="center">93.28</td>
<td valign="top" align="center">124</td>
</tr> <tr>
<td valign="top" align="left" colspan="6"><bold>Radiologist 1</bold></td>
</tr> <tr>
<td valign="top" align="left">AD</td>
<td valign="top" align="center">51.85 (14/27)</td>
<td valign="top" align="center">57.45 (27/47)</td>
<td valign="top" align="center">41.76 (14/34)</td>
<td valign="top" align="center">45.9</td>
<td valign="top" align="center">27</td>
</tr> <tr>
<td valign="top" align="left">MCI</td>
<td valign="top" align="center">29.41 (5/17)</td>
<td valign="top" align="center">80.70 (46/57)</td>
<td valign="top" align="center">31.25 (5/16)</td>
<td valign="top" align="center">30.3</td>
<td valign="top" align="center">17</td>
</tr> <tr>
<td valign="top" align="left">CN</td>
<td valign="top" align="center">46.67 (14/30)</td>
<td valign="top" align="center">77.27 (34/44)</td>
<td valign="top" align="center">58.33 (14/24)</td>
<td valign="top" align="center">51.85</td>
<td valign="top" align="center">30</td>
</tr> <tr>
<td valign="top" align="left" colspan="6"><bold>Radiologist 2</bold></td>
</tr> <tr>
<td valign="top" align="left">AD</td>
<td valign="top" align="center">37.04 (10/27)</td>
<td valign="top" align="center">72.34 (34/47)</td>
<td valign="top" align="center">43.48 (10/23)</td>
<td valign="top" align="center">40</td>
<td valign="top" align="center">27</td>
</tr> <tr>
<td valign="top" align="left">MCI</td>
<td valign="top" align="center">35.29 (6/17)</td>
<td valign="top" align="center">63.16 (36/57)</td>
<td valign="top" align="center">22.22 (6/27)</td>
<td valign="top" align="center">27.27</td>
<td valign="top" align="center">17</td>
</tr> <tr>
<td valign="top" align="left">CN</td>
<td valign="top" align="center">46.67 (14/30)</td>
<td valign="top" align="center">77.27 (34/44)</td>
<td valign="top" align="center">58.33 (14/24)</td>
<td valign="top" align="center">51.85</td>
<td valign="top" align="center">30</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>AD, Alzheimer&#x00027;s disease; MCI, mild cognitive impairment; CN, cognitively normal.</p>
<fn id="TN2"><label>&#x0002A;</label><p>Data in parentheses are raw data used to calculate the percentage.</p></fn>
</table-wrap-foot>
</table-wrap>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>ROC curve of our method tested on the ADNI data set. The ROC curve labeled AD represents the model&#x00027;s performance in distinguishing AD vs. all other classes, the other curves represent the equivalent objective. The AUC is larger for CN than for the other classes, which indicates that our model can distinguish healthy subjects from patients with AD/MCI more successfully than other classifications.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-17-1137567-g0004.tif"/>
</fig>
<p>The results for EMCI and LMCI prediction are shown in <xref ref-type="table" rid="T3">Table 3</xref>. In this experiment, we treated LMCI as the positive class and EMCI as the negative class. Sensitivity was 81.63% (40 of 49) and specificity was 85.19% (46 of 54). Similar to the AD vs. MCI vs. CN experiment, the specificity of the model was higher than the sensitivity, indicating that our model was better at correctly identifying EMCI (negative class) cases than LMCI cases.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Comparison of performance between our model and radiology readers in prediction of EMCI and LMCI.</p></caption>
<table frame="box" rules="all">
<thead><tr style="background-color:#919497;color:#ffffff">
<th/>
<th valign="top" align="left"><bold>Sensitivity (%)<xref ref-type="table-fn" rid="TN3"><sup>&#x0002A;</sup></xref></bold></th>
<th valign="top" align="center"><bold>Specificity (%)<xref ref-type="table-fn" rid="TN3"><sup>&#x0002A;</sup></xref></bold></th>
<th valign="top" align="center"><bold>Precision (%)<xref ref-type="table-fn" rid="TN3"><sup>&#x0002A;</sup></xref></bold></th>
<th valign="top" align="center"><bold>F1 score (%)</bold></th>
<th valign="top" align="center"><bold>No. of imaging studies</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Our method on test set 2</td>
<td valign="top" align="center">81.63 (40/49)</td>
<td valign="top" align="center">85.19 (46/54)</td>
<td valign="top" align="center">83.33 (40/48)</td>
<td valign="top" align="center">82.47</td>
<td valign="top" align="center">103</td>
</tr> <tr>
<td valign="top" align="left">Radiologist 1</td>
<td valign="top" align="center">84.00 (21/25)</td>
<td valign="top" align="center">25.00 (5/20)</td>
<td valign="top" align="center">58.33 (21/36)</td>
<td valign="top" align="center">68.85</td>
<td valign="top" align="center">45</td>
</tr> <tr>
<td valign="top" align="left">Radiologist 2</td>
<td valign="top" align="center">76.00 (19/25)</td>
<td valign="top" align="center">30.00 (6/20)</td>
<td valign="top" align="center">58.33 (19/33)</td>
<td valign="top" align="center">65.52</td>
<td valign="top" align="center">45</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>EMCI, early mild cognitive impairment; LMCI, late mild cognitive impairment.</p>
<fn id="TN3"><label>&#x0002A;</label><p>Data in parentheses are raw data used to calculate the percentage.</p></fn>
</table-wrap-foot>
</table-wrap></sec>
<sec>
<title>3.2. Model interpretation: t-SNE plot</title>
<p>We used the t-SNE algorithm to reduce the dimensionality of the features extracted from the convolutional network and projected them into a two-dimensional space for visualization. As shown in <xref ref-type="fig" rid="F5">Figure 5A</xref>, for the AD vs. MCI vs. CN experiment, there were obvious boundaries between the three categories. Moreover, only a few samples from other categories were scattered within the CN category, indicating that the model has a better screening ability for healthy cases than for patients. Similarly, as shown in <xref ref-type="fig" rid="F5">Figure 5B</xref>, for EMCI and LMCI classification, the model divided the samples very successfully into two clusters. Although a few cases were mixed in the junction of the two clusters, which indicates that there is a transition stage from EMCI to LMCI, our model could distinguish the two stages well.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>Scatter plot of all data after dimension reduction by t-SNE. <bold>(A)</bold> Visualization of dimension reduction for AD, MCI, and CN. <bold>(B)</bold> Visualization of dimension reduction for EMCI and LMCI.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-17-1137567-g0005.tif"/>
</fig></sec>
<sec>
<title>3.3. Comparison of model predictions with state-of-the-art methods</title>
<p>Recently, a substantial amount of work has been carried out exploring the application of machine learning approaches to AD prediction using brain imaging. Most of these studies have used structural imaging of the brain, with few studies using functional imaging, specifically 18F-FDG-PET. Some researchers have attempted to analyze 18F-FDG-PET for AD predictions, but these studies have yielded limited success (Liu et al., <xref ref-type="bibr" rid="B23">2018</xref>; Lu et al., <xref ref-type="bibr" rid="B24">2018</xref>; Pan et al., <xref ref-type="bibr" rid="B29">2018</xref>; Ding et al., <xref ref-type="bibr" rid="B8">2019</xref>; Huang et al., <xref ref-type="bibr" rid="B16">2019</xref>; Hamdi et al., <xref ref-type="bibr" rid="B13">2022</xref>). <xref ref-type="table" rid="T4">Tables 4</xref>, <xref ref-type="table" rid="T5">5</xref> summarize state-of-the-art deep learning methods for prediction of AD using 18F-FDG-PET imaging. Most of the methods investigated can only discriminate AD from CN or MCI from CN, while our method can classify patients at different stages of AD with higher sensitivity and specificity. In addition, compared with these methods, we used a larger test set in our experiments, which demonstrates the superior generalization ability of our method.</p>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Summary of state-of-the-art methods for prediction of Alzheimer&#x00027;s disease (AD) using 18F-FDG-PET imaging.</p></caption>
<table frame="box" rules="all">
<thead><tr style="background-color:#919497;color:#ffffff">
<th valign="top" align="left"><bold>References</bold></th>
<th valign="top" align="center"><bold>Summary of method</bold></th>
<th valign="top" align="center"><bold>Dataset specifications</bold></th>
<th valign="top" align="center"><bold>Sensitivity</bold></th>
<th valign="top" align="center"><bold>Specificity</bold></th>
<th valign="top" align="center"><bold>AUC<xref ref-type="table-fn" rid="TN4"><sup>&#x0002A;</sup></xref></bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Ding et al. (<xref ref-type="bibr" rid="B8">2019</xref>)</td>
<td valign="top" align="center">Inception V3 network pre-trained on ImageNet</td>
<td valign="top" align="center">484 AD, 861 MCI, 764 non-AD/MCI scans from ADNI</td>
<td valign="top" align="center">AD 81%</td>
<td valign="top" align="center">94%</td>
<td valign="top" align="center">0.92</td>
</tr>
 <tr>
<td/>
<td/>
<td/>
<td valign="top" align="center">MCI 54%</td>
<td valign="top" align="center">68%</td>
<td valign="top" align="center">0.63</td>
</tr>
 <tr>
<td/>
<td/>
<td/>
<td valign="top" align="center">Non-AD/MCI 59%</td>
<td valign="top" align="center">75%</td>
<td valign="top" align="center">0.73</td>
</tr> <tr>
<td valign="top" align="left">Huang et al. (<xref ref-type="bibr" rid="B16">2019</xref>)</td>
<td valign="top" align="center">3D VGG network</td>
<td valign="top" align="center">647 AD, 731 CN, 767 MCI 18F-FDG-PET scans from ADNI</td>
<td valign="top" align="center">AD vs. CN 90.24%</td>
<td valign="top" align="center">87.77%</td>
<td valign="top" align="center">0.9269</td>
</tr> <tr>
<td valign="top" align="left">Lu et al. (<xref ref-type="bibr" rid="B24">2018</xref>)</td>
<td valign="top" align="center">A multiscale deep neural network</td>
<td valign="top" align="center">226 AD and 304 CN 18F-FDG-PET scans from ADNI</td>
<td valign="top" align="center">AD vs. CN 91.54%</td>
<td valign="top" align="center">95.06%</td>
<td valign="top" align="center">NA</td>
</tr> <tr>
<td valign="top" align="left">Liu et al. (<xref ref-type="bibr" rid="B23">2018</xref>)</td>
<td valign="top" align="center">A combination of 2D CNN and RNN</td>
<td valign="top" align="center">93 AD, 146 MCI, 100 CN scans from ADNI</td>
<td valign="top" align="center">AD vs. CN 91.4%</td>
<td valign="top" align="center">91%</td>
<td valign="top" align="center">0.953</td>
</tr>
 <tr>
<td/>
<td/>
<td/>
<td valign="top" align="center">MCI vs. CN 78.1%</td>
<td valign="top" align="center">80%</td>
<td valign="top" align="center">0.839</td>
</tr> <tr>
<td valign="top" align="left">Hamdi et al. (<xref ref-type="bibr" rid="B13">2022</xref>)</td>
<td valign="top" align="center">A 2D CNN network</td>
<td valign="top" align="center">220 AD, 635 CN FDG-PET scans from ADNI</td>
<td valign="top" align="center">AD vs. CN 94%</td>
<td valign="top" align="center">96%</td>
<td valign="top" align="center">0.95</td>
</tr> <tr>
<td valign="top" align="left">Pan et al. (<xref ref-type="bibr" rid="B29">2018</xref>)</td>
<td valign="top" align="center">SVM</td>
<td valign="top" align="center">94 AD, 88 MCI, 90 CN subjects from ADNI</td>
<td valign="top" align="center">AD vs. CN 92.78%</td>
<td valign="top" align="center">91.38%</td>
<td valign="top" align="center">0.9598</td>
</tr>
 <tr>
<td/>
<td/>
<td/>
<td valign="top" align="center">MCI vs. CN 84.20%</td>
<td valign="top" align="center">82.83%</td>
<td valign="top" align="center">0.8893</td>
</tr> <tr>
<td valign="top" align="left">Current study</td>
<td valign="top" align="center">A broad learning-based network</td>
<td valign="top" align="center">541 AD, 616 MCI, 627 CN FDG-PET imaging studies from ADNI</td>
<td valign="top" align="center">AD 92.16%</td>
<td valign="top" align="center">97.56%</td>
<td valign="top" align="center">0.97</td>
</tr>
 <tr>
<td/>
<td/>
<td/>
<td valign="top" align="center">MCI 89.34%</td>
<td valign="top" align="center">95.58%</td>
<td valign="top" align="center">0.98</td>
</tr>
 <tr>
<td/>
<td/>
<td/>
<td valign="top" align="center">Non-AD/MCI 95.16%</td>
<td valign="top" align="center">95.09%</td>
<td valign="top" align="center">0.99</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>We report sensitivity, specificity, and area under the curve (AUC) for all these methods.</p>
<p>AD, Alzheimer&#x00027;s disease; MCI, mild cognitive impairment; CN, cognitively normal.</p>
<fn id="TN4"><label>&#x0002A;</label><p>A value of NA indicates that this result is not reported in the literature.</p></fn>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Comparison of performance between our model and other existing methods in prediction of EMCI and LMCI.</p></caption>
<table frame="box" rules="all">
<thead><tr style="background-color:#919497;color:#ffffff">
<th valign="top" align="left"><bold>References</bold></th>
<th valign="top" align="center"><bold>Sensitivity (%)<xref ref-type="table-fn" rid="TN5"><sup>&#x0002A;</sup></xref></bold></th>
<th valign="top" align="center"><bold>Specificity (%)<xref ref-type="table-fn" rid="TN5"><sup>&#x0002A;</sup></xref></bold></th>
<th valign="top" align="center"><bold>F1 score (%)<xref ref-type="table-fn" rid="TN5"><sup>&#x0002A;</sup></xref></bold></th>
<th valign="top" align="center"><bold>Dataset specifications</bold></th>
</tr>
</thead>
<tbody> <tr>
<td valign="top" align="left">Singh et al. (<xref ref-type="bibr" rid="B31">2017</xref>)</td>
<td valign="top" align="center">64.82%</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">68.44%</td>
<td valign="top" align="center">178 EMCI, 158 LMCI</td>
</tr> <tr>
<td valign="top" align="left">Nozadi et al. (<xref ref-type="bibr" rid="B27">2018</xref>)</td>
<td valign="top" align="center">72.50%</td>
<td valign="top" align="center">79.20%</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">164 EMCI, 189 LMCI</td>
</tr> <tr>
<td valign="top" align="left">Forouzannezhad et al. (<xref ref-type="bibr" rid="B11">2020</xref>)</td>
<td valign="top" align="center">61.50%</td>
<td valign="top" align="center">64.3%</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">296 EMCI, 193 LMCI</td>
</tr> <tr>
<td valign="top" align="left">Ours</td>
<td valign="top" align="center">81.63%</td>
<td valign="top" align="center">85.19%</td>
<td valign="top" align="center">82.47%</td>
<td valign="top" align="center">265 EMCI, 249 LMCI</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>EMCI, early mild cognitive impairment; LMCI, late mild cognitive impairment.</p>
<fn id="TN5"><label>&#x0002A;</label><p>A value of NA indicates that this result is not reported in the literature.</p></fn>
</table-wrap-foot>
</table-wrap></sec>
<sec>
<title>3.4. Comparison of model predictions with professional radiologists</title>
<p>As shown in <xref ref-type="table" rid="T2">Table 2</xref>, two radiologists gave their interpretations of a test set. For radiologist 1, the sensitivity results for MCI, AD, and CN were 51.85 (14 of 27), 29.41 (5 of 17), and 46.67% (14 of 30), respectively; the specificity results were 57.45 (27 of 47), 80.70 (46 of 57), and 77.27% (34 of 44), respectively; and the precision results were 41.18 (14 of 34), 31.25 (5 of 16), and 58.33% (14 of 24), respectively. For radiologist 2, sensitivity for MCI, AD, and CN was 37.04 (10 of 27), 35.29 (6 of 17), and 46.67% (14 of 30), respectively; specificity was 72.34 (34 of 47), 63.16 (36 of 57), and 77.27% (34 of 44), respectively; and precision was 43.48 (10 of 23), 22.22 (6 of 27), and 58.33% (14 of 24), respectively. It can be observed that the prediction results of our proposed model were better than those of the radiologists, which indicates that the model was able to find lesions that were difficult to observe with the naked eye. It is also worth noting that although the two radiologists obtained the same results in their evaluations of healthy cases, patients with MCI and AD were difficult to evaluate.</p>
<p><xref ref-type="table" rid="T3">Table 3</xref> reports reader performance on prediction of EMCI vs. LMCI. For radiologist 1, the results in terms of sensitivity, specificity, and precision were 84.00 (21 of 25), 25.00 (5 of 20), and 58.33% (21 of 36), respectively. For radiologist 2, the results were 76.00 (19 of 25), 30.00 (6 of 20), and 58.33% (19 of 33), respectively. Although radiologists had higher sensitivity in this scenario, their specificity was very low; this is because radiologists tend to predict cases as LMCI. In contrast, our model was able to achieve high specificity under high sensitivity.</p></sec></sec>
<sec id="s4">
<title>4. Discussion</title>
<p>With the aging of the population, the number of patients with AD is continuously increasing. However, research on a cure for AD has been slow, and the focus of research has shifted to the early diagnosis of AD, so that early prevention measures can delay the progression of the disease. However, early identification of patients at the prodromal stage of AD is still a challenging problem. The broad neural network-based model can identify patients with AD at different stages with high sensitivity and specificity. In addition, in identifying patients at the EMCI or LMCI stage, the proposed model is able to achieve high sensitivity under high specificity; notably, it outperformed professional radiologist readers, achieving higher sensitivity and specificity.</p>
<p>Previous research has studied the specific pattern of hypometabolism that can be observed in FDG-PET of patients with AD. Bilateral temporo-parietal hypometabolism has been found to be a dominant pattern related to clinically confirmed AD (Hoffman et al., <xref ref-type="bibr" rid="B15">2000</xref>). Other studies have demonstrated that, as the disease progresses, FDG uptake is reduced, especially in the frontal, parietal, and lateral temporal lobes (Ossenkoppele et al., <xref ref-type="bibr" rid="B28">2012</xref>). However, FDG-PET is not a definitive imaging biomarker for AD and MCI. Substantial previous efforts have been devoted to attempts to develop computer-aided methods of diagnosis of AD <italic>via</italic> other modalities, but few studies have been conducted involving attempts to apply machine learning approaches to classify patients with AD by FDG-PET alone. Previous attempts to identify MCI have resulted in limited sensitivity (81% for AD, 54% for MCI) and specificity (Ding et al., <xref ref-type="bibr" rid="B8">2019</xref>). In addition to prediction of AD, our model performs refined classification of EMCI vs. LMCI, achieving sensitivity of 81.63% and specificity of 85.19% in doing so. Compared to previous studies, the key advantages of our model are as follows. First, due to the incremental learning ability of BLS, our model can be dynamically updated without retraining from scratch if new imaging studies are added; our EBLS model can further extend the incremental learning ability of BLS by adding new BLS blocks dynamically. In addition, our model exhibits better performance in the identification of the early stage of AD, which is of great significance for the diagnosis of AD, because early identification of AD facilitates early intervention in the progression of the disease. There are also some limitations to our model in that the training needs to be completed in two stages, and the process is complicated. 
In addition, training a convolution layer from scratch for the first time is still time-consuming work, and the BLS model in the second stage depends on the quality of feature extraction in the convolution layer.</p>
<p>Because of their deep structure, deep learning models are very good at capturing abstract and intrinsic features of images. However, the problems existing in deep learning models, such as gradient explosion and vanishing gradients, usually limit the possibility of deepening the networks of deep learning models indefinitely. BLS can solve this problem in a different way, providing good universal approximation ability with a flat structure. The universal approximation ability of BLS has been proven by Chen et al. (<xref ref-type="bibr" rid="B5">2018</xref>). Our proposed method utilizes a convolution layer as a feature extractor to provide deep space features for BLS, and our proposed EBLS model can enhance the features in broad space before computing the final output. The comparison in the section above demonstrates that our method achieves better performance than state-of-the-art deep learning methods, which demonstrates the role of broad learning in feature enhancement. In addition, compared to other studies that have only used dozens of images, our model was trained and validated on a large dataset containing thousands of images and achieves better performance, which indicates that our method has better generalizability. However, in real clinical scenarios, the reasons for hypometabolism observed in FDG-PET may be more complicated. For instance, other types of dementia, such as dementia with Lewy bodies (DLB) or frontotemporal dementia (FTD), may also cause pathological changes similar to AD. Further studies that verify this method on more complex data may in the future provide more reliable clinical aids for diagnosis of AD.</p>
<p>Our study also has limitations. First, although the machine learning method has achieved very good results in the validation with the ADNI data set, actual clinical prediction is much more complicated. For instance, many patients may have neurological diseases other than AD, which will affect the prediction results. We will continue our investigation and apply our model to a more general patient population in the future. Second, the algorithm can learn features that are difficult to see with the naked eye (which means that its predictions can differ from experts&#x00027; interpretations), and t-SNE dimension reduction also shows the gradual progression of patients from MCI to AD, but the model cannot provide interpretable information for radiologists.</p></sec>
<sec id="s5">
<title>5. Conclusion</title>
<p>In conclusion, in our study we have developed a novel broad network-based model for prediction of AD diagnosis using 18F-FDG-PET of the brain. The proposed broad learning-based model was able to achieve high accuracy, sensitivity, and specificity on the validation set and outperformed professional radiologist readers in predicting AD based on FDG-PET. Moreover, the proposed model can be integrated into the clinical workflow as a powerful auxiliary diagnosis tool for reading PET imaging of patients with AD.</p></sec>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found here: <ext-link ext-link-type="uri" xlink:href="https://adni.loni.usc.edu">https://adni.loni.usc.edu</ext-link>.</p></sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>Ethical approval was not provided for this study on human participants because the data used in this research were obtained from a publicly available dataset. The patients/participants provided their written informed consent to participate in this study.</p>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>JD: conceptualization, methodology, validation, visualization, writing&#x02014;original draft, writing&#x02014;review and editing, supervision, project administration, and funding acquisition. YL: data curation, methodology, software, validation, and writing&#x02014;original draft. HW: validation and software. JW: supervision and project administration. LC: visualization and writing&#x02014;review and editing. CC: conceptualization, resources, and supervision. All authors contributed to the article and approved the submitted version.</p></sec>
</body>
<back>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>Data used in preparation of this article were obtained from the Alzheimer&#x00027;s Disease Neuroimaging Initiative (ADNI) database (<ext-link ext-link-type="uri" xlink:href="http://adni.loni.usc.edu">http://adni.loni.usc.edu</ext-link>). As such, the investigators within the ADNI contributed to the design and implementation of ADNI and/or provided data but did not participate in analysis or writing of this report. A complete listing of ADNI investigators can be found at: <ext-link ext-link-type="uri" xlink:href="http://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Acknowledgement_List.pdf">http://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Acknowledgement_List.pdf</ext-link>. This work was supported in part by Guangdong Basic and Applied Basic Research Foundation under Grant 2021A1515011999, in part by the National Key Research and Development Program of China under Grant 2018YFC2002500, and in part by Guangdong Provincial Key Laboratory of Traditional Chinese Medicine Informatization under Grant 2021B1212040007.</p>
</sec>

<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fnins.2023.1137567/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fnins.2023.1137567/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn id="fn0001"><p><sup>1</sup>The code of the EBLS model is available at <ext-link ext-link-type="uri" xlink:href="https://github.com/YangLiuuuu/Extreme-Broad-Learning-System">https://github.com/YangLiuuuu/Extreme-Broad-Learning-System</ext-link>.</p></fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Baydargil</surname> <given-names>H. B.</given-names></name> <name><surname>Park</surname> <given-names>J. S.</given-names></name> <name><surname>Kang</surname> <given-names>D. Y.</given-names></name></person-group> (<year>2021</year>). <article-title>Anomaly analysis of Alzheimer&#x00027;s disease in PET images using an unsupervised adversarial deep learning model</article-title>. <source>Appl. Sci</source>. <volume>11</volume>, <fpage>2187</fpage>. <pub-id pub-id-type="doi">10.3390/app11052187</pub-id></citation>
</ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bouter</surname> <given-names>C.</given-names></name> <name><surname>Henniges</surname> <given-names>P.</given-names></name> <name><surname>Franke</surname> <given-names>T. N.</given-names></name> <name><surname>Irwin</surname> <given-names>C.</given-names></name> <name><surname>Sahlmann</surname> <given-names>C. O.</given-names></name> <name><surname>Sichler</surname> <given-names>M. E.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>18F-FDG-PET detects drastic changes in brain metabolism in the Tg4&#x02013;42 model of Alzheimer&#x00027;s disease</article-title>. <source>Front. Aging Neurosci</source>. <volume>10</volume>, <fpage>425</fpage>. <pub-id pub-id-type="doi">10.3389/fnagi.2018.00425</pub-id><pub-id pub-id-type="pmid">30670962</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brown</surname> <given-names>R. K.</given-names></name> <name><surname>Bohnen</surname> <given-names>N. I.</given-names></name> <name><surname>Wong</surname> <given-names>K. K.</given-names></name> <name><surname>Minoshima</surname> <given-names>S.</given-names></name> <name><surname>Frey</surname> <given-names>K. A.</given-names></name></person-group> (<year>2014</year>). <article-title>Brain PET in suspected dementia: patterns of altered FDG metabolism</article-title>. <source>Radiographics</source> <volume>34</volume>, <fpage>684</fpage>&#x02013;<lpage>701</lpage>. <pub-id pub-id-type="doi">10.1148/rg.343135065</pub-id><pub-id pub-id-type="pmid">24819789</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>C. P.</given-names></name> <name><surname>Liu</surname> <given-names>Z.</given-names></name></person-group> (<year>2017</year>). <article-title>Broad learning system: an effective and efficient incremental learning system without the need for deep architecture</article-title>. <source>IEEE Trans. Neural Netw. Learn. Syst</source>. <volume>29</volume>, <fpage>10</fpage>&#x02013;<lpage>24</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2017.2716952</pub-id><pub-id pub-id-type="pmid">28742048</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>C. P.</given-names></name> <name><surname>Liu</surname> <given-names>Z.</given-names></name> <name><surname>Feng</surname> <given-names>S.</given-names></name></person-group> (<year>2018</year>). <article-title>Universal approximation capability of broad learning system and its structural variations</article-title>. <source>IEEE Trans. Neural Netw. Learn. Syst</source>. <volume>30</volume>, <fpage>1191</fpage>&#x02013;<lpage>1204</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2018.2866622</pub-id><pub-id pub-id-type="pmid">30207965</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ch&#x000E9;telat</surname> <given-names>G.</given-names></name> <name><surname>Arbizu</surname> <given-names>J.</given-names></name> <name><surname>Barthel</surname> <given-names>H.</given-names></name> <name><surname>Garibotto</surname> <given-names>V.</given-names></name> <name><surname>Law</surname> <given-names>I.</given-names></name> <name><surname>Morbelli</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Amyloid-PET and 18F-FDG-PET in the diagnostic investigation of Alzheimer&#x00027;s disease and other dementias</article-title>. <source>Lancet Neurol</source>. <volume>19</volume>, <fpage>951</fpage>&#x02013;<lpage>962</lpage>. <pub-id pub-id-type="doi">10.1016/S1474-4422(20)30314-8</pub-id><pub-id pub-id-type="pmid">33098804</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chu</surname> <given-names>F.</given-names></name> <name><surname>Liang</surname> <given-names>T.</given-names></name> <name><surname>Chen</surname> <given-names>C. P.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Ma</surname> <given-names>X.</given-names></name></person-group> (<year>2019</year>). <article-title>Weighted broad learning system and its application in non-linear industrial process modeling</article-title>. <source>IEEE Trans. Neural Netw. Learn. Syst</source>. <volume>31</volume>, <fpage>3017</fpage>&#x02013;<lpage>3031</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2019.2935033</pub-id><pub-id pub-id-type="pmid">31514158</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ding</surname> <given-names>Y.</given-names></name> <name><surname>Sohn</surname> <given-names>J. H.</given-names></name> <name><surname>Kawczynski</surname> <given-names>M. G.</given-names></name> <name><surname>Trivedi</surname> <given-names>H.</given-names></name> <name><surname>Harnish</surname> <given-names>R.</given-names></name> <name><surname>Jenkins</surname> <given-names>N. W.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>A deep learning model to predict a diagnosis of Alzheimer disease by using 18F-FDG PET of the brain</article-title>. <source>Radiology</source> <volume>290</volume>, <fpage>456</fpage>&#x02013;<lpage>464</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2018180958</pub-id><pub-id pub-id-type="pmid">30398430</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Du</surname> <given-names>J.</given-names></name> <name><surname>Vong</surname> <given-names>C. M.</given-names></name> <name><surname>Chen</surname> <given-names>C. P.</given-names></name></person-group> (<year>2020</year>). <article-title>Novel efficient RNN and LSTM-like architectures: recurrent and gated broad learning systems and their applications for text classification</article-title>. <source>IEEE Trans. Cybern</source>. <volume>51</volume>, <fpage>1586</fpage>&#x02013;<lpage>1597</lpage>. <pub-id pub-id-type="doi">10.1109/TCYB.2020.2969705</pub-id><pub-id pub-id-type="pmid">32086231</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Feng</surname> <given-names>S.</given-names></name> <name><surname>Chen</surname> <given-names>C. P.</given-names></name></person-group> (<year>2018</year>). <article-title>Fuzzy broad learning system: a novel neuro-fuzzy model for regression and classification</article-title>. <source>IEEE Trans. Cybern</source>. <volume>50</volume>, <fpage>414</fpage>&#x02013;<lpage>424</lpage>. <pub-id pub-id-type="doi">10.1109/TCYB.2018.2857815</pub-id><pub-id pub-id-type="pmid">30106747</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Forouzannezhad</surname> <given-names>P.</given-names></name> <name><surname>Abbaspour</surname> <given-names>A.</given-names></name> <name><surname>Li</surname> <given-names>C.</given-names></name> <name><surname>Fang</surname> <given-names>C.</given-names></name> <name><surname>Williams</surname> <given-names>U.</given-names></name> <name><surname>Cabrerizo</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>A Gaussian-based model for early detection of mild cognitive impairment using multimodal neuroimaging</article-title>. <source>J. Neurosci. Methods</source> <volume>333</volume>, <fpage>108544</fpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2019.108544</pub-id><pub-id pub-id-type="pmid">31838182</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gong</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>T.</given-names></name> <name><surname>Chen</surname> <given-names>C. P.</given-names></name> <name><surname>Liu</surname> <given-names>Z.</given-names></name></person-group> (<year>2021</year>). <article-title>Research review for broad learning system: algorithms, theory, and applications</article-title>. <source>IEEE Trans. Cybern</source>. <volume>52</volume>, <fpage>8922</fpage>&#x02013;<lpage>8950</lpage>. <pub-id pub-id-type="doi">10.1109/TCYB.2021.3061094</pub-id><pub-id pub-id-type="pmid">33729975</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hamdi</surname> <given-names>M.</given-names></name> <name><surname>Bourouis</surname> <given-names>S.</given-names></name> <name><surname>Rastislav</surname> <given-names>K.</given-names></name> <name><surname>Mohmed</surname> <given-names>F.</given-names></name></person-group> (<year>2022</year>). <article-title>Evaluation of neuro images for the diagnosis of Alzheimer&#x00027;s disease using deep learning neural network</article-title>. <source>Front. Public Health</source> <volume>10</volume>, <fpage>35</fpage>. <pub-id pub-id-type="doi">10.3389/fpubh.2022.834032</pub-id><pub-id pub-id-type="pmid">35198526</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>He</surname> <given-names>K.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Ren</surname> <given-names>S.</given-names></name> <name><surname>Sun</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x0201C;Deep residual learning for image recognition,&#x0201D;</article-title> in <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source> (<publisher-loc>Las Vegas, NV</publisher-loc>: <publisher-name>IEEE</publisher-name>). <pub-id pub-id-type="doi">10.1109/CVPR.2016.90</pub-id><pub-id pub-id-type="pmid">32166560</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hoffman</surname> <given-names>J. M.</given-names></name> <name><surname>Welsh-Bohmer</surname> <given-names>K. A.</given-names></name> <name><surname>Hanson</surname> <given-names>M.</given-names></name> <name><surname>Crain</surname> <given-names>B.</given-names></name> <name><surname>Hulette</surname> <given-names>C.</given-names></name> <name><surname>Earl</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2000</year>). <article-title>FDG PET imaging in patients with pathologically verified dementia</article-title>. <source>J. Nucl. Med</source>. <volume>41</volume>, <fpage>1920</fpage>&#x02013;<lpage>1928</lpage>. <pub-id pub-id-type="pmid">11079505</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>Y.</given-names></name> <name><surname>Xu</surname> <given-names>J.</given-names></name> <name><surname>Zhou</surname> <given-names>Y.</given-names></name> <name><surname>Tong</surname> <given-names>T.</given-names></name> <name><surname>Zhuang</surname> <given-names>X.</given-names></name> <collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative (ADNI)</collab></person-group>. (<year>2019</year>). <article-title>Diagnosis of Alzheimer&#x00027;s disease <italic>via</italic> multi-modality 3D convolutional neural network</article-title>. <source>Front. Neurosci.</source> <volume>13</volume>, <fpage>509</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2019.00509</pub-id><pub-id pub-id-type="pmid">31213967</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jack</surname> <given-names>C. R.</given-names> <suffix>Jr</suffix></name> <name><surname>Bernstein</surname> <given-names>M. A.</given-names></name> <name><surname>Fox</surname> <given-names>N. C.</given-names></name> <name><surname>Thompson</surname> <given-names>P.</given-names></name> <name><surname>Alexander</surname> <given-names>G.</given-names></name> <name><surname>Harvey</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2008</year>). <article-title>The Alzheimer&#x00027;s disease neuroimaging initiative (ADNI): MRI methods</article-title>. <source>J. Magnet. Reson. Imag. Off. J. Int. Soc. Magnet. Reson. Med</source>. <volume>27</volume>, <fpage>685</fpage>&#x02013;<lpage>691</lpage>. <pub-id pub-id-type="doi">10.1002/jmri.21049</pub-id><pub-id pub-id-type="pmid">18302232</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jagust</surname> <given-names>W.</given-names></name> <name><surname>Gitcho</surname> <given-names>A.</given-names></name> <name><surname>Sun</surname> <given-names>F.</given-names></name> <name><surname>Kuczynski</surname> <given-names>B.</given-names></name> <name><surname>Mungas</surname> <given-names>D.</given-names></name> <name><surname>Haan</surname> <given-names>M.</given-names></name></person-group> (<year>2006</year>). <article-title>Brain imaging evidence of preclinical Alzheimer&#x00027;s disease in normal aging</article-title>. <source>Ann. Neurol. Off. J. Am. Neurol. Assoc. Child Neurol. Soc</source>. <volume>59</volume>, <fpage>673</fpage>&#x02013;<lpage>681</lpage>. <pub-id pub-id-type="doi">10.1002/ana.20799</pub-id><pub-id pub-id-type="pmid">16470518</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jara-Maldonado</surname> <given-names>M.</given-names></name> <name><surname>Alarcon-Aquino</surname> <given-names>V.</given-names></name> <name><surname>Rosas-Romero</surname> <given-names>R.</given-names></name></person-group> (<year>2022</year>). <article-title>A new machine learning model based on the broad learning system and wavelets</article-title>. <source>Eng. Appl. Artif. Intell</source>. <volume>112</volume>, <fpage>104886</fpage>. <pub-id pub-id-type="doi">10.1016/j.engappai.2022.104886</pub-id></citation>
</ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jessen</surname> <given-names>F.</given-names></name> <name><surname>Wolfsgruber</surname> <given-names>S.</given-names></name> <name><surname>Wiese</surname> <given-names>B.</given-names></name> <name><surname>Bickel</surname> <given-names>H.</given-names></name> <name><surname>M&#x000F6;sch</surname> <given-names>E.</given-names></name> <name><surname>Kaduszkiewicz</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>AD dementia risk in late MCI, in early MCI, and in subjective memory impairment</article-title>. <source>Alzheimer&#x00027;s Dementia</source> <volume>10</volume>, <fpage>76</fpage>&#x02013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1016/j.jalz.2012.09.017</pub-id><pub-id pub-id-type="pmid">23375567</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kobylecki</surname> <given-names>C.</given-names></name> <name><surname>Langheinrich</surname> <given-names>T.</given-names></name> <name><surname>Hinz</surname> <given-names>R.</given-names></name> <name><surname>Vardy</surname> <given-names>E. R.</given-names></name> <name><surname>Brown</surname> <given-names>G.</given-names></name> <name><surname>Martino</surname> <given-names>M. E.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>18F-florbetapir PET in patients with frontotemporal dementia and Alzheimer disease</article-title>. <source>J. Nucl. Med</source>. <volume>56</volume>, <fpage>386</fpage>&#x02013;<lpage>391</lpage>. <pub-id pub-id-type="doi">10.2967/jnumed.114.147454</pub-id><pub-id pub-id-type="pmid">25655625</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Levin</surname> <given-names>F.</given-names></name> <name><surname>Ferreira</surname> <given-names>D.</given-names></name> <name><surname>Lange</surname> <given-names>C.</given-names></name> <name><surname>Dyrba</surname> <given-names>M.</given-names></name> <name><surname>Westman</surname> <given-names>E.</given-names></name> <name><surname>Buchert</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Data-driven FDG-PET subtypes of Alzheimer&#x00027;s disease-related neurodegeneration</article-title>. <source>Alzheimer&#x00027;s Res. Therapy</source> <volume>13</volume>, <fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1186/s13195-021-00785-9</pub-id><pub-id pub-id-type="pmid">33608059</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>M.</given-names></name> <name><surname>Cheng</surname> <given-names>D.</given-names></name> <name><surname>Yan</surname> <given-names>W.</given-names></name> <collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab></person-group>. (<year>2018</year>). <article-title>Classification of Alzheimer&#x00027;s disease by combination of convolutional and recurrent neural networks using FDG-PET images</article-title>. <source>Front. Neuroinform</source>. <volume>12</volume>, <fpage>35</fpage>. <pub-id pub-id-type="doi">10.3389/fninf.2018.00035</pub-id><pub-id pub-id-type="pmid">29970996</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>D.</given-names></name> <name><surname>Popuri</surname> <given-names>K.</given-names></name> <name><surname>Ding</surname> <given-names>G. W.</given-names></name> <name><surname>Balachandar</surname> <given-names>R.</given-names></name> <name><surname>Beg</surname> <given-names>M. F.</given-names></name> <collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab></person-group>. (<year>2018</year>). <article-title>Multiscale deep neural network based analysis of FDG-PET images for the early diagnosis of Alzheimer&#x00027;s disease</article-title>. <source>Med. Image Anal</source>. <volume>46</volume>, <fpage>26</fpage>&#x02013;<lpage>34</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2018.02.002</pub-id><pub-id pub-id-type="pmid">29502031</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Marcus</surname> <given-names>C.</given-names></name> <name><surname>Mena</surname> <given-names>E.</given-names></name> <name><surname>Subramaniam</surname> <given-names>R. M.</given-names></name></person-group> (<year>2014</year>). <article-title>Brain PET in the diagnosis of Alzheimer&#x00027;s disease</article-title>. <source>Clin. Nucl. Med</source>. <volume>39</volume>, <fpage>e413</fpage>. <pub-id pub-id-type="doi">10.1097/RLU.0000000000000547</pub-id><pub-id pub-id-type="pmid">25199063</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mosconi</surname> <given-names>L.</given-names></name> <name><surname>Tsui</surname> <given-names>W. H.</given-names></name> <name><surname>Herholz</surname> <given-names>K.</given-names></name> <name><surname>Pupi</surname> <given-names>A.</given-names></name> <name><surname>Drzezga</surname> <given-names>A.</given-names></name> <name><surname>Lucignani</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2008</year>). <article-title>Multicenter standardized 18F-FDG PET diagnosis of mild cognitive impairment, Alzheimer&#x00027;s disease, and other dementias</article-title>. <source>J. Nucl. Med</source>. <volume>49</volume>, <fpage>390</fpage>&#x02013;<lpage>398</lpage>. <pub-id pub-id-type="doi">10.2967/jnumed.107.045385</pub-id><pub-id pub-id-type="pmid">18287270</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nozadi</surname> <given-names>S. H.</given-names></name> <name><surname>Kadoury</surname> <given-names>S.</given-names></name> <collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab></person-group>. (<year>2018</year>). <article-title>Classification of Alzheimer&#x00027;s and MCI patients from semantically parcelled PET images: a comparison between AV45 and FDG-PET</article-title>. <source>Int. J. Biomed. Imag</source>. <volume>2018</volume>, <fpage>1247430</fpage>. <pub-id pub-id-type="doi">10.1155/2018/1247430</pub-id><pub-id pub-id-type="pmid">29736165</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ossenkoppele</surname> <given-names>R.</given-names></name> <name><surname>Tolboom</surname> <given-names>N.</given-names></name> <name><surname>Foster-Dingley</surname> <given-names>J. C.</given-names></name> <name><surname>Adriaanse</surname> <given-names>S. F.</given-names></name> <name><surname>Boellaard</surname> <given-names>R.</given-names></name> <name><surname>Yaqub</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Longitudinal imaging of Alzheimer pathology using [11C] PIB,[18F] FDDNP and [18F] FDG PET</article-title>. <source>Eur. J. Nucl. Med. Mol. Imag</source>. <volume>39</volume>, <fpage>990</fpage>&#x02013;<lpage>1000</lpage>. <pub-id pub-id-type="doi">10.1007/s00259-012-2102-3</pub-id><pub-id pub-id-type="pmid">22441582</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pan</surname> <given-names>X.</given-names></name> <name><surname>Adel</surname> <given-names>M.</given-names></name> <name><surname>Fossati</surname> <given-names>C.</given-names></name> <name><surname>Gaidon</surname> <given-names>T.</given-names></name> <name><surname>Guedj</surname> <given-names>E.</given-names></name></person-group> (<year>2018</year>). <article-title>Multilevel feature representation of FDG-PET brain images for diagnosing Alzheimer&#x00027;s disease</article-title>. <source>IEEE J. Biomed. Health Inform</source>. <volume>23</volume>, <fpage>1499</fpage>&#x02013;<lpage>1506</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2018.2857217</pub-id><pub-id pub-id-type="pmid">30028716</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pao</surname> <given-names>Y. H.</given-names></name> <name><surname>Takefuji</surname> <given-names>Y.</given-names></name></person-group> (<year>1992</year>). <article-title>Functional-link net computing: theory, system architecture, and functionalities</article-title>. <source>Computer</source> <volume>25</volume>, <fpage>76</fpage>&#x02013;<lpage>79</lpage>.</citation>
</ref>
<ref id="B31">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>S.</given-names></name> <name><surname>Srivastava</surname> <given-names>A.</given-names></name> <name><surname>Mi</surname> <given-names>L.</given-names></name> <name><surname>Caselli</surname> <given-names>R. J.</given-names></name> <name><surname>Chen</surname> <given-names>K.</given-names></name> <name><surname>Goradia</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>&#x0201C;Deep-learning-based classification of FDG-PET data for Alzheimer&#x00027;s disease categories,&#x0201D;</article-title> in <source>Proceedings of the 13th International Conference on Medical Information Processing and Analysis</source> (<publisher-loc>Washington, DC</publisher-loc>: <publisher-name>SPIE</publisher-name>). <pub-id pub-id-type="doi">10.1117/12.2294537</pub-id><pub-id pub-id-type="pmid">29263566</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Suk</surname> <given-names>H. I.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name></person-group> (<year>2013</year>). <article-title>&#x0201C;Deep learning-based feature representation for AD/MCI classification,&#x0201D;</article-title> in <source>International Conference on Medical Image Computing and Computer-Assisted Intervention</source> (<publisher-loc>Berlin, Heidelberg</publisher-loc>: <publisher-name>Springer</publisher-name>). <pub-id pub-id-type="doi">10.1007/978-3-642-40763-5_72</pub-id><pub-id pub-id-type="pmid">24579188</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>G.</given-names></name> <name><surname>Duan</surname> <given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>BLCov: a novel collaborative&#x02013;competitive broad learning system for COVID-19 detection from radiology images</article-title>. <source>Eng. Appl. Artif. Intell</source>. <volume>115</volume>, <fpage>105323</fpage>. <pub-id pub-id-type="doi">10.1016/j.engappai.2022.105323</pub-id><pub-id pub-id-type="pmid">35992036</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>L.</given-names></name> <name><surname>Wang</surname> <given-names>M.</given-names></name> <name><surname>Liu</surname> <given-names>M.</given-names></name> <name><surname>Zhang</surname> <given-names>D.</given-names></name></person-group> (<year>2020</year>). <article-title>A survey on deep learning for neuroimaging-based brain disorder analysis</article-title>. <source>Front. Neurosci</source>. <volume>14</volume>, <fpage>779</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2020.00779</pub-id><pub-id pub-id-type="pmid">33117114</pub-id></citation></ref>
</ref-list>


</back>
</article> 