<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Syst. Neurosci.</journal-id>
<journal-title>Frontiers in Systems Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Syst. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5137</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnsys.2022.838822</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>DSNN: A DenseNet-Based SNN for Explainable Brain Disease Classification</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Zhu</surname> <given-names>Ziquan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x02020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1568762/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Lu</surname> <given-names>Siyuan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x02020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1426667/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Wang</surname> <given-names>Shui-Hua</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/625461/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Gorriz</surname> <given-names>Juan Manuel</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/133411/overview"/>
</contrib> 
<contrib contrib-type="author" corresp="yes">
<name><surname>Zhang</surname> <given-names>Yu-Dong</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1535600/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>School of Computing and Mathematical Sciences, University of Leicester</institution>, <addr-line>East Midlands</addr-line>, <country>United Kingdom</country></aff>
<aff id="aff2"><sup>2</sup><institution>School of Computer Science and Technology, Henan Polytechnic University</institution>, <addr-line>Jiaozuo</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Signal Theory, Networking and Communications, University of Granada</institution>, <addr-line>Granada</addr-line>, <country>Spain</country></aff>
<aff id="aff4"><sup>4</sup><institution>Guangxi Key Laboratory of Trusted Software, Guilin University of Electronic Technology</institution>, <addr-line>Guilin</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Robertas Damasevicius, Silesian University of Technology, Poland</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Afshin Shoeibi, K.N.Toosi University of Technology, Iran; Delaram Sadeghi, Islamic Azad University of Mashhad, Iran; Rytis Maskeliunas, Vytautas Magnus University, Lithuania</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Shui-Hua Wang<email>shuihuawang&#x00040;ieee.org</email> Juan Manuel Gorriz <email>jg825&#x00040;cam.ac.uk</email> Yu-Dong Zhang <email>yudong.zhang&#x00040;le.ac.uk</email></corresp>
<fn fn-type="other" id="fn001"><p><sup>&#x02020;</sup>These authors have contributed equally to this work</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>26</day>
<month>05</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>16</volume>
<elocation-id>838822</elocation-id>
<history>
<date date-type="received">
<day>18</day>
<month>12</month>
<year>2021</year>
</date>
<date date-type="accepted">
<day>25</day>
<month>04</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2022 Zhu, Lu, Wang, Gorriz and Zhang.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Zhu, Lu, Wang, Gorriz and Zhang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract><p><bold>Aims</bold>: Brain diseases refer to intracranial tissue and organ inflammation, vascular diseases, tumors, degeneration, malformations, genetic diseases, immune diseases, nutritional and metabolic diseases, poisoning, trauma, parasitic diseases, etc. Taking Alzheimer&#x02019;s disease (AD) as an example, the number of patients dramatically increases in developed countries. By 2025, the number of elderly patients with AD aged 65 and over will reach 7.1 million, an increase of nearly 29% over the 5.5 million patients of the same age in 2018. Unless medical breakthroughs are made, AD patients may increase from 5.5 million to 13.8 million by 2050, almost three times the original. Researchers have focused on developing complex machine learning (ML) algorithms, i.e., convolutional neural networks (CNNs), containing millions of parameters. However, CNN models need many training samples. A small number of training samples in CNN models may lead to overfitting problems. With the continuous research of CNN, other networks have been proposed, such as randomized neural networks (RNNs). Schmidt neural network (SNN), random vector functional link (RVFL), and extreme learning machine (ELM) are three types of RNNs.</p>
<p><bold>Methods</bold>: We propose three novel models to classify brain diseases to cope with these problems. The proposed models are DenseNet-based SNN (DSNN), DenseNet-based RVFL (DRVFL), and DenseNet-based ELM (DELM). The backbone of the three proposed models is the pre-trained &#x0201C;customize&#x0201D; DenseNet. The modified DenseNet is fine-tuned on the empirical dataset. Finally, the last five layers of the fine-tuned DenseNet are substituted by SNN, ELM, and RVFL, respectively.</p>
<p><bold>Results</bold>: Overall, the DSNN gets the best performance among the three proposed models in classification performance. We evaluate the proposed DSNN by five-fold cross-validation. The accuracy, sensitivity, specificity, precision, and F1-score of the proposed DSNN on the test set are 98.46% &#x000B1; 2.05%, 100.00% &#x000B1; 0.00%, 85.00% &#x000B1; 20.00%, 98.36% &#x000B1; 2.17%, and 99.16% &#x000B1; 1.11%, respectively. The proposed DSNN is compared with restricted DenseNet, spiking neural network, and other state-of-the-art methods. Finally, our model obtains the best results among all models.</p>
<p><bold>Conclusions</bold>: DSNN is an effective model for classifying brain diseases.</p></abstract>
<kwd-group>
<kwd>brain diseases</kwd>
<kwd>convolutional neural network</kwd>
<kwd>randomized neural network</kwd>
<kwd>DenseNet</kwd>
<kwd>MRI</kwd>
</kwd-group>
<counts>
<fig-count count="9"/>
<table-count count="8"/>
<equation-count count="18"/>
<ref-count count="63"/>
<page-count count="15"/>
<word-count count="10525"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="introduction" id="s1">
<title>Introduction</title>
<p>Brain diseases refer to intracranial tissue and organ inflammation, vascular diseases, tumors, degeneration, malformations, genetic diseases, immune diseases, nutritional and metabolic diseases, poisoning, trauma, parasitic diseases, etc. Brain diseases often show disorders of consciousness, sensation, movement, or autonomic nerve dysfunction. There may also be fever, headache, vomiting, and other mental symptoms. Taking Alzheimer&#x02019;s disease (AD) as an example, the number of patients dramatically increases in developed countries. By 2025, the number of elderly patients with AD aged 65 and over will reach 7.1 million, increasing nearly 29% over the 5.5 million patients of the same age in 2018 (Lynch, <xref ref-type="bibr" rid="B34">2018</xref>). Unless medical breakthroughs are made, the number of Alzheimer&#x02019;s patients aged 65 and over may increase from 5.5 million to 13.8 million by 2050, almost three times the original.</p>
<p>Now, brain diseases are mainly diagnosed by doctors. However, the manual diagnosis requires much time. At the same time, different doctors may have different views on the same examination results, which has brought a lot of trouble to patients.</p>
<p>More and more researchers use computational methods (Wang et al., <xref ref-type="bibr" rid="B57">2021</xref>) to classify brain diseases. Noreen et al. (<xref ref-type="bibr" rid="B38">2020</xref>) introduced a multi-level method using two DensNet201 and Inception-v3 to diagnose early brain tumors. Finally, the accuracy of Inception-v3 and DensNet201 were 99.34% and 99.51%, respectively. Amin et al. (<xref ref-type="bibr" rid="B3">2019b</xref>) presented a model using magnetic resonance images to automatically classify brain tumors according to the LSTM model method. What&#x02019;s more, this method obtained 0.97 DSC in practical application. Amin et al. (<xref ref-type="bibr" rid="B4">2019a</xref>) used a deep learning model to predict healthy and unhealthy brain tumor slices. Arunkumar et al. (<xref ref-type="bibr" rid="B5">2020</xref>) introduced a new model to identify ROI location based on brain tumor MRI. The method finally got 89% sensitivity, 92.14% accuracy, and 94% specificity. Purushottam Gumaste and Bairagi (<xref ref-type="bibr" rid="B42">2020</xref>) proposed an algorithm to extract left and right brain features. This article also introduced different statistical feature extraction methods and used a support vector machine to extract tumor regions from statistical features. Chatterjee and Das (<xref ref-type="bibr" rid="B8">2019</xref>) proposed a novel method for the segmentation of brain images, which were divided into two categories: benign (low level) and evil (high level). Bhanothu et al. (<xref ref-type="bibr" rid="B7">2020</xref>) presented a new method according to R-CNN to detect tumors and mark their location. Finally, the detection and classification accuracy of the three types of brain tumors were 89.45%, 68.18%, and 75.18%. Natekar et al. 
(<xref ref-type="bibr" rid="B36">2020</xref>) compared various technologies for brain tumor segmentation models and visualized the internal concepts to have a deeper understanding of how these technologies segmented with high accuracy. Aboelenein et al. (<xref ref-type="bibr" rid="B1">2020</xref>) introduced a novel network (HTTU-Net) for brain tumor cutting. Huang et al. (<xref ref-type="bibr" rid="B19">2020</xref>) presented the differential feature neural network (DFNN) method. The method introduced DFM blocks and combined SE blocks. When the DFM block was introduced, the accuracy of the two databases was improved by 1.8% and 1.3%, respectively. Hu and Razmjooy (<xref ref-type="bibr" rid="B16">2020</xref>) proposed a meta heuristic-based system to detect tumors. Sadad et al. (<xref ref-type="bibr" rid="B45">2021</xref>) introduced a novel model according to UNET architecture and ResNet50 as the backbone for the detection of brain tumors. Kalaiselvi et al. (<xref ref-type="bibr" rid="B21">2020</xref>) proposed a patch-based-updated run-length region growth (PR2G) method to detect and segment tumors. The accuracy of this method was 97%. Kaplan et al. (<xref ref-type="bibr" rid="B22">2020</xref>) used two methods to classify the three different types of brain tumors. The two methods were <italic>n</italic>LBP and &#x003B1;LBP. The highest classification accuracy of brain tumors was 95.56%. Khalil et al. (<xref ref-type="bibr" rid="B24">2020</xref>) proposed a new method (DA clustering) to improve the accuracy of extracting initial contour points to detect three-dimensional magnetic resonance brain tumors better. Khan et al. (<xref ref-type="bibr" rid="B25">2020</xref>) proposed a new method, partial tree (PART), to detect brain tumors of grade I to grade IV brain tumors. This method used the rule learner of an advanced feature set. 
Ma and Zhang (<xref ref-type="bibr" rid="B35">2021</xref>) proposed a method to intelligently detect brain tumors based on a lightweight neural network. Hollon et al. (<xref ref-type="bibr" rid="B15">2020</xref>) introduced a new method for the automatic detection of brain tumors by combining SRH 5&#x02013;7, CNN, and the label-free optical imaging method. Saba et al. (<xref ref-type="bibr" rid="B44">2020</xref>) used a new method to detect brain tumors. The Grasp cut method was used to segment brain tumor symptoms, and VGG-19 was used to obtain features. Sharif et al. (<xref ref-type="bibr" rid="B48">2020</xref>) proposed an unsupervised fuzzy set method for brain tumor segmentation. The triangular fuzzy median filter enhanced the image to better detect brain tumors. Xu et al. (<xref ref-type="bibr" rid="B59">2020</xref>) presented a new structure for the early detection of brain tumors. The new structure was mainly composed of five parts: tumor segmentation, morphology, denoising, feature extraction, and classification. Hemanth et al. (<xref ref-type="bibr" rid="B14">2011</xref>) introduced a novel method (HSBPN) to segment MR brain tumor images. Nayef et al. (<xref ref-type="bibr" rid="B37">2013</xref>) introduced a novel structure for the classification of the MRI dataset. Chen et al. (<xref ref-type="bibr" rid="B11">2017</xref>) presented an improved method for detecting pathological brains. A new classifier was used in the improved method. Shoeibi et al. (<xref ref-type="bibr" rid="B49">2020</xref>) finished a review on the segmentation of the Covid-19 by DL. Shoeibi et al. (<xref ref-type="bibr" rid="B51">2021b</xref>) performed a comprehensive survey about the application of DL in the detection of multiple sclerosis. Sadeghi et al. (<xref ref-type="bibr" rid="B46">2021</xref>) showed a survey on the automatic diagnosis of the SZ by AI. Shoeibi et al. 
(<xref ref-type="bibr" rid="B50">2021a</xref>) completed a comprehensive review on the application of the various AI techniques in the diagnosis of epileptic seizures. Shoeibi et al. (<xref ref-type="bibr" rid="B52">2021c</xref>) completed a review of various methods based on DL for automatic diagnosis of SZ by electroencephalogram (EEG) signals. Shoeibi et al. (<xref ref-type="bibr" rid="B53">2022</xref>) proposed a new model to automatically detect Epileptic seizures. The proposed model was based on the DL and the fuzzy theory. Odusami et al. (<xref ref-type="bibr" rid="B39">2022</xref>) proposed a method for the recognition of AD. They tested two CNN models (DenseNet201 and ResNet18) to perform this task. This method obtained 98.86% accuracy, 98.94% precision, and 98.89% recall. Razzak et al. (<xref ref-type="bibr" rid="B43">2022</xref>) introduced a new network (PartialNet) to detect AD based on MRIs. This network achieved improvements on the AD detection. Ashraf et al. (<xref ref-type="bibr" rid="B6">2021</xref>) experimented with different CNN models to detect AD based on transfer learning. Finally, the fine-tuned DenseNet got the highest accuracy (99.05%).</p>
<p>If brain diseases are diagnosed manually, doctors need to spend a lot of time on examination. Sometimes we may encounter the problem that different doctors have different views on the examination results of the same patient. As shown in <xref ref-type="table" rid="T1">Table 1</xref>, most researchers use deep convolution neural networks (DCNNs) to classify and identify brain diseases. However, there will be many parameters and calculations in the training of DCNN, which can lead to a long training time (Zhang et al., <xref ref-type="bibr" rid="B62">2021</xref>). At the same time, DCNNs need a large number of experimental data for training because a small number of experimental data may lead to overfitting problems (G&#x000F3;rriz et al., <xref ref-type="bibr" rid="B12">2020</xref>; Zhang et al., <xref ref-type="bibr" rid="B63">2020</xref>).</p>
<table-wrap id="T1" position="float">
<label>Table 1</label>
<caption><p>Contributions of state-of-the-art methods.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center">Method</th>
<th align="center">Contribution</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Noreen et al. (<xref ref-type="bibr" rid="B38">2020</xref>)</td>
<td align="center">A multi-level method using DenseNet201 and Inception-v3 was proposed to diagnose early brain tumors.</td>
</tr>
<tr>
<td align="left">Amin et al. (<xref ref-type="bibr" rid="B3">2019b</xref>)</td>
<td align="center">A model according to the LSTM model method using magnetic resonance images was introduced to classify brain tumors automatically.</td>
</tr>
<tr>
<td align="left">Amin et al. (<xref ref-type="bibr" rid="B4">2019a</xref>)</td>
<td align="center">A deep learning model was used to predict healthy and unhealthy brain tumor slices.</td>
</tr>
<tr>
<td align="left">Arunkumar et al. (<xref ref-type="bibr" rid="B5">2020</xref>)</td>
<td align="center">A new model was introduced to identify ROI location based on brain tumor MRI.</td>
</tr>
<tr>
<td align="left">Purushottam Gumaste and Bairagi (<xref ref-type="bibr" rid="B42">2020</xref>)</td>
<td align="center">An algorithm was proposed to extract left and right brain features. This article also introduced different statistical feature extraction methods and used a Support Vector Machine to extract tumor regions from statistical features.</td>
</tr>
<tr>
<td align="left">Chatterjee and Das (<xref ref-type="bibr" rid="B8">2019</xref>)</td>
<td align="center">A novel method was proposed for the segmentation of brain images.</td>
</tr>
<tr>
<td align="left">Bhanothu et al. (<xref ref-type="bibr" rid="B7">2020</xref>)</td>
<td align="center">A new method based on R-CNN was presented to detect tumors and mark their location.</td>
</tr>
<tr>
<td align="left">Natekar et al. (<xref ref-type="bibr" rid="B36">2020</xref>)</td>
<td align="center">Various technologies were compared for brain tumor segmentation models.</td>
</tr>
<tr>
<td align="left">Aboelenein et al. (<xref ref-type="bibr" rid="B1">2020</xref>)</td>
<td align="center">The HTTU-Net was proposed for brain tumor cutting.</td>
</tr>
<tr>
<td align="left">Huang et al. (<xref ref-type="bibr" rid="B19">2020</xref>)</td>
<td align="center">The DFNN was proposed. The method introduced DFM blocks and combined SE blocks.</td>
</tr>
<tr>
<td align="left">Hu and Razmjooy (<xref ref-type="bibr" rid="B16">2020</xref>)</td>
<td align="center">A meta heuristic-based system was presented to detect tumors.</td>
</tr>
<tr>
<td align="left">Sadad et al. (<xref ref-type="bibr" rid="B45">2021</xref>)</td>
<td align="center">A novel model according to UNET architecture and ResNet50 as the backbone was proposed for the detection of brain tumors.</td>
</tr>
<tr>
<td align="left">Kalaiselvi et al. (<xref ref-type="bibr" rid="B21">2020</xref>)</td>
<td align="center">The PR2G was proposed to detect and segment tumors.</td>
</tr>
<tr>
<td align="left">Kaplan et al. (<xref ref-type="bibr" rid="B22">2020</xref>)</td>
<td align="center">The <italic>n</italic>LBP and &#x003B1;LBP were used to classify the three different types of brain tumors.</td>
</tr>
<tr>
<td align="left">Khalil et al. (<xref ref-type="bibr" rid="B24">2020</xref>)</td>
<td align="center">The DA clustering was proposed to improve the accuracy of extracting initial contour points to detect three-dimensional magnetic resonance brain tumors better.</td>
</tr>
<tr>
<td align="left">Khan et al. (<xref ref-type="bibr" rid="B25">2020</xref>)</td>
<td align="center">The PART was introduced to detect brain tumors of grade I to grade IV.</td>
</tr>
<tr>
<td align="left">Ma and Zhang (<xref ref-type="bibr" rid="B35">2021</xref>)</td>
<td align="center">A method was proposed to intelligently detect brain tumors based on a lightweight neural network.</td>
</tr>
<tr>
<td align="left">Hollon et al. (<xref ref-type="bibr" rid="B15">2020</xref>)</td>
<td align="center">A new method was proposed for the automatic detection of brain tumors by combining SRH 5&#x02013;7, CNN, and the label-free optical imaging method.</td>
</tr>
<tr>
<td align="left">Saba et al. (<xref ref-type="bibr" rid="B44">2020</xref>)</td>
<td align="center">A new method was proposed to detect brain tumors. The Grasp cut method was used to segment brain tumor symptoms, and VGG-19 was used to obtain features.</td>
</tr>
<tr>
<td align="left">Sharif et al. (<xref ref-type="bibr" rid="B48">2020</xref>)</td>
<td align="center">An unsupervised fuzzy set method was introduced for brain tumor segmentation.</td>
</tr>
<tr>
<td align="left">Xu et al. (<xref ref-type="bibr" rid="B59">2020</xref>)</td>
<td align="center">A new structure was proposed for the early detection of brain tumors. The new structure was mainly composed of five parts: tumor segmentation, morphology, denoising, feature extraction, and classification.</td>
</tr>
<tr>
<td align="left">Hemanth et al. (<xref ref-type="bibr" rid="B14">2011</xref>)</td>
<td align="center">The HSBPN was proposed to segment MR brain tumor images.</td>
</tr>
<tr>
<td align="left">Nayef et al. (<xref ref-type="bibr" rid="B37">2013</xref>)</td>
<td align="center">A novel structure was presented for the classification of the MRI dataset.</td>
</tr>
<tr>
<td align="left">Chen et al. (<xref ref-type="bibr" rid="B11">2017</xref>)</td>
<td align="center">An improved method was introduced for detecting pathological brains.</td>
</tr>
<tr>
<td align="left">Shoeibi et al. (<xref ref-type="bibr" rid="B49">2020</xref>)</td>
<td align="center">A review was presented on the segmentation of the Covid-19 by DL.</td>
</tr>
<tr>
<td align="left">Shoeibi et al. (<xref ref-type="bibr" rid="B51">2021b</xref>)</td>
<td align="center">A comprehensive survey was presented about the application of DL in the detection of multiple sclerosis.</td>
</tr>
<tr>
<td align="left">Sadeghi et al. (<xref ref-type="bibr" rid="B46">2021</xref>)</td>
<td align="center">A survey was presented on the automatic diagnosis of the SZ by AI.</td>
</tr>
<tr>
<td align="left">Shoeibi et al. (<xref ref-type="bibr" rid="B50">2021a</xref>)</td>
<td align="center">A comprehensive review was presented on applying the various AI techniques in the diagnosis of Epileptic seizures.</td>
</tr>
<tr>
<td align="left">Shoeibi et al. (<xref ref-type="bibr" rid="B52">2021c</xref>)</td>
<td align="center">A review of various methods based on DL for automatic diagnosis of SZ by electroencephalogram (EEG) signals was completed.</td>
</tr>
<tr>
<td align="left">Shoeibi et al. (<xref ref-type="bibr" rid="B53">2022</xref>)</td>
<td align="center">A new model was proposed to detect Epileptic seizures automatically. The proposed model was based on the DL and the fuzzy theory.</td>
</tr>
<tr>
<td align="left">Odusami et al. (<xref ref-type="bibr" rid="B39">2022</xref>)</td>
<td align="center">A method was proposed for the recognition of AD. They tested two CNN models (DenseNet201 and ResNet18) to perform this task.</td>
</tr>
<tr>
<td align="left">Razzak et al. (<xref ref-type="bibr" rid="B43">2022</xref>)</td>
<td align="center">A new network (PartialNet) was introduced to detect AD based on MRIs. This network achieved improvements in AD detection.</td>
</tr>
<tr>
<td align="left">Ashraf et al. (<xref ref-type="bibr" rid="B6">2021</xref>)</td>
<td align="center">Different CNN models were experimented with to detect AD based on transfer learning. Finally, the fine-tuned DenseNet got the highest accuracy (99.05%).</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>To cope with the problems mentioned above, we propose three novel models to classify brain diseases automatically. They are: DenseNet-based Schmidt neural network (DSNN), DenseNet-based random vector functional link (DRVFL), and DenseNet-based extreme learning machine (DELM). We select DenseNet to extract features and use randomized neural networks (RNNs) for classification.</p>
<p>We modify the pre-trained DenseNet. Then, the modified DenseNet is fine-tuned on the dataset. In the DSNN, the last five layers within the fine-tuned DenseNet are substituted by the Schmidt neural network (SNN). In the DRVFL, we select the random vector functional link (RVFL) to substitute the last five layers of the fine-tuned DenseNet. In the DELM, we choose the extreme learning machine (ELM) to replace the end five layers of the fine-tuned DenseNet. Five-fold cross-validation is used to evaluate the proposed three models: DSNN, DRVFL, and DELM, in terms of five aspects (Acc, Sen, Spe, Pre, and F1). We finally get that the DSNN gives the best performance among the three proposed models and overperforms the other six state-of-the-art algorithms. The five main innovations of this study are:</p>
<list list-type="simple">
<list-item><label>(1)</label><p>DenseNet is validated as the backbone by experiments showing its superiority to AlexNet, ResNet-18, ResNet-50, and VGG.</p></list-item>
<list-item><label>(2)</label><p>DSNN, DRVFL, and DELM are proposed by replacing the last five layers within the fine-tuned DenseNet with three randomized neural networks.</p></list-item>
<list-item><label>(3)</label><p>The DSNN gets the best performance among the three proposed models.</p></list-item>
<list-item><label>(4)</label><p>The DSNN overperforms the restricted DenseNet and spiking neural network by experiments.</p></list-item>
<list-item><label>(5)</label><p>The DSNN is compared with six state-of-the-art algorithms and obtains the best results among the list methods.</p></list-item>
</list>
<p>The rest of this article is as follows. The dataset is given in Section &#x0201C;Materials&#x0201D;. Section &#x0201C;Methodology&#x0201D; discusses the methodology. Section &#x0201C;Results and Discussion&#x0201D; is about the experiment results. We conclude this article in Section &#x0201C;Conclusion&#x0201D;.</p>
</sec>
<sec sec-type="materials" id="s2">
<title>Materials</title>
<p>The dataset is downloaded from the Harvard Medical School website (Johnson and Becker, <xref ref-type="bibr" rid="B20">2021</xref>). There are four types of brain diseases: cerebrovascular disease, neoplastic disease, degenerative disease, and inflammatory or infectious disease. This article classifies all four brain disease images as unhealthy brain images. A total of 177 unhealthy brain images and 20 healthy brain images are used in this article. The size of all images in this article is 256 &#x000D7; 256. Some unhealthy and healthy brain images in this article are shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. The left four images are the unhealthy brain images, and the right four are the healthy images.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p><bold>(A)</bold> Unhealthy and <bold>(B)</bold> healthy brain images in the dataset.</p></caption>
<graphic xlink:href="fnsys-16-838822-g0001.tif"/>
</fig>
</sec>
<sec id="s3">
<title>Methodology</title>
<sec id="s3-1">
<title>Proposed DSNN</title>
<p><xref ref-type="table" rid="T2">Tables 2</xref>, <xref ref-type="table" rid="T3">3</xref> give the acronym definitions and parameter definitions, respectively. More and more researchers devote energy to researching image classification technology (Lu et al., <xref ref-type="bibr" rid="B31">2021</xref>). In image classification, feature extraction is a crucial step. However, the image contains too much messy information, so extracting valuable features is difficult. Decades ago, people usually manually extracted features. However, manual feature extraction takes much time, and the results are usually not ideal. With the continuous progress of computer technology, more and more people use computer models for image feature extraction (Leming et al., <xref ref-type="bibr" rid="B27">2020</xref>). Many computer models are successful (Lu S. Y. et al., <xref ref-type="bibr" rid="B28">2021</xref>), such as CNN models. The convolution layer in the CNN model can significantly reduce the volume of parameters to shorten the training time. Researchers have proposed many great CNN models, such as AlexNet (Lu et al., <xref ref-type="bibr" rid="B30">2020a</xref>), MobileNet (Lu et al., <xref ref-type="bibr" rid="B32">2020</xref>), ResNet (Lu et al., <xref ref-type="bibr" rid="B29">2020b</xref>), and so on. This article proposes three models for the automatic classification of brain diseases: DSNN, DRVFL, and DELM. The DSNN gets the best performance among the three proposed models.</p>
<table-wrap id="T2" position="float">
<label>Table 2</label>
<caption><p>Acronym and full explanation.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center">Acronym</th>
<th align="center">Full explanation</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">AD</td>
<td align="center">Alzheimer&#x02019;s disease</td>
</tr>
<tr>
<td align="left">Acc</td>
<td align="center">Accuracy</td>
</tr>
<tr>
<td align="left">Avr</td>
<td align="center">Average</td>
</tr>
<tr>
<td align="left">BN</td>
<td align="center">Batch normalization</td>
</tr>
<tr>
<td align="left">CNN</td>
<td align="center">Convolution neural network</td>
</tr>
<tr>
<td align="left">DCNN</td>
<td align="center">Deep convolution neural network</td>
</tr>
<tr>
<td align="left">DELM</td>
<td align="center">DenseNet-based extreme learning machine</td>
</tr>
<tr>
<td align="left">DL</td>
<td align="center">Deep learning</td>
</tr>
<tr>
<td align="left">DRVFL</td>
<td align="center">DenseNet-based random vector functional link</td>
</tr>
<tr>
<td align="left">DSNN</td>
<td align="center">DenseNet-based Schmidt neural network</td>
</tr>
<tr>
<td align="left">ELM</td>
<td align="center">Extreme learning machine</td>
</tr>
<tr>
<td align="left">F1</td>
<td align="center">F1-score</td>
</tr>
<tr>
<td align="left">FC</td>
<td align="center">Fully connected</td>
</tr>
<tr>
<td align="left">ML</td>
<td align="center">Machine learning</td>
</tr>
<tr>
<td align="left">Pre</td>
<td align="center">Precision</td>
</tr>
<tr>
<td align="left">RVFL</td>
<td align="center">Random vector functional link</td>
</tr>
<tr>
<td align="left">RNNs</td>
<td align="center">Randomized neural networks</td>
</tr>
<tr>
<td align="left">Sen</td>
<td align="center">Sensitivity</td>
</tr>
<tr>
<td align="left">SNN</td>
<td align="center">Schmidt neural network</td>
</tr>
<tr>
<td align="left">Spe</td>
<td align="center">Specificity</td>
</tr>
<tr>
<td align="left">Std</td>
<td align="center">Standard deviation</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T3" position="float">
<label>Table 3</label>
<caption><p>The definition of the parameter.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center">Parameter</th>
<th align="center">Definition</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left"><italic>O</italic><sub><italic>m</italic></sub></td>
<td align="center">The output of the <italic>M</italic>-th layer</td>
</tr>
<tr>
<td align="left"><italic>T</italic><sub><italic>m</italic></sub></td>
<td align="center">The nonlinear transformation</td>
</tr>
<tr>
<td align="left"><italic>(<bold>x<sub>i</sub>, y<sub>i</sub></bold>)</italic></td>
<td align="center">The given dataset</td>
</tr>
<tr>
<td align="left"><italic>n</italic></td>
<td align="center">The input dimension</td>
</tr>
<tr>
<td align="left"><italic>M</italic></td>
<td align="center">The output dimension</td>
</tr>
<tr>
<td align="left"><italic><bold>w<sub>j</sub></bold></italic></td>
<td align="center">The weights vector</td>
</tr>
<tr>
<td align="left"><italic>d</italic><sub><italic>j</italic></sub></td>
<td align="center">The bias of the <italic>j</italic>-th hidden node</td>
</tr>
<tr>
<td align="left"><italic>P</italic></td>
<td align="center">The final output weights</td>
</tr>
<tr>
<td align="left"><italic>q</italic></td>
<td align="center">The output biases of SNN</td>
</tr>
<tr>
<td align="left"><bold>Y= (<italic>y<sub>1</sub>,....,y<sub>N</sub></italic>)<sup>T</sup></bold></td>
<td align="center">The ground-truth label matrix of the dataset</td>
</tr>
<tr>
<td align="left"><bold>X= (<italic>x<sub>1</sub>,....,x<sub>N</sub></italic>)<sup>T</sup></bold></td>
<td align="center">The input matrix</td>
</tr>
<tr>
<td align="left"><italic>s</italic>()</td>
<td align="center">The sigmoid function</td>
</tr>
<tr>
<td align="left"><italic>V</italic></td>
<td align="center">The number of hidden nodes</td>
</tr>
<tr>
<td align="left"><bold>A</bold></td>
<td align="center">The output matrix of the hidden layer</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The pseudocode of the proposed DSNN is shown in <xref ref-type="table" rid="T4">Table 4</xref>. The pipeline of our model is given in <xref ref-type="fig" rid="F2">Figure 2</xref>. We choose the pre-trained DenseNet as the backbone of the proposed DSNN. We modify the pre-trained DenseNet. Then, the modified DenseNet is fine-tuned on the dataset. The last five layers within the fine-tuned DenseNet are substituted by the Schmidt neural network (SNN). In our model, the fine-tuned DenseNet plays the role of feature extraction. The SNN is trained by the extracted features F from the fine-tuned DenseNet. Five-fold cross-validation is used to evaluate the proposed DSNN.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>The pipeline of the proposed DSNN.</p></caption>
<graphic xlink:href="fnsys-16-838822-g0002.tif"/>
</fig>
<table-wrap id="T4" position="float">
<label>Table 4</label>
<caption><p>Pseudocode of the proposed DSNN.</p></caption>
<table frame="hsides" rules="groups">
<tbody>
<tr>
<td align="left">Step 1: Load the pre-trained DenseNet.</td>
</tr>
<tr>
<td align="left">Step 2: Modify the pre-trained DenseNet.</td>
</tr>
<tr>
<td align="left">&#x02003;&#x02003; Step 2.1 Remove softmax and classification layer from the pre-trainedDenseNet.</td>
</tr>
<tr>
<td align="left">&#x02003;&#x02003; Step 2.2 Add FC128, ReLU, BN, FC2, softmax, and classification layer.</td>
</tr>
<tr>
<td align="left">Step 3: Divide the dataset into five groups of the same size and set <italic>i</italic>=1</td>
</tr>
<tr>
<td align="left">&#x02003; Step 4: Use the <italic>i</italic>-th group as the test set, and all the other groups form the training set.</td>
</tr>
<tr>
<td align="left">&#x02003; Step 5: Fine-tune the modified DenseNet.</td>
</tr>
<tr>
<td align="left">&#x02003;&#x02003; Step 5.1: Input is the training set.</td>
</tr>
<tr>
<td align="left">&#x02003;&#x02003; Step 5.2: Target is the corresponding label.</td>
</tr>
<tr>
<td align="left">&#x02003; Step 6: Replace the last five layers of the fine-tuned DenseNet with SNN.</td>
</tr>
<tr>
<td align="left">&#x02003; Step 7: Extract features <italic>F</italic> as the output of the FC128 layer.</td>
</tr>
<tr>
<td align="left">&#x02003; Step 8: Train the classifier of the DSNN on the extracted features <italic>F</italic> and the labels.</td>
</tr>
<tr>
<td align="left">&#x02003;&#x02003; Step 8.1: Input is the extracted features.</td>
</tr>
<tr>
<td align="left">&#x02003;&#x02003; Step 8.2: The target is the label of the training set.</td>
</tr>
<tr>
<td align="left">&#x02003;&#x02003; Step 8.3: SNN is the classifier of the DSNN.</td>
</tr>
<tr>
<td align="left">&#x02003; Step 9: Test the trained DSNN on the test set.</td>
</tr>
<tr>
<td align="left">&#x02003; Step 10: Report the test classification performance of the trained DSNN.</td>
</tr>
<tr>
<td align="left">&#x02003; Step 11: Set <italic>i</italic>= <italic>i</italic> &#x0002B; 1, if <italic>i</italic> &#x0003C; 6, go to Step 4.</td>
</tr>
<tr>
<td align="left">Step 12: Average test classification performance.</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3-2">
<title>Backbone of the Proposed DSNN</title>
<p>The CNN models (Albawi et al., <xref ref-type="bibr" rid="B2">2017</xref>) have been researched continuously in recent decades. In 1998, LeCun proposed LeNet (LeCun, <xref ref-type="bibr" rid="B26">2015</xref>) with a five-layer structure. In 2014, the visual geometry group proposed VGG (Simonyan and Zisserman, <xref ref-type="bibr" rid="B54">2014</xref>) with a 19-layer structure. The Highway Networks (Srivastava et al., <xref ref-type="bibr" rid="B55">2015</xref>) were proposed later, with more than 100 layers.</p>
<p>With the increasing number of network layers in CNN models, researchers are troubled by the problem of gradient vanishing. Batch normalization (BN) alleviates the problem of gradient vanishing to some extent. ResNet (He et al., <xref ref-type="bibr" rid="B13">2016</xref>) reduces the gradient vanishing problem by constructing identity mapping. In 2017, DenseNet (Huang et al., <xref ref-type="bibr" rid="B17">2017</xref>) was proposed to reduce the gradient vanishing problem by establishing dense connectivity between the front and rear layers. Dense connectivity makes more effective use of features than other networks. Thus, DenseNet can achieve better performance. The general view of DenseNet is given in <xref ref-type="fig" rid="F3">Figure 3A</xref>.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Backbone of the proposed DSNN. <bold>(A)</bold> The general view of DenseNet. <bold>(B)</bold> The modifications in the pre-trained DenseNet.</p></caption>
<graphic xlink:href="fnsys-16-838822-g0003.tif"/>
</fig>
<p>Dense blocks refer to the specific blocks of DenseNet, as shown in <xref ref-type="fig" rid="F3">Figure 3A</xref>. All the front layers are connected with the rear layers. In the same dense block, the height and width of each feature map will not change, but the number of channels will change. In the traditional sequential CNN, if you have <italic>M</italic> layers, there will be <italic>M</italic> connections, but DenseNet will introduce <italic>M</italic>(<italic>M</italic>+1)/2 connections. Supposing there are <italic>M</italic> layers, <italic>O</italic><sub>M</sub> denotes the output of the <italic>M</italic>-th layer, <italic>T</italic><sub>M</sub> represents the nonlinear transformation. The comparison of DenseNet with other CNNs is listed below:</p>
<p>Traditional sequential CNN:</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M1"><mml:mrow><mml:msub><mml:mi>O</mml:mi><mml:mi>M</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>T</mml:mi><mml:mi>M</mml:mi></mml:msub><mml:mo>(</mml:mo><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>
<p>ResNet:</p>
<disp-formula id="E2"><label>(2)</label><mml:math id="M2"><mml:mrow><mml:msub><mml:mi>O</mml:mi><mml:mi>M</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>T</mml:mi><mml:mi>M</mml:mi></mml:msub><mml:mo>(</mml:mo><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo><mml:mo>+</mml:mo><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:math></disp-formula>
<p>DenseNet:</p>
<disp-formula id="E3"><label>(3)</label><mml:math id="M3"><mml:mrow><mml:msub><mml:mi>O</mml:mi><mml:mi>M</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>T</mml:mi><mml:mi>M</mml:mi></mml:msub><mml:mo>[</mml:mo><mml:mo>(</mml:mo><mml:msub><mml:mi>O</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>O</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula>
<p>where [] is the concatenation.</p>
<p>The transition layer is a module that connects different dense blocks. Its primary function is to integrate the features obtained from the previous dense block and reduce its width and height.</p>
<p>Researchers used the ImageNet dataset to pre-train the DenseNet. There are 1,000 output nodes on the pre-trained DenseNet. However, this article only needs two output nodes. We modify the pre-trained DenseNet. The modifications are shown in <xref ref-type="fig" rid="F3">Figure 3B</xref>.</p>
<p>After these modifications, we fine-tune the modified DenseNet by the training set. We remove the last five layers of the fine-tuned DenseNet and add SNN to improve the classification performance. In the proposed DSNN, the fine-tuned DenseNet is the feature extraction.</p>
</sec>
<sec id="s3-3">
<title>Three Proposed Networks</title>
<p>Compared with the pre-trained DenseNet, randomized neural networks (RNNs) have a much shorter training time. In the DSNN, we replace the last five layers of the fine-tuned DenseNet with the RNN: the Schmidt neural network (SNN; Schmidt et al., <xref ref-type="bibr" rid="B47">1992</xref>). The SNN is trained by the extracted features <italic>F</italic> from FC128. The structure of the SNN is shown in <xref ref-type="fig" rid="F4">Figure 4</xref>.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Structure of SNN.</p></caption>
<graphic xlink:href="fnsys-16-838822-g0004.tif"/>
</fig>
<p>The yellow box is the input, the pink circle represents the hidden nodes, and the green box shows the output. Given <italic>N</italic> samples and dataset with the <italic>i</italic>-th sample as (<bold><italic>x<sub>i</sub>, y<sub>i</sub></italic></bold>):</p>
<disp-formula id="E4"><label>(4)</label><mml:math id="M4"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mi>T</mml:mi></mml:msup><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mi>R</mml:mi><mml:mi>n</mml:mi></mml:msup><mml:mo>,</mml:mo><mml:mtext>&#x02009;&#x02009;</mml:mtext><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:mi>N</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="E5"><label>(5)</label><mml:math id="M5"><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mi>T</mml:mi></mml:msup><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mi>R</mml:mi><mml:mi>m</mml:mi></mml:msup><mml:mo>,</mml:mo><mml:mtext>&#x02009;&#x02009;</mml:mtext><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:mi>N</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>where <italic>n</italic> is the input dimension, <italic>m</italic> is the output dimension.</p>
<p>The training algorithm of SNN is as follows. The weights vector (<bold><italic>w<sub>j</sub></italic></bold>) connects the <italic>j</italic>-th hidden node with input nodes, <italic>d<sub>j</sub></italic> is the bias of the <italic>j</italic>-th hidden node. The weights vector (<bold><italic>w<sub>j</sub></italic></bold>) and the bias (<italic>d<sub>j</sub></italic>) are assigned with random values and will remain unchanged during the training process. The output matrix of the hidden layer with <italic>V</italic> hidden nodes is calculated as follows:</p>
<disp-formula id="E6"><label>(6)</label><mml:math id="M6"><mml:mrow><mml:msub><mml:mtext>A</mml:mtext><mml:mrow><mml:mtext>SNN</mml:mtext></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mstyle><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mtext>&#x02009;</mml:mtext><mml:mo>=</mml:mo><mml:mtext>&#x02009;</mml:mtext><mml:mn>1</mml:mn></mml:mrow><mml:mi>V</mml:mi></mml:munderover><mml:mrow><mml:mi>s</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>w</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mstyle><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:mi>N</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>where the sigmoid function is represented as <italic>s</italic>(). Then we use pseudo-inverse to calculate the final output weights (<italic><bold>P</bold></italic>):</p>
<disp-formula id="E7"><label>(7)</label><mml:math id="M7"><mml:mrow><mml:mo>(</mml:mo><mml:mi>P</mml:mi><mml:mo>,</mml:mo><mml:mi>q</mml:mi><mml:mo>)</mml:mo><mml:mo>=</mml:mo><mml:msubsup><mml:mtext>A</mml:mtext><mml:mrow><mml:mtext>SNN</mml:mtext></mml:mrow><mml:mtext>&#x02020;</mml:mtext></mml:msubsup><mml:mtext>Y,</mml:mtext></mml:mrow></mml:math></disp-formula>
<p>where the output biases of SNN are <bold><italic>q</italic></bold>, <inline-formula><mml:math id="M8"><mml:mrow><mml:msubsup><mml:mtext>A</mml:mtext><mml:mrow><mml:mtext>SNN</mml:mtext></mml:mrow><mml:mtext>&#x02020;</mml:mtext></mml:msubsup></mml:mrow></mml:math></inline-formula> is the pseudo-inverse matrix of <bold>A<sub>SNN</sub></bold>, and <bold>Y</bold> = <bold>(<italic>y<sub>1</sub>,....,y<sub>N</sub></italic>)<sup>T</sup></bold> is the ground-truth label matrix of the dataset.</p>
<p>We propose two other models: DRVFL and DELM. The backbone of the two other proposed models is the pre-trained DenseNet. We modify the pre-trained DenseNet in the two proposed models as the &#x0201C;modifications of the pre-trained DenseNet&#x0201D; in the DSNN. We replace the softmax and classification layer of the pre-trained DenseNet with six layers: FC128, ReLU, BN, FC2, softmax, and classification layer. We fine-tune the modified DenseNet by the training set. In the DRVFL, we select RVFL (Pao et al., <xref ref-type="bibr" rid="B41">1994</xref>) to substitute the last five layers of the fine-tuned DenseNet. The structure of RVFL is shown in <xref ref-type="fig" rid="F5">Figure 5A</xref>. In the DELM, ELM (Huang et al., <xref ref-type="bibr" rid="B18">2006</xref>) is chosen to replace the last five layers of the fine-tuned DenseNet. The structure of ELM is shown in <xref ref-type="fig" rid="F5">Figure 5B</xref>. ELM and RVFL are two types of RNNs.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>The structures of <bold>(A)</bold> RVFL and <bold>(B)</bold> ELM.</p></caption>
<graphic xlink:href="fnsys-16-838822-g0005.tif"/>
</fig>
<p>The yellow box represents the input, the pink circle denotes the hidden nodes, and the green box shows the output. The difference between these two RNNs is that there are shortcut connections from the input to the output in RVFL. The calculation steps are similar:</p>
<disp-formula id="E8"><label>(8)</label><mml:math id="M9"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mi>T</mml:mi></mml:msup><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mi>R</mml:mi><mml:mi>n</mml:mi></mml:msup><mml:mo>,</mml:mo><mml:mtext>&#x02009;&#x02009;</mml:mtext><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:mi>N</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="E9"><label>(9)</label><mml:math id="M10"><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mi>T</mml:mi></mml:msup><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mi>R</mml:mi><mml:mi>m</mml:mi></mml:msup><mml:mo>,</mml:mo><mml:mtext>&#x02009;&#x02009;</mml:mtext><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:mi>N</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Given <italic>N</italic> samples and dataset with the <italic>i</italic>-th sample as (<italic><bold>x<sub>i</sub>, y<sub>i</sub></bold></italic>)</p>
<p>where <italic>n</italic> is the input dimension, <italic>m</italic> is the output dimension. The training steps of these two RNNs are as follows:</p>
<p><bold>Step 1:</bold> <bold><italic>w<sub>j</sub></italic></bold> is the weight vector, which connects the input nodes with the <italic>j</italic>-th hidden node. The bias of the <italic>j</italic>-th hidden node is represented as <italic>d<sub>j</sub></italic>. We randomly assign <bold><italic>w<sub>j</sub></italic></bold> and <italic>d<sub>j</sub></italic> with values. These values will not change in training.</p>
<p><bold>Step 2:</bold> The hidden layer&#x02019;s output matrix is calculated as:</p>
<p>For RVFL:</p>
<disp-formula id="E10"><label>(10)</label><mml:math id="M11"><mml:mrow><mml:msub><mml:mtext>A</mml:mtext><mml:mrow><mml:mtext>RVFL</mml:mtext></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mtext>concat&#x02009;(X,K),</mml:mtext></mml:mrow></mml:math></disp-formula>
<p>where <bold>X</bold> = <bold>(<italic>x<sub>1</sub>,....,x<sub>N</sub></italic>)<sup>T</sup></bold> denotes the input matrix. The <bold>K</bold> is calculated as follows:</p>
<disp-formula id="E11"><label>(11)</label><mml:math id="M12"><mml:mrow><mml:msub><mml:mtext>K</mml:mtext><mml:mrow><mml:mtext>RVFL</mml:mtext></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mstyle><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>V</mml:mi></mml:munderover><mml:mrow><mml:mi>s</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>w</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mstyle><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:mi>N</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>where <italic>V</italic> is the number of the hidden nodes in the hidden layer, <italic>s</italic>() represents the sigmoid function.</p>
<p>For ELM:</p>
<disp-formula id="E12"><label>(12)</label><mml:math id="M13"><mml:mrow><mml:msub><mml:mtext>A</mml:mtext><mml:mrow><mml:mtext>ELM</mml:mtext></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mstyle><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>V</mml:mi></mml:munderover><mml:mrow><mml:mi>s</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>w</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mstyle><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>...</mml:mn><mml:mo>,</mml:mo><mml:mi>N</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p><bold>Step 3</bold>: The output weights (<bold><italic>p</italic></bold>) can be calculated by pseudo-inverse:</p>
<p>For RVFL:</p>
<disp-formula id="E13"><label>(13)</label><mml:math id="M14"><mml:mrow><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:msubsup><mml:mtext>A</mml:mtext><mml:mrow><mml:mtext>RVFL</mml:mtext></mml:mrow><mml:mtext>&#x02020;</mml:mtext></mml:msubsup><mml:mtext>Y,</mml:mtext></mml:mrow></mml:math></disp-formula>
<p>where <inline-formula><mml:math id="M15"><mml:mrow><mml:msubsup><mml:mtext>A</mml:mtext><mml:mrow><mml:mtext>RVFL</mml:mtext></mml:mrow><mml:mtext>&#x02020;</mml:mtext></mml:msubsup></mml:mrow></mml:math></inline-formula> is the pseudo-inverse matrix of <bold>A<sub>RVFL</sub></bold>, and <bold>Y</bold> = (<italic>y<sub>1</sub>,...,y<sub>N</sub></italic>)<sup>T</sup> is the ground-truth label matrix of the dataset.</p>
<p>For ELM:</p>
<disp-formula id="E14"><label>(14)</label><mml:math id="M16"><mml:mrow><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:msubsup><mml:mtext>A</mml:mtext><mml:mrow><mml:mtext>ELM</mml:mtext></mml:mrow><mml:mtext>&#x02020;</mml:mtext></mml:msubsup><mml:mtext>Y,</mml:mtext></mml:mrow></mml:math></disp-formula>
<p>where <inline-formula><mml:math id="M17"><mml:mrow><mml:msubsup><mml:mtext>A</mml:mtext><mml:mrow><mml:mtext>ELM</mml:mtext></mml:mrow><mml:mtext>&#x02020;</mml:mtext></mml:msubsup></mml:mrow></mml:math></inline-formula> is the pseudo-inverse matrix of <bold>A<sub>ELM</sub></bold>.</p>
<p>The backbone of these other two proposed models in this article is the same. The difference is that DRVFL chooses RVFL as its classifier, and DELM selects ELM as its classifier.</p>
</sec>
<sec id="s3-4">
<title>Evaluation</title>
<p>We define the unhealthy brain as the positive and the healthy brain as the negative. Five indicators are chosen to verify our model: accuracy (Acc), sensitivity (Sen), specificity (Spe), precision (Pre), and F1-score (F1), respectively. Their formulas are shown below:</p>
<disp-formula id="E15"><label>(15)</label><mml:math id="M18"><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mtext>Acc&#x02009;=&#x02009;&#x02009;</mml:mtext><mml:mfrac><mml:mrow><mml:mtext>TP&#x02009;+&#x02009;TN</mml:mtext></mml:mrow><mml:mrow><mml:mtext>TP&#x02009;+&#x02009;TN&#x02009;+&#x02009;FP&#x02009;+&#x02009;FN</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mtext>Sen&#x02009;=&#x02009;</mml:mtext><mml:mfrac><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mrow><mml:mtext>TP&#x02009;+&#x02009;FN</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mtext>Spe&#x02009;=&#x02009;</mml:mtext><mml:mfrac><mml:mrow><mml:mtext>TN</mml:mtext></mml:mrow><mml:mrow><mml:mtext>TN&#x02009;+&#x02009;FP</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mtext>Pre&#x02009;=&#x02009;</mml:mtext><mml:mfrac><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mrow><mml:mtext>TP&#x02009;+&#x02009;FP</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mtext>F1&#x02009;=&#x02009;</mml:mtext><mml:mfrac><mml:mrow><mml:mtext>2&#x000D7;TP</mml:mtext></mml:mrow><mml:mrow><mml:mtext>2TP&#x02009;+&#x02009;FP&#x02009;+&#x02009;FN</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:mrow></mml:math></disp-formula>
<p>where the definitions of TP, FN, FP, and TN are the true positive, false negative, false positive, and true negative, respectively.</p>
</sec>
</sec>
<sec id="s4">
<title>Results and Discussions</title>
<sec id="s4-1">
<title>Experiment Settings</title>
<p>We modify the hyper-parameter settings of the proposed DSNN. The max-epoch is set to 4 for reducing overfitting problems. We set our mini-batch size to 10 because the dataset is relatively small. According to the experience, the learning rate is 10<sup>&#x02212;4</sup>. A hyper-parameter we set in our model is the number of hidden nodes (<italic>V</italic>), which is set as 400 based on the input dimension. The hyper-parameter settings of our model are shown in <xref ref-type="table" rid="T5">Table 5</xref>.</p>
<table-wrap id="T5" position="float">
<label>Table 5</label>
<caption><p>The hyper-parameter settings of the proposed DSNN.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center">Hyper-parameter</th>
<th align="center">Value</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Mini-batch size</td>
<td align="center">10</td>
</tr>
<tr>
<td align="left">Max-epoch</td>
<td align="center">4</td>
</tr>
<tr>
<td align="left">Learning rate</td>
<td align="center">10<sup>&#x02212;4</sup></td>
</tr>
<tr>
<td align="left">Number of the hidden nodes V</td>
<td align="center">400</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4-2">
<title>Performances of the DSNN</title>
<p>We use five-fold cross-validation to evaluate the proposed DSNN. The classification performance of our model is given in <xref ref-type="table" rid="T6">Table 6</xref>. The Acc, Sen, Spe, Pre, and F1 of the proposed DSNN are 98.46% &#x000B1; 2.05%, 100.00% &#x000B1; 0.00%, 85.00% &#x000B1; 20.00%, 98.36% &#x000B1; 2.17%, and 99.16% &#x000B1; 1.11%, respectively. The results of DSNN are higher than 85%. In particular, the sensitivity is 100%. The ROC curve is shown in <xref ref-type="fig" rid="F6">Figure 6</xref>. The AUC value is 0.9786. A classifier is considered effective when the AUC value is greater than 0.95. From these results, it can be concluded that DSNN is an effective model to classify brain diseases.</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>ROC curve of DSNN.</p></caption>
<graphic xlink:href="fnsys-16-838822-g0006.tif"/>
</fig>
<table-wrap id="T6" position="float">
<label>Table 6</label>
<caption><p>The classification performance based on five-fold cross-validation (unit: %).</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center">Methods</th>
<th align="center">Fold</th>
<th align="center" colspan="1">Acc</th>
<th align="center" colspan="1">Sen</th>
<th align="center" colspan="1">Spe</th>
<th align="center" colspan="1">Pre</th>
<th align="center" colspan="1">F1</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">DSNN(Ours)</td>
<td align="center">F 1</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 2</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 3</td>
<td align="center">94.87</td>
<td align="center">100.00</td>
<td align="center">50.00</td>
<td align="center">94.59</td>
<td align="center">97.22</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 4</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 5</td>
<td align="center">97.44</td>
<td align="center">100.00</td>
<td align="center">75.00</td>
<td align="center">97.22</td>
<td align="center">98.59</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Avr</td>
<td align="center"><bold>98.46</bold></td>
<td align="center"><bold>100.00</bold></td>
<td align="center"><bold>85.00</bold></td>
<td align="center"><bold>98.36</bold></td>
<td align="center"><bold>99.16</bold></td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Std</td>
<td align="center">&#x000B1;2.05</td>
<td align="center">&#x000B1;0.00</td>
<td align="center">&#x000B1;20.00</td>
<td align="center">&#x000B1;2.17</td>
<td align="center">&#x000B1;1.11</td>
</tr>
<tr>
<td align="left">DRVFL(Ours)</td>
<td align="center">F 1</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 2</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 3</td>
<td align="center">89.74</td>
<td align="center">100.00</td>
<td align="center">0.00</td>
<td align="center">89.74</td>
<td align="center">94.59</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 4</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 5</td>
<td align="center">97.44</td>
<td align="center">100.00</td>
<td align="center">75.00</td>
<td align="center">97.22</td>
<td align="center">98.59</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Avr</td>
<td align="center">97.44</td>
<td align="center">100.00</td>
<td align="center">75.00</td>
<td align="center">97.39</td>
<td align="center">98.64</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Std</td>
<td align="center">&#x000B1;3.97</td>
<td align="center">&#x000B1;0.00</td>
<td align="center">&#x000B1;38.73</td>
<td align="center">&#x000B1;3.97</td>
<td align="center">&#x000B1;2.10</td>
</tr>
<tr>
<td align="left">DELM(Ours)</td>
<td align="center">F 1</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 2</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 3</td>
<td align="center">92.31</td>
<td align="center">100.00</td>
<td align="center">25.00</td>
<td align="center">92.11</td>
<td align="center">95.89</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 4</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 5</td>
<td align="center">97.44</td>
<td align="center">100.00</td>
<td align="center">75.00</td>
<td align="center">97.22</td>
<td align="center">98.59</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Avr</td>
<td align="center">97.95</td>
<td align="center">100.00</td>
<td align="center">80.00</td>
<td align="center">97.87</td>
<td align="center">98.90</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Std</td>
<td align="center">&#x000B1;2.99</td>
<td align="center">&#x000B1;0.00</td>
<td align="center">&#x000B1;29.15</td>
<td align="center">&#x000B1;3.07</td>
<td align="center">&#x000B1;1.60</td>
</tr>
<tr>
<td align="left">Fine-tuned DenseNet</td>
<td align="center">F 1</td>
<td align="center">87.50</td>
<td align="center">86.11</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">92.54</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 2</td>
<td align="center">82.05</td>
<td align="center">80.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">88.89</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 3</td>
<td align="center">89.74</td>
<td align="center">88.57</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">93.94</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 4</td>
<td align="center">85.00</td>
<td align="center">83.33</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">90.91</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 5</td>
<td align="center">79.49</td>
<td align="center">77.14</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">87.10</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Avr</td>
<td align="center">84.76</td>
<td align="center">83.03</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">90.67</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Std</td>
<td align="center">&#x000B1;3.67</td>
<td align="center">&#x000B1;4.10</td>
<td align="center">&#x000B1;0.00</td>
<td align="center">&#x000B1;0.00</td>
<td align="center">&#x000B1;2.46</td>
</tr>
<tr>
<td align="left">AlexNet-SNN</td>
<td align="center">F 1</td>
<td align="center">89.74</td>
<td align="center">100.00</td>
<td align="center">0.00</td>
<td align="center">89.74</td>
<td align="center">94.59</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 2</td>
<td align="center">89.74</td>
<td align="center">100.00</td>
<td align="center">0.00</td>
<td align="center">89.74</td>
<td align="center">94.59</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 3</td>
<td align="center">90.00</td>
<td align="center">97.22</td>
<td align="center">25.00</td>
<td align="center">92.11</td>
<td align="center">94.59</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 4</td>
<td align="center">90.00</td>
<td align="center">97.22</td>
<td align="center">25.00</td>
<td align="center">92.11</td>
<td align="center">94.59</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 5</td>
<td align="center">89.74</td>
<td align="center">97.14</td>
<td align="center">25.00</td>
<td align="center">91.89</td>
<td align="center">94.44</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Avr</td>
<td align="center">89.84</td>
<td align="center">98.32</td>
<td align="center">15.00</td>
<td align="center">91.12</td>
<td align="center">94.56</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Std</td>
<td align="center">&#x000B1;0.13</td>
<td align="center">&#x000B1;1.38</td>
<td align="center">&#x000B1;12.25</td>
<td align="center">&#x000B1;1.13</td>
<td align="center">&#x000B1;0.06</td>
</tr>
<tr>
<td align="left">ResNet-18-SNN</td>
<td align="center">F 1</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 2</td>
<td align="center">97.50</td>
<td align="center">100.00</td>
<td align="center">75.00</td>
<td align="center">97.30</td>
<td align="center">98.63</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 3</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 4</td>
<td align="center">94.87</td>
<td align="center">100.00</td>
<td align="center">50.00</td>
<td align="center">94.59</td>
<td align="center">97.22</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 5</td>
<td align="center">94.87</td>
<td align="center">97.14</td>
<td align="center">75.00</td>
<td align="center">97.14</td>
<td align="center">97.14</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Avr</td>
<td align="center">97.45</td>
<td align="center">99.43</td>
<td align="center">80.00</td>
<td align="center">97.81</td>
<td align="center">98.60</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Std</td>
<td align="center">&#x000B1;2.29</td>
<td align="center">&#x000B1;1.14</td>
<td align="center">&#x000B1;18.71</td>
<td align="center">&#x000B1;2.03</td>
<td align="center">&#x000B1;1.26</td>
</tr>
<tr>
<td align="left">ResNet-50-SNN</td>
<td align="center">F 1</td>
<td align="center">95.00</td>
<td align="center">94.44</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">97.14</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 2</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 3</td>
<td align="center">97.44</td>
<td align="center">97.14</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">98.55</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 4</td>
<td align="center">95.00</td>
<td align="center">100.00</td>
<td align="center">50.00</td>
<td align="center">94.74</td>
<td align="center">97.30</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 5</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Avr</td>
<td align="center">97.49</td>
<td align="center">98.32</td>
<td align="center">90.00</td>
<td align="center">98.95</td>
<td align="center">98.60</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Std</td>
<td align="center">&#x000B1;2.24</td>
<td align="center">&#x000B1;2.23</td>
<td align="center">&#x000B1;20.00</td>
<td align="center">&#x000B1;2.10</td>
<td align="center">&#x000B1;1.24</td>
</tr>
<tr>
<td align="left">VGG-SNN</td>
<td align="center">F 1</td>
<td align="center">97.50</td>
<td align="center">100.00</td>
<td align="center">75.00</td>
<td align="center">97.30</td>
<td align="center">98.63</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 2</td>
<td align="center">87.50</td>
<td align="center">94.44</td>
<td align="center">25.00</td>
<td align="center">91.89</td>
<td align="center">93.15</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 3</td>
<td align="center">94.87</td>
<td align="center">97.14</td>
<td align="center">75.00</td>
<td align="center">97.14</td>
<td align="center">97.14</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 4</td>
<td align="center">89.74</td>
<td align="center">100.00</td>
<td align="center">0.00</td>
<td align="center">89.74</td>
<td align="center">94.59</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 5</td>
<td align="center">87.18</td>
<td align="center">88.57</td>
<td align="center">75.00</td>
<td align="center">96.88</td>
<td align="center">92.54</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Avr</td>
<td align="center">91.36</td>
<td align="center">96.03</td>
<td align="center">50.00</td>
<td align="center">94.59</td>
<td align="center">95.21</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Std</td>
<td align="center">&#x000B1;4.12</td>
<td align="center">&#x000B1;4.26</td>
<td align="center">&#x000B1;31.62</td>
<td align="center">&#x000B1;3.16</td>
<td align="center">&#x000B1;2.33</td>
</tr>
<tr>
<td align="left">Restricted DenseNet-SNN</td>
<td align="center">F 1</td>
<td align="center">94.87</td>
<td align="center">94.29</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">97.06</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 2</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 3</td>
<td align="center">97.37</td>
<td align="center">97.06</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">98.51</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 4</td>
<td align="center">94.87</td>
<td align="center">100.00</td>
<td align="center">50.00</td>
<td align="center">94.59</td>
<td align="center">97.22</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">F 5</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
<td align="center">100.00</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Avr</td>
<td align="center">97.42</td>
<td align="center">98.27</td>
<td align="center">90.00</td>
<td align="center">98.92</td>
<td align="center">98.56</td>
</tr>
<tr>
<td align="center"></td>
<td align="center">Std</td>
<td align="center">&#x000B1;2.09</td>
<td align="center">&#x000B1;1.83</td>
<td align="center">&#x000B1;1.97</td>
<td align="center">&#x000B1;2.09</td>
<td align="center">&#x000B1;1.69</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The bold values are results of our proposed model.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s4-3">
<title>Comparison of Three Proposed Models</title>
<p>The classification performances of DRVFL and DELM based on the five-fold cross-validation are shown in <xref ref-type="table" rid="T6">Table 6</xref>. For a more explicit comparison, the comparison figure of the three proposed models is presented in <xref ref-type="fig" rid="F7">Figure 7A</xref>. The proposed DSNN is 1.06% more accurate than the proposed DRVFL and 0.53% more accurate than the proposed DELM. The proposed DSNN gets the best performance among the three proposed models because there is an output bias in SNN.</p>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>Model comparison. <bold>(A)</bold> Comparison of three proposed models (unit: %). <bold>(B)</bold> Comparison with the fine-tuned DenseNet (unit: %). <bold>(C)</bold> The classification performance of the proposed DSNN with different backbones (unit: %). <bold>(D)</bold> Comparison with the spiking neural network (unit: %).</p></caption>
<graphic xlink:href="fnsys-16-838822-g0007.tif"/>
</fig>
</sec>
<sec id="s4-4">
<title>Comparison With the Fine-Tuned DenseNet</title>
<p>We compare the proposed DSNN with the fine-tuned DenseNet. The classification performance of the fine-tuned DenseNet is given in <xref ref-type="table" rid="T6">Table 6</xref>. The comparison figure of the proposed DSNN with the fine-tuned DenseNet is given in <xref ref-type="fig" rid="F7">Figure 7B</xref>. It can be seen from <xref ref-type="table" rid="T6">Table 6</xref> and <xref ref-type="fig" rid="F7">Figure 7B</xref> that the accuracy of the proposed DSNN results is 13.97% greater than that of the fine-tuned DenseNet.</p>
<p>DenseNet has too many layers and parameters and is prone to meet overfitting problems because our dataset is relatively small. The structure of SNN is simple and has only three layers. What&#x02019;s more, there are fewer parameters in SNN, which is not easy to produce overfitting problems. So, our method achieves better accuracy than fine-tuned DenseNet.</p>
</sec>
<sec id="s4-5">
<title>Comparison of Different Backbones</title>
<p>We test the performance of the proposed DSNN with different backbones. These backbones are AlexNet, ResNet-18, ResNet-50, and VGG, respectively. The classification performances of the proposed DSNN with different backbones are shown in <xref ref-type="table" rid="T6">Table 6</xref>. For a clear comparison, the comparison of DSNN with different backbones is presented in <xref ref-type="fig" rid="F7">Figure 7C</xref>.</p>
<p>DenseNet as the backbone model achieves the best results compared with other backbones. The reason is that DenseNet can reduce gradient vanishing problems better than other CNN models by establishing dense connectivity between all front and rear layers. There are too many parameters in VGG and AlexNet. There are 138M parameters for VGG and 61M parameters for AlexNet. However, there are only 20M parameters in DenseNet. More epochs are needed for VGG and AlexNet to converge. Nevertheless, to prevent overfitting problems, we set the max-epoch to 4. Therefore, DenseNet obtains better performance than VGG and AlexNet. Compared with ResNet, dense connections in the layers can provide more supervision information so that DenseNet can produce better classification performance. DenseNet has also shown its superiority in image learning in other studies, such as Ker et al. (<xref ref-type="bibr" rid="B23">2017</xref>), Zhang and Patel (<xref ref-type="bibr" rid="B61">2018</xref>), and Lundervold and Lundervold (<xref ref-type="bibr" rid="B33">2019</xref>).</p>
</sec>
<sec id="s4-6">
<title>Comparison With Restricted DenseNet</title>
<p>We limit the number of connections in the DenseNet block. Each layer is only connected to the previous layer in the last block. The results are shown in <xref ref-type="table" rid="T6">Table 6</xref>. Except for the specificity (Spe) value, all other results are not as good as the results of the network we proposed. It is concluded that reducing some dense connections will not improve the classification performance.</p>
</sec>
<sec id="s4-7">
<title>Comparison With Spiking Neural Network</title>
<p>We compare the proposed DSNN with the spiking neural network (Yaqoob and Wr&#x000F3;bel, <xref ref-type="bibr" rid="B60">2017</xref>). Although both spiking and convolutional neural networks are inspired by the brain, there are still differences between them. The communication between neurons is completed in the spiking neural network by broadcasting the action sequence (Tavanaei et al., <xref ref-type="bibr" rid="B56">2019</xref>). The final result of the spiking neural network is shown in <xref ref-type="table" rid="T7">Table 7</xref>. The comparison figure is given in <xref ref-type="fig" rid="F7">Figure 7D</xref>. In conclusion, the performance of our model is better than that of the spiking neural network.</p>
<table-wrap id="T7" position="float">
<label>Table 7</label>
<caption><p>The final result of the spiking neural network (unit: %).</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center">Model</th>
<th align="center">Acc</th>
<th align="center">Sen</th>
<th align="center">Spe</th>
<th align="center">Pre</th>
<th align="center">F1</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Spiking neural network</td>
<td align="center">86.05</td>
<td align="center">100.00</td>
<td align="center">0.00</td>
<td align="center">96.05</td>
<td align="center">92.50</td>
</tr>
<tr>
<td align="left"><bold>DSNN (Ours)</bold></td>
<td align="center"><bold>98.46</bold></td>
<td align="center"><bold>100.00</bold></td>
<td align="center"><bold>85.00</bold></td>
<td align="center"><bold>98.36</bold></td>
<td align="center"><bold>99.16</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The bold values are results of our proposed model.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s4-8">
<title>Explainability of the Proposed DSNN</title>
<p>It is important to explain the DCNNs because it is difficult for researchers to figure out how DCNNs make predictions. We can visualize the attention of DCNNs by the Gradient-weighted class activation mapping (Grad-CAM). We present the raw images and heatmap images in <xref ref-type="fig" rid="F8">Figure 8</xref>. The brain diseases are within the red region, which receives the greatest attention in Grad-CAM.</p>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p>Explainability of the proposed DSNN.</p></caption>
<graphic xlink:href="fnsys-16-838822-g0008.tif"/>
</fig>
<p>The blue region receives the lowest attention in Grad-CAM. Based on the Grad-CAM, we can conclude that DSNN can classify brain diseases in MRI. Also, some other studies have proven that the Grad-CAM efficiently visualizes the attention of DCNNs, such as Chattopadhay et al. (<xref ref-type="bibr" rid="B9">2018</xref>), Woo et al. (<xref ref-type="bibr" rid="B58">2018</xref>), Chen et al. (<xref ref-type="bibr" rid="B10">2020</xref>), and Panwar et al. (<xref ref-type="bibr" rid="B40">2020</xref>).</p>
</sec>
<sec id="s4-9">
<title>Comparison With Other State-of-the-Art Methods</title>
<p>We compare the proposed DSNN with other state-of-the-art methods. These state-of-the-art methods are: ANN (Arunkumar et al., <xref ref-type="bibr" rid="B5">2020</xref>), PR2G (Kalaiselvi et al., <xref ref-type="bibr" rid="B21">2020</xref>), SRH + CNNs (Hollon et al., <xref ref-type="bibr" rid="B15">2020</xref>), BPNN (Hemanth et al., <xref ref-type="bibr" rid="B14">2011</xref>), LVQNN (Nayef et al., <xref ref-type="bibr" rid="B37">2013</xref>), and LRC (Chen et al., <xref ref-type="bibr" rid="B11">2017</xref>), respectively. The results are presented in <xref ref-type="table" rid="T8">Table 8</xref>. The comparison chart is given in <xref ref-type="fig" rid="F9">Figure 9</xref>. The proposed DSNN gets the best performance among the list methods.</p>
<fig id="F9" position="float">
<label>Figure 9</label>
<caption><p>Comparison with other state-of-the-art methods (unit: %).</p></caption>
<graphic xlink:href="fnsys-16-838822-g0009.tif"/>
</fig>
<table-wrap id="T8" position="float">
<label>Table 8</label>
<caption><p>Comparison with other state-of-the-art methods (unit: %).</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center">Methods</th>
<th align="center">Sen</th>
<th align="center">Spe</th>
<th align="center">Pre</th>
<th align="center">Acc</th>
<th align="center">F1</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">ANN (Arunkumar et al., <xref ref-type="bibr" rid="B5">2020</xref>)</td>
<td align="center">89.00</td>
<td align="center">-</td>
<td align="center">-</td>
<td align="center">92.14</td>
<td align="center">-</td>
</tr>
<tr>
<td align="left">PR2G (Kalaiselvi et al., <xref ref-type="bibr" rid="B21">2020</xref>)</td>
<td align="center">98.46</td>
<td align="center">-</td>
<td align="center">-</td>
<td align="center">83.90</td>
<td align="center">-</td>
</tr>
<tr>
<td align="left">SRH + CNNs (Hollon et al., <xref ref-type="bibr" rid="B15">2020</xref>)</td>
<td align="center">-</td>
<td align="center">-</td>
<td align="center">-</td>
<td align="center"><bold>94.6</bold></td>
<td align="center">-</td>
</tr>
<tr>
<td align="left">BPNN (Hemanth et al., <xref ref-type="bibr" rid="B14">2011</xref>)</td>
<td align="center">57.54</td>
<td align="center">54.50</td>
<td align="center">91.71</td>
<td align="center">57.23</td>
<td align="center">70.72</td>
</tr>
<tr>
<td align="left">LVQNN (Nayef et al., <xref ref-type="bibr" rid="B37">2013</xref>)</td>
<td align="center">59.94</td>
<td align="center">61.00</td>
<td align="center">93.08</td>
<td align="center">60.05</td>
<td align="center">72.92</td>
</tr>
<tr>
<td align="left">LRC (Chen et al., <xref ref-type="bibr" rid="B11">2017</xref>)</td>
<td align="center">100.00</td>
<td align="center">58.50</td>
<td align="center">95.47</td>
<td align="center">95.74</td>
<td align="center">97.68</td>
</tr>
<tr>
<td align="left">DSNN (Ours)</td>
<td align="center"><bold>100.00</bold></td>
<td align="center"><bold>85.00</bold></td>
<td align="center"><bold>98.36</bold></td>
<td align="center"><bold>98.46</bold></td>
<td align="center"><bold>99.16</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>Bold means the best results, - means not available</italic>.</p>
</table-wrap-foot>
</table-wrap>
<p>Our model is an effective method to classify brain diseases based on the comparison results. The proposed DSNN can achieve these good results because deep learning is used to extract features, and SNN is used for classification.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<title>Conclusion</title>
<p>Three novel models are proposed to automatically classify brain diseases in this article. The proposed models are DSNN, DRVFL, and DELM. The DSNN gets the best performance among the three proposed models in terms of classification performance. The backbone of the proposed DSNN is the pre-trained DenseNet. We modify the pre-trained DenseNet. Then, the modified DenseNet is fine-tuned on the dataset. The last five layers within the fine-tuned DenseNet are substituted by the Schmidt neural network (SNN). In the proposed DSNN, the fine-tuned DenseNet plays the role of feature extraction. The extracted features train the SNN. We evaluate the proposed DSNN by using five-fold cross-validation. The accuracy, sensitivity, specificity, precision, and F1-score of the proposed DSNN on the test set are 98.46% &#x000B1; 2.05%, 100.00% &#x000B1; 0.00%, 85.00% &#x000B1; 20.00%, 98.36% &#x000B1; 2.17%, and 99.16% &#x000B1; 1.11%, respectively. The proposed DSNN is compared with other state-of-the-art methods and obtains the best results among the listed methods. Since our model obtains the best performance, we can conclude that DSNN is an effective model for classifying brain diseases.</p>
<p>Although the proposed model gets good results, this article still has some shortcomings. (1) The dataset is relatively small. (2) We divide the datasets into two categories. However, there are many kinds of brain diseases.</p>
<p>We will collect more data to test the proposed model in the future. Then, we will try to classify multiple brain diseases. What&#x02019;s more, we will do more research on brain segmentation. We will try more new deep learning methods, such as VIT, attention learning, etc.</p>
</sec>
<sec id="s6" sec-type="data-availability">
<title>Data Availability Statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found here: https://www.med.harvard.edu/aanlib.</p>
</sec>
<sec id="s7">
<title>Author Contributions</title>
<p>ZZ: conceptualization, software, data curation, writing&#x02014;original draft, writing&#x02014;review and editing, visualization. SL: conceptualization, software, data curation, writing&#x02014;review and editing. S-HW: methodology, software, validation, investigation, resources, writing&#x02014;review and editing, supervision, and funding acquisition. JG: methodology, validation, formal analysis, resources, writing&#x02014;original draft, writing&#x02014;review and editing, supervision. Y-DZ: methodology, formal analysis, investigation, data curation, writing&#x02014;original draft, writing&#x02014;review and editing, visualization, supervision, project administration, and funding acquisition.</p>
</sec>
<sec id="s8" sec-type="COI-statement">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s9">
<title>Publisher&#x02019;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
</body>
<back>
<sec id="s10" sec-type="funding-information">
<title>Funding</title>
<p>The article was partially supported by Hope Foundation for Cancer Research, UK (RM60G0680); Royal Society International Exchanges Cost Share Award, UK (RP202G0230); Medical Research Council Confidence in Concept Award, UK (MC_PC_17171); British Heart Foundation Accelerator Award, UK (AA/18/3/34220); Sino-UK Industrial Fund, UK (RP202G0289); Global Challenges Research Fund (GCRF), UK (P202PF11); LIAS Pioneering Partnerships award, UK (P202ED10); Data Science Enhancement Fund, UK (P202RE237); Guangxi Key Laboratory of Trusted Software (kx201901).</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aboelenein</surname> <given-names>N. M.</given-names></name> <name><surname>Songhao</surname> <given-names>P.</given-names></name> <name><surname>Koubaa</surname> <given-names>A.</given-names></name> <name><surname>Noor</surname> <given-names>A.</given-names></name> <name><surname>Afifi</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>HTTU-net: hybrid two track U-net for automatic brain tumor segmentation</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>101406</fpage>&#x02013;<lpage>101415</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2020.2998601</pub-id></citation></ref>
<ref id="B2"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Albawi</surname> <given-names>S.</given-names></name> <name><surname>Mohammed</surname> <given-names>T. A.</given-names></name> <name><surname>Al-Zawi</surname> <given-names>S.</given-names></name></person-group> (<year>2017</year>). &#x0201C;<article-title>Understanding of a convolutional neural network</article-title>,&#x0201D; in <conf-name>2017 International Conference on Engineering and Technology (ICET)</conf-name> <conf-loc>(Antalya, Turkey)</conf-loc>. <pub-id pub-id-type="doi">10.1109/ICEngTechnol.2017.8308186</pub-id></citation></ref>
<ref id="B4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Amin</surname> <given-names>J.</given-names></name> <name><surname>Sharif</surname> <given-names>M.</given-names></name> <name><surname>Raza</surname> <given-names>M.</given-names></name> <name><surname>Saba</surname> <given-names>T.</given-names></name> <name><surname>Sial</surname> <given-names>R.</given-names></name> <name><surname>Shad</surname> <given-names>S. A.</given-names></name></person-group> (<year>2019a</year>). <article-title>Brain tumor detection: a long short-term memory (LSTM)-based learning model</article-title>. <source>Neural Comput. Appl.</source> <volume>32</volume>, <fpage>15965</fpage>&#x02013;<lpage>15973</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-019-04650-7</pub-id></citation></ref>
<ref id="B3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Amin</surname> <given-names>J.</given-names></name> <name><surname>Sharif</surname> <given-names>M.</given-names></name> <name><surname>Gul</surname> <given-names>N.</given-names></name> <name><surname>Raza</surname> <given-names>M.</given-names></name> <name><surname>Anjum</surname> <given-names>M. A.</given-names></name> <name><surname>Nisar</surname> <given-names>M. W.</given-names></name> <etal/></person-group>. (<year>2019b</year>). <article-title>Brain tumor detection by using stacked autoencoders in deep learning</article-title>. <source>J. Med. Syst.</source> <volume>44</volume>:<fpage>32</fpage>. <pub-id pub-id-type="doi">10.1007/s10916-019-1483-2</pub-id><pub-id pub-id-type="pmid">31848728</pub-id></citation></ref>
<ref id="B5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Arunkumar</surname> <given-names>N.</given-names></name> <name><surname>Mohammed</surname> <given-names>M. A.</given-names></name> <name><surname>Mostafa</surname> <given-names>S. A.</given-names></name> <name><surname>Ibrahim</surname> <given-names>D. A.</given-names></name> <name><surname>Rodrigues</surname> <given-names>J. J.</given-names></name> <name><surname>de Albuquerque</surname> <given-names>V. H. C.</given-names></name></person-group> (<year>2020</year>). <article-title>Fully automatic model-based segmentation and classification approach for MRI brain tumor using artificial neural networks</article-title>. <source>Concurrency Comput. Pract. Experience</source> <volume>32</volume>:<fpage>e4962</fpage>. <pub-id pub-id-type="doi">10.1002/cpe.4962</pub-id></citation></ref>
<ref id="B6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ashraf</surname> <given-names>A.</given-names></name> <name><surname>Naz</surname> <given-names>S.</given-names></name> <name><surname>Shirazi</surname> <given-names>S. H.</given-names></name> <name><surname>Razzak</surname> <given-names>I.</given-names></name> <name><surname>Parsad</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Deep transfer learning for Alzheimer neurological disorder detection</article-title>. <source>Multimed. Tools Appl.</source> <volume>80</volume>, <fpage>30117</fpage>&#x02013;<lpage>30142</lpage>. <pub-id pub-id-type="doi">10.1007/s11042-020-10331-8</pub-id></citation></ref>
<ref id="B7"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Bhanothu</surname> <given-names>Y.</given-names></name> <name><surname>Kamalakannan</surname> <given-names>A.</given-names></name> <name><surname>Rajamanickam</surname> <given-names>G.</given-names></name></person-group> (<year>2020</year>). &#x0201C;<article-title>Detection and classification of brain tumor in MRI images using deep convolutional network</article-title>,&#x0201D; in <conf-name>2020 6th International Conference on Advanced Computing and Communication Systems (ICACCS)</conf-name> <conf-loc>(Coimbatore, India: IEEE)</conf-loc>, <fpage>248</fpage>&#x02013;<lpage>252</lpage>. <pub-id pub-id-type="doi">10.1109/ICACCS48705.2020.9074375</pub-id></citation></ref>
<ref id="B8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chatterjee</surname> <given-names>S.</given-names></name> <name><surname>Das</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>A novel systematic approach to diagnose brain tumor using integrated type-II fuzzy logic and ANFIS (adaptive neuro-fuzzy inference system) model</article-title>. <source>Soft Comput.</source> <volume>24</volume>, <fpage>11731</fpage>&#x02013;<lpage>11754</lpage>. <pub-id pub-id-type="doi">10.1007/s00500-019-04635-7</pub-id></citation></ref>
<ref id="B9"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Chattopadhay</surname> <given-names>A.</given-names></name> <name><surname>Sarkar</surname> <given-names>A.</given-names></name> <name><surname>Howlader</surname> <given-names>P.</given-names></name> <name><surname>Balasubramanian</surname> <given-names>V. N.</given-names></name></person-group> (<year>2018</year>). &#x0201C;<article-title>Grad-cam++: generalized gradient-based visual explanations for deep convolutional networks</article-title>,&#x0201D; in <conf-name>2018 IEEE Winter Conference on Applications of Computer Vision (WACV)</conf-name> <conf-loc>(Lake Tahoe, NV, USA)</conf-loc>, <fpage>839</fpage>&#x02013;<lpage>847</lpage>. <pub-id pub-id-type="doi">10.1109/WACV.2018.00097</pub-id></citation></ref>
<ref id="B10"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>L.</given-names></name> <name><surname>Chen</surname> <given-names>J.</given-names></name> <name><surname>Hajimirsadeghi</surname> <given-names>H.</given-names></name> <name><surname>Mori</surname> <given-names>G.</given-names></name></person-group> (<year>2020</year>). &#x0201C;<article-title>Adapting Grad-CAM for embedding networks</article-title>,&#x0201D; in <conf-name>Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)</conf-name> <conf-loc>(Snowmass, CO, USA)</conf-loc>, <fpage>2794</fpage>&#x02013;<lpage>2803</lpage>. <pub-id pub-id-type="doi">10.1109/WACV45572.2020.9093461</pub-id></citation></ref>
<ref id="B11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Shao</surname> <given-names>Y.</given-names></name> <name><surname>Yan</surname> <given-names>J.</given-names></name> <name><surname>Yuan</surname> <given-names>T.-F.</given-names></name> <name><surname>Qu</surname> <given-names>Y.</given-names></name> <name><surname>Lee</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>A feature-free 30-disease pathological brain detection system by linear regression classifier</article-title>. <source>CNS Neurol. Disord. Drug Targets</source> <volume>16</volume>, <fpage>5</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.2174/1871527314666161124115531</pub-id><pub-id pub-id-type="pmid">27890009</pub-id></citation></ref>
<ref id="B12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>G&#x000F3;rriz</surname> <given-names>J. M.</given-names></name> <name><surname>Ram&#x000ED;rez</surname> <given-names>J.</given-names></name> <name><surname>Ort&#x000ED;z</surname> <given-names>A.</given-names></name> <name><surname>Mart&#x000ED;nez-Murcia</surname> <given-names>F. J.</given-names></name> <name><surname>Segovia</surname> <given-names>F.</given-names></name> <name><surname>Suckling</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Artificial intelligence within the interplay between natural and artificial computation: advances in data science, trends and applications</article-title>. <source>Neurocomputing</source> <volume>410</volume>, <fpage>237</fpage>&#x02013;<lpage>270</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2020.05.078</pub-id></citation></ref>
<ref id="B13"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>He</surname> <given-names>K.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Ren</surname> <given-names>S.</given-names></name> <name><surname>Sun</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). &#x0201C;<article-title>Deep residual learning for image recognition</article-title>,&#x0201D; <conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name> <conf-loc>(Las Vegas, NV, USA)</conf-loc>, <fpage>770</fpage>&#x02013;<lpage>778</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2016.90</pub-id></citation></ref>
<ref id="B14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hemanth</surname> <given-names>D. J.</given-names></name> <name><surname>Vijila</surname> <given-names>C. K. S.</given-names></name> <name><surname>Anitha</surname> <given-names>J.</given-names></name></person-group> (<year>2011</year>). <article-title>A high speed back propagation neural network for multistage MR brain tumor image segmentation</article-title>. <source>Neural Netw. World</source> <volume>21</volume>, <fpage>51</fpage>&#x02013;<lpage>56</lpage>. <pub-id pub-id-type="doi">10.14311/NNW.2011.21.004</pub-id></citation></ref>
<ref id="B15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hollon</surname> <given-names>T. C.</given-names></name> <name><surname>Pandian</surname> <given-names>B.</given-names></name> <name><surname>Adapa</surname> <given-names>A. R.</given-names></name> <name><surname>Urias</surname> <given-names>E.</given-names></name> <name><surname>Save</surname> <given-names>A. V.</given-names></name> <name><surname>Khalsa</surname> <given-names>S. S. S.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Near real-time intraoperative brain tumor diagnosis using stimulated Raman histology and deep neural networks</article-title>. <source>Nat. Med.</source> <volume>26</volume>, <fpage>52</fpage>&#x02013;<lpage>58</lpage>. <pub-id pub-id-type="doi">10.1038/s41591-019-0715-9</pub-id><pub-id pub-id-type="pmid">31907460</pub-id></citation></ref>
<ref id="B16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>A.</given-names></name> <name><surname>Razmjooy</surname> <given-names>N.</given-names></name></person-group> (<year>2020</year>). <article-title>Brain tumor diagnosis based on metaheuristics and deep learning</article-title>. <source>Int. J. Imaging Syst. Technol.</source> <volume>31</volume>, <fpage>657</fpage>&#x02013;<lpage>669</lpage>. <pub-id pub-id-type="doi">10.1002/ima.22495</pub-id></citation></ref>
<ref id="B17"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>G.</given-names></name> <name><surname>Liu</surname> <given-names>Z.</given-names></name> <name><surname>Van Der Maaten</surname> <given-names>L.</given-names></name> <name><surname>Weinberger</surname> <given-names>K. Q.</given-names></name></person-group> (<year>2017</year>). &#x0201C;<article-title>Densely connected convolutional networks</article-title>,&#x0201D; <conf-name>Proceedings of the IEEE Conference On Computer Vision And Pattern Recognition (CVPR)</conf-name> <conf-loc>(Honolulu, HI, USA)</conf-loc>, <fpage>4700</fpage>&#x02013;<lpage>4708</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2017.243</pub-id></citation></ref>
<ref id="B18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>G.-B.</given-names></name> <name><surname>Zhu</surname> <given-names>Q.-Y.</given-names></name> <name><surname>Siew</surname> <given-names>C.-K.</given-names></name></person-group> (<year>2006</year>). <article-title>Extreme learning machine: theory and applications</article-title>. <source>Neurocomputing</source> <volume>70</volume>, <fpage>489</fpage>&#x02013;<lpage>501</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2005.12.126</pub-id></citation></ref>
<ref id="B19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>Z.</given-names></name> <name><surname>Xu</surname> <given-names>H.</given-names></name> <name><surname>Su</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Luo</surname> <given-names>Y.</given-names></name> <name><surname>Zhao</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>A computer-aided diagnosis system for brain magnetic resonance imaging images using a novel differential feature neural network</article-title>. <source>Comput. Biol. Med.</source> <volume>121</volume>:<fpage>103818</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.103818</pub-id><pub-id pub-id-type="pmid">32568685</pub-id></citation></ref>
<ref id="B20"><citation citation-type="web"><person-group person-group-type="author"><name><surname>Johnson</surname> <given-names>K. A.</given-names></name> <name><surname>Becker</surname> <given-names>J. A.</given-names></name></person-group> (<year>2021</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.med.harvard.edu/aanlib/">https://www.med.harvard.edu/aanlib/</ext-link>. Accessed December, 2021.</citation></ref>
<ref id="B21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kalaiselvi</surname> <given-names>T.</given-names></name> <name><surname>Kumarashankar</surname> <given-names>P.</given-names></name> <name><surname>Sriramakrishnan</surname> <given-names>P.</given-names></name></person-group> (<year>2020</year>). <article-title>Three-phase automatic brain tumor diagnosis system using patches based updated run length region growing technique</article-title>. <source>J. Digit. Imaging</source> <volume>33</volume>, <fpage>465</fpage>&#x02013;<lpage>479</lpage>. <pub-id pub-id-type="doi">10.1007/s10278-019-00276-2</pub-id><pub-id pub-id-type="pmid">31529237</pub-id></citation></ref>
<ref id="B22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kaplan</surname> <given-names>K.</given-names></name> <name><surname>Kaya</surname> <given-names>Y.</given-names></name> <name><surname>Kuncan</surname> <given-names>M.</given-names></name> <name><surname>Ertunc</surname> <given-names>H. M.</given-names></name></person-group> (<year>2020</year>). <article-title>Brain tumor classification using modified local binary patterns (LBP) feature extraction methods</article-title>. <source>Med. Hypotheses</source> <volume>139</volume>:<fpage>109696</fpage>. <pub-id pub-id-type="doi">10.1016/j.mehy.2020.109696</pub-id><pub-id pub-id-type="pmid">32234609</pub-id></citation></ref>
<ref id="B23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ker</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Rao</surname> <given-names>J.</given-names></name> <name><surname>Lim</surname> <given-names>T.</given-names></name></person-group> (<year>2017</year>). <article-title>Deep learning applications in medical image analysis</article-title>. <source>IEEE Access</source> <volume>6</volume>, <fpage>9375</fpage>&#x02013;<lpage>9389</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2017.2788044</pub-id></citation></ref>
<ref id="B24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khalil</surname> <given-names>H. A.</given-names></name> <name><surname>Darwish</surname> <given-names>S.</given-names></name> <name><surname>Ibrahim</surname> <given-names>Y. M.</given-names></name> <name><surname>Hassan</surname> <given-names>O. F.</given-names></name></person-group> (<year>2020</year>). <article-title>3D-MRI brain tumor detection model using modified version of level set segmentation based on dragonfly algorithm</article-title>. <source>Symmetry</source> <volume>12</volume>:<fpage>1256</fpage>. <pub-id pub-id-type="doi">10.3390/sym12081256</pub-id></citation></ref>
<ref id="B25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khan</surname> <given-names>S. R.</given-names></name> <name><surname>Sikandar</surname> <given-names>M.</given-names></name> <name><surname>Almogren</surname> <given-names>A.</given-names></name> <name><surname>Ud Din</surname> <given-names>I.</given-names></name> <name><surname>Guerrieri</surname> <given-names>A.</given-names></name> <name><surname>Fortino</surname> <given-names>G.</given-names></name></person-group> (<year>2020</year>). <article-title>IoMT-based computational approach for detecting brain tumor</article-title>. <source>Future Generation Comput. Sys.</source> <volume>109</volume>, <fpage>360</fpage>&#x02013;<lpage>367</lpage>. <pub-id pub-id-type="doi">10.1016/j.future.2020.03.054</pub-id></citation></ref>
<ref id="B26"><citation citation-type="web"><person-group person-group-type="author"><name><surname>LeCun</surname> <given-names>Y.</given-names></name></person-group> (<year>2015</year>). <article-title>LeNet-5, convolutional neural networks</article-title>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://yann.lecun.com/exdb/lenet">http://yann.lecun.com/exdb/lenet</ext-link>.</citation></ref>
<ref id="B27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Leming</surname> <given-names>M.</given-names></name> <name><surname>G&#x000F3;rriz</surname> <given-names>J. M.</given-names></name> <name><surname>Suckling</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Ensemble deep learning on large, mixed-site fMRI datasets in autism and other tasks</article-title>. <source>Int. J. Neural Syst.</source> <volume>30</volume>:<fpage>2050012</fpage>. <pub-id pub-id-type="doi">10.1142/S0129065720500124</pub-id><pub-id pub-id-type="pmid">32308082</pub-id></citation></ref>
<ref id="B28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>S. Y.</given-names></name> <name><surname>Satapathy</surname> <given-names>S. C.</given-names></name> <name><surname>Wang</surname> <given-names>S. H.</given-names></name> <name><surname>Zhang</surname> <given-names>Y. D.</given-names></name></person-group> (<year>2021</year>). <article-title>PBTNet: a new computer-aided diagnosis system for detecting primary brain tumors</article-title>. <source>Front. Cell Dev. Biol.</source> <volume>9</volume>:<fpage>765654</fpage>. <pub-id pub-id-type="doi">10.3389/fcell.2021.765654</pub-id><pub-id pub-id-type="pmid">34722549</pub-id></citation></ref>
<ref id="B30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>S.-H.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.-D.</given-names></name></person-group> (<year>2020a</year>). <article-title>Detection of abnormal brain in MRI via improved AlexNet and ELM optimized by chaotic bat algorithm</article-title>. <source>Neural Comput. Appl.</source> <volume>33</volume>, <fpage>10799</fpage>&#x02013;<lpage>10811</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-020-05082-4</pub-id></citation></ref>
<ref id="B29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>S. H.</given-names></name> <name><surname>Zhang</surname> <given-names>Y. D.</given-names></name></person-group> (<year>2020b</year>). <article-title>Detecting pathological brain via ResNet and randomized neural networks</article-title>. <source>Heliyon</source> <volume>6</volume>:<fpage>e05625</fpage>. <pub-id pub-id-type="doi">10.1016/j.heliyon.2020.e05625</pub-id><pub-id pub-id-type="pmid">33305056</pub-id></citation></ref>
<ref id="B32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>S.-Y.</given-names></name> <name><surname>Wang</surname> <given-names>S.-H.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.-D.</given-names></name></person-group> (<year>2020</year>). <article-title>A classification method for brain MRI via mobilenet and feedforward network with random weights</article-title>. <source>Pattern Recogn. Lett.</source> <volume>140</volume>, <fpage>252</fpage>&#x02013;<lpage>260</lpage>. <pub-id pub-id-type="doi">10.1016/j.patrec.2020.10.017</pub-id></citation></ref>
<ref id="B31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>S.</given-names></name> <name><surname>Wu</surname> <given-names>D.</given-names></name> <name><surname>Zhang</surname> <given-names>Z.</given-names></name> <name><surname>Wang</surname> <given-names>S.-H.</given-names></name></person-group> (<year>2021</year>). <article-title>An explainable framework for diagnosis of COVID-19 Pneumonia via transfer learning and discriminant correlation analysis</article-title>. <source>ACM Trans. Multimed. Comput. Commun. Appl.</source> <volume>17</volume>, <fpage>1</fpage>&#x02013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1145/3449785</pub-id></citation></ref>
<ref id="B33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lundervold</surname> <given-names>A. S.</given-names></name> <name><surname>Lundervold</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>An overview of deep learning in medical imaging focusing on MRI</article-title>. <source>Z. Med. Phys.</source> <volume>29</volume>, <fpage>102</fpage>&#x02013;<lpage>127</lpage>. <pub-id pub-id-type="doi">10.1016/j.zemedi.2018.11.002</pub-id><pub-id pub-id-type="pmid">30553609</pub-id></citation></ref>
<ref id="B34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lynch</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>New Alzheimer&#x02019;s association report reveals sharp increases in Alzheimer&#x02019;s prevalence, deaths, cost of care</article-title>. <source>Alzheimer&#x02019;s Dement.</source> Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.alz.org/news/2018/new_alzheimer_s_association_report_reveals_sharp_i">www.alz.org/news/2018/new_alzheimer_s_association_report_reveals_sharp_i</ext-link>. Accessed December, 2021.</citation></ref>
<ref id="B35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ma</surname> <given-names>L.</given-names></name> <name><surname>Zhang</surname> <given-names>F.</given-names></name></person-group> (<year>2021</year>). <article-title>End-to-end predictive intelligence diagnosis in brain tumor using lightweight neural network</article-title>. <source>Appl. Soft Comput.</source> <volume>111</volume>:<fpage>107666</fpage>. <pub-id pub-id-type="doi">10.1016/J.ASOC.2021.107666</pub-id></citation></ref>
<ref id="B36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Natekar</surname> <given-names>P.</given-names></name> <name><surname>Kori</surname> <given-names>A.</given-names></name> <name><surname>Krishnamurthi</surname> <given-names>G.</given-names></name></person-group> (<year>2020</year>). <article-title>Demystifying brain tumor segmentation networks: interpretability and uncertainty analysis</article-title>. <source>Front. Comput. Neurosci.</source> <volume>14</volume>:<fpage>6</fpage>. <pub-id pub-id-type="doi">10.3389/fncom.2020.00006</pub-id><pub-id pub-id-type="pmid">32116620</pub-id></citation></ref>
<ref id="B37"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Nayef</surname> <given-names>B. H.</given-names></name> <name><surname>Sahran</surname> <given-names>S.</given-names></name> <name><surname>Hussain</surname> <given-names>R. I.</given-names></name> <name><surname>Abdullah</surname> <given-names>S. N. H. S.</given-names></name></person-group> (<year>2013</year>). &#x0201C;<article-title>Brain imaging classification based on learning vector quantization</article-title>,&#x0201D; <conf-name>2013 1st International Conference on Communications, Signal Processing and their Applications (ICCSPA)</conf-name> (<conf-loc>Sharjah, United Arab Emirates</conf-loc>), <fpage>1</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1109/ICCSPA.2013.6487253</pub-id></citation></ref>
<ref id="B38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Noreen</surname> <given-names>N.</given-names></name> <name><surname>Palaniappan</surname> <given-names>S.</given-names></name> <name><surname>Qayyum</surname> <given-names>A.</given-names></name> <name><surname>Ahmad</surname> <given-names>I.</given-names></name> <name><surname>Imran</surname> <given-names>M.</given-names></name> <name><surname>Shoaib</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>A deep learning model based on concatenation approach for the diagnosis of brain tumor</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>55135</fpage>&#x02013;<lpage>55144</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2020.2978629</pub-id></citation></ref>
<ref id="B39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Odusami</surname> <given-names>M.</given-names></name> <name><surname>Maskeli&#x0016A;nas</surname> <given-names>R.</given-names></name> <name><surname>Dama&#x00161;evi&#x0010D;ius</surname> <given-names>R.</given-names></name></person-group> (<year>2022</year>). <article-title>An intelligent system for early recognition of Alzheimer&#x02019;s disease using neuroimaging</article-title>. <source>Sensors (Basel)</source> <volume>22</volume>:<fpage>740</fpage>. <pub-id pub-id-type="doi">10.3390/s22030740</pub-id><pub-id pub-id-type="pmid">35161486</pub-id></citation></ref>
<ref id="B40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Panwar</surname> <given-names>H.</given-names></name> <name><surname>Gupta</surname> <given-names>P.</given-names></name> <name><surname>Siddiqui</surname> <given-names>M. K.</given-names></name> <name><surname>Morales-Menendez</surname> <given-names>R.</given-names></name> <name><surname>Bhardwaj</surname> <given-names>P.</given-names></name> <name><surname>Singh</surname> <given-names>V.</given-names></name></person-group> (<year>2020</year>). <article-title>A deep learning and grad-CAM based color visualization approach for fast detection of COVID-19 cases using chest X-ray and CT-Scan images</article-title>. <source>Chaos, Solitons Fractals</source> <volume>140</volume>:<fpage>110190</fpage>. <pub-id pub-id-type="doi">10.1016/j.chaos.2020.110190</pub-id><pub-id pub-id-type="pmid">32836918</pub-id></citation></ref>
<ref id="B41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pao</surname> <given-names>Y.-H.</given-names></name> <name><surname>Park</surname> <given-names>G.-H.</given-names></name> <name><surname>Sobajic</surname> <given-names>D. J.</given-names></name></person-group> (<year>1994</year>). <article-title>Learning and generalization characteristics of the random vector functional-link net</article-title>. <source>Neurocomputing</source> <volume>6</volume>, <fpage>163</fpage>&#x02013;<lpage>180</lpage>. <pub-id pub-id-type="doi">10.1016/0925-2312(94)90053-1</pub-id></citation></ref>
<ref id="B42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Purushottam Gumaste</surname> <given-names>P.</given-names></name> <name><surname>Bairagi</surname> <given-names>V. K.</given-names></name></person-group> (<year>2020</year>). <article-title>A hybrid method for brain tumor detection using advanced textural feature extraction</article-title>. <source>Biomed. Pharmacol. J.</source> <volume>13</volume>, <fpage>145</fpage>&#x02013;<lpage>157</lpage>. <pub-id pub-id-type="doi">10.13005/bpj/1871</pub-id></citation></ref>
<ref id="B43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Razzak</surname> <given-names>I.</given-names></name> <name><surname>Naz</surname> <given-names>S.</given-names></name> <name><surname>Ashraf</surname> <given-names>A.</given-names></name> <name><surname>Khalifa</surname> <given-names>F.</given-names></name> <name><surname>Bouadjenek</surname> <given-names>M. R.</given-names></name> <name><surname>Mumtaz</surname> <given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>Mutliresolutional ensemble PartialNet for Alzheimer detection using magnetic resonance imaging data</article-title>. <source>Int. J. Intell. Syst.</source> <pub-id pub-id-type="doi">10.1002/int.22856</pub-id></citation></ref>
<ref id="B44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Saba</surname> <given-names>T.</given-names></name> <name><surname>Sameh Mohamed</surname> <given-names>A.</given-names></name> <name><surname>El-Affendi</surname> <given-names>M.</given-names></name> <name><surname>Amin</surname> <given-names>J.</given-names></name> <name><surname>Sharif</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>Brain tumor detection using fusion of hand crafted and deep learning features</article-title>. <source>Cogn. Syst. Res.</source> <volume>59</volume>, <fpage>221</fpage>&#x02013;<lpage>230</lpage>. <pub-id pub-id-type="doi">10.1016/j.cogsys.2019.09.007</pub-id></citation></ref>
<ref id="B45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sadad</surname> <given-names>T.</given-names></name> <name><surname>Rehman</surname> <given-names>A.</given-names></name> <name><surname>Munir</surname> <given-names>A.</given-names></name> <name><surname>Saba</surname> <given-names>T.</given-names></name> <name><surname>Tariq</surname> <given-names>U.</given-names></name> <name><surname>Ayesha</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Brain tumor detection and multi-classification using advanced deep learning techniques</article-title>. <source>Microsc. Res. Tech.</source> <volume>84</volume>, <fpage>1296</fpage>&#x02013;<lpage>1308</lpage>. <pub-id pub-id-type="doi">10.1002/jemt.23688</pub-id><pub-id pub-id-type="pmid">33400339</pub-id></citation></ref>
<ref id="B46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sadeghi</surname> <given-names>D.</given-names></name> <name><surname>Shoeibi</surname> <given-names>A.</given-names></name> <name><surname>Ghassemi</surname> <given-names>N.</given-names></name> <name><surname>Moridian</surname> <given-names>P.</given-names></name> <name><surname>Khadem</surname> <given-names>A.</given-names></name> <name><surname>Alizadehsani</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>An overview on artificial intelligence techniques for diagnosis of schizophrenia based on magnetic resonance imaging modalities: methods, challenges and future works</article-title>. <source>arXiv [Preprint]</source>. <pub-id pub-id-type="doi">10.48550/arXiv.2103.03081</pub-id></citation></ref>
<ref id="B47"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Schmidt</surname> <given-names>W. F.</given-names></name> <name><surname>Kraaijveld</surname> <given-names>M. A.</given-names></name> <name><surname>Duin</surname> <given-names>R. P.</given-names></name></person-group> (<year>1992</year>). &#x0201C;<article-title>Feed forward neural networks with random weights</article-title>,&#x0201D; in <conf-name>11th IAPR International Conference on Pattern Recognition. Vol.II. Conference B: Pattern Recognition Methodology and Systems</conf-name> <conf-loc>(The Hague, Netherlands)</conf-loc>, <fpage>1</fpage>&#x02013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1109/ICPR.1992.201708</pub-id></citation></ref>
<ref id="B48"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sharif</surname> <given-names>M.</given-names></name> <name><surname>Amin</surname> <given-names>J.</given-names></name> <name><surname>Raza</surname> <given-names>M.</given-names></name> <name><surname>Anjum</surname> <given-names>M. A.</given-names></name> <name><surname>Afzal</surname> <given-names>H.</given-names></name> <name><surname>Shad</surname> <given-names>S. A.</given-names></name></person-group> (<year>2020</year>). <article-title>Brain tumor detection based on extreme learning</article-title>. <source>Neural Comput. Appl.</source> <volume>32</volume>, <fpage>15975</fpage>&#x02013;<lpage>15987</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-019-04679-8</pub-id></citation></ref>
<ref id="B53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shoeibi</surname> <given-names>A.</given-names></name> <name><surname>Ghassemi</surname> <given-names>N.</given-names></name> <name><surname>Khodatars</surname> <given-names>M.</given-names></name> <name><surname>Moridian</surname> <given-names>P.</given-names></name> <name><surname>Alizadehsani</surname> <given-names>R.</given-names></name> <name><surname>Zare</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Detection of epileptic seizures on EEG signals using ANFIS classifier, autoencoders and fuzzy entropies</article-title>. <source>Biomed. Signal Process. Control</source> <volume>73</volume>:<fpage>103417</fpage>. <pub-id pub-id-type="doi">10.1016/j.bspc.2021.103417</pub-id></citation></ref>
<ref id="B50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shoeibi</surname> <given-names>A.</given-names></name> <name><surname>Khodatars</surname> <given-names>M.</given-names></name> <name><surname>Jafari</surname> <given-names>M.</given-names></name> <name><surname>Moridian</surname> <given-names>P.</given-names></name> <name><surname>Rezaei</surname> <given-names>M.</given-names></name> <name><surname>Alizadehsani</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2021a</year>). <article-title>Applications of deep learning techniques for automated multiple sclerosis detection using magnetic resonance imaging: a review</article-title>. <source>arXiv [Preprint]</source>. <pub-id pub-id-type="doi">10.48550/arXiv.2105.04881</pub-id></citation></ref>
<ref id="B51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shoeibi</surname> <given-names>A.</given-names></name> <name><surname>Ghassemi</surname> <given-names>N.</given-names></name> <name><surname>Khodatars</surname> <given-names>M.</given-names></name> <name><surname>Jafari</surname> <given-names>M.</given-names></name> <name><surname>Moridian</surname> <given-names>P.</given-names></name> <name><surname>Alizadehsani</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2021b</year>). <article-title>Applications of epileptic seizures detection in neuroimaging modalities using deep learning techniques: methods, challenges and future works</article-title>. <source>arXiv [Preprint]</source>. <pub-id pub-id-type="doi">10.48550/arXiv.2105.14278</pub-id></citation></ref>
<ref id="B52"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shoeibi</surname> <given-names>A.</given-names></name> <name><surname>Sadeghi</surname> <given-names>D.</given-names></name> <name><surname>Moridian</surname> <given-names>P.</given-names></name> <name><surname>Ghassemi</surname> <given-names>N.</given-names></name> <name><surname>Heras</surname> <given-names>J.</given-names></name> <name><surname>Alizadehsani</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2021c</year>). <article-title>Automatic diagnosis of schizophrenia in EEG signals using CNN-LSTM models</article-title>. <source>Front. Neuroinform.</source> <volume>15</volume>:<fpage>777977</fpage>. <pub-id pub-id-type="doi">10.3389/fninf.2021.777977</pub-id><pub-id pub-id-type="pmid">34899226</pub-id></citation></ref>
<ref id="B49"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shoeibi</surname> <given-names>A.</given-names></name> <name><surname>Khodatars</surname> <given-names>M.</given-names></name> <name><surname>Alizadehsani</surname> <given-names>R.</given-names></name> <name><surname>Ghassemi</surname> <given-names>N.</given-names></name> <name><surname>Jafari</surname> <given-names>M.</given-names></name> <name><surname>Moridian</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Automated detection and forecasting of covid-19 using deep learning techniques: a review</article-title>. <source>arXiv [Preprint]</source>. <pub-id pub-id-type="doi">10.48550/arXiv.2007.10785</pub-id></citation></ref>
<ref id="B54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Simonyan</surname> <given-names>K.</given-names></name> <name><surname>Zisserman</surname> <given-names>A.</given-names></name></person-group> (<year>2014</year>). <article-title>Very deep convolutional networks for large-scale image recognition</article-title>. <source>arXiv [Preprint]</source>. <pub-id pub-id-type="doi">10.48550/arXiv.1409.1556</pub-id></citation></ref>
<ref id="B55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Srivastava</surname> <given-names>R. K.</given-names></name> <name><surname>Greff</surname> <given-names>K.</given-names></name> <name><surname>Schmidhuber</surname> <given-names>J.</given-names></name></person-group> (<year>2015</year>). <article-title>Highway networks</article-title>. <source>arXiv [Preprint]</source>. <pub-id pub-id-type="doi">10.48550/arXiv.1505.00387</pub-id></citation></ref>
<ref id="B56"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tavanaei</surname> <given-names>A.</given-names></name> <name><surname>Ghodrati</surname> <given-names>M.</given-names></name> <name><surname>Kheradpisheh</surname> <given-names>S. R.</given-names></name> <name><surname>Masquelier</surname> <given-names>T.</given-names></name> <name><surname>Maida</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>Deep learning in spiking neural networks</article-title>. <source>Neural Netw.</source> <volume>111</volume>, <fpage>47</fpage>&#x02013;<lpage>63</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2018.12.002</pub-id><pub-id pub-id-type="pmid">30682710</pub-id></citation></ref>
<ref id="B57"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>S.-H.</given-names></name> <name><surname>Govindaraj</surname> <given-names>V. V.</given-names></name> <name><surname>G&#x000F3;rriz</surname> <given-names>J. M.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.-D.</given-names></name></person-group> (<year>2021</year>). <article-title>COVID-19 classification by FGCNet with deep feature fusion from graph convolutional network and convolutional neural network</article-title>. <source>Info. Fusion</source> <volume>67</volume>, <fpage>208</fpage>&#x02013;<lpage>229</lpage>. <pub-id pub-id-type="doi">10.1016/j.inffus.2020.10.004</pub-id><pub-id pub-id-type="pmid">33052196</pub-id></citation></ref>
<ref id="B58"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Woo</surname> <given-names>S.</given-names></name> <name><surname>Park</surname> <given-names>J.</given-names></name> <name><surname>Lee</surname> <given-names>J.-Y.</given-names></name> <name><surname>Kweon</surname> <given-names>I. S.</given-names></name></person-group> (<year>2018</year>). &#x0201C;<article-title>CBAM: convolutional block attention module</article-title>,&#x0201D; in <conf-name>Proceedings of the European Conference On Computer Vision (ECCV)</conf-name> <conf-loc>(Munich, Germany)</conf-loc>, <fpage>3</fpage>&#x02013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-01234-2_1</pub-id></citation></ref>
<ref id="B59"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>L.</given-names></name> <name><surname>Gao</surname> <given-names>Q.</given-names></name> <name><surname>Yousefi</surname> <given-names>N.</given-names></name></person-group> (<year>2020</year>). <article-title>Brain tumor diagnosis based on discrete wavelet transform, gray-level co-occurrence matrix and optimal deep belief network</article-title>. <source>Simulation</source> <volume>96</volume>, <fpage>867</fpage>&#x02013;<lpage>879</lpage>. <pub-id pub-id-type="doi">10.1177/0037549720948595</pub-id></citation></ref>
<ref id="B60"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Yaqoob</surname> <given-names>M.</given-names></name> <name><surname>Wr&#x000F3;bel</surname> <given-names>B.</given-names></name></person-group> (<year>2017</year>). &#x0201C;<article-title>Very small spiking neural networks evolved to recognize a pattern in a continuous input stream</article-title>,&#x0201D; in <conf-name>2017 IEEE Symposium Series On Computational Intelligence (SSCI)</conf-name> <conf-loc>(Honolulu, HI, USA)</conf-loc>, <fpage>1</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1109/SSCI.2017.8285420</pub-id></citation></ref>
<ref id="B61"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Patel</surname> <given-names>V. M.</given-names></name></person-group> (<year>2018</year>). &#x0201C;<article-title>Densely connected pyramid dehazing network</article-title>,&#x0201D; in <conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</conf-name> <conf-loc>(Salt Lake City, UT, USA)</conf-loc>, <fpage>3194</fpage>&#x02013;<lpage>3203</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2018.00337</pub-id></citation></ref>
<ref id="B62"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.-D.</given-names></name> <name><surname>Satapathy</surname> <given-names>S. C.</given-names></name> <name><surname>Guttery</surname> <given-names>D. S.</given-names></name> <name><surname>G&#x000F3;rriz</surname> <given-names>J. M.</given-names></name> <name><surname>Wang</surname> <given-names>S.-H.</given-names></name></person-group> (<year>2021</year>). <article-title>Improved breast cancer classification through combining graph convolutional network and convolutional neural network</article-title>. <source>Info. Process. Manag.</source> <volume>58</volume>:<fpage>102439</fpage>. <pub-id pub-id-type="doi">10.1016/j.ipm.2020.102439</pub-id></citation></ref>
<ref id="B63"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.-D.</given-names></name> <name><surname>Satapathy</surname> <given-names>S. C.</given-names></name> <name><surname>Zhu</surname> <given-names>L.-Y.</given-names></name> <name><surname>G&#x000F3;rriz</surname> <given-names>J. M.</given-names></name> <name><surname>Wang</surname> <given-names>S.-H.</given-names></name></person-group> (<year>2020</year>). <article-title>A seven-layer convolutional neural network for chest CT based COVID-19 diagnosis using stochastic pooling</article-title>. <source>IEEE Sensors J.</source> <fpage>20504</fpage>&#x02013;<lpage>20511</lpage>. <pub-id pub-id-type="doi">10.1109/JSEN.2020.3025855</pub-id></citation></ref>
</ref-list>
</back>
</article>
