<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Comput. Neurosci.</journal-id>
<journal-title>Frontiers in Computational Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Comput. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5188</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fncom.2017.00117</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Classification of Alzheimer&#x00027;s Disease, Mild Cognitive Impairment, and Cognitively Unimpaired Individuals Using Multi-feature Kernel Discriminant Dictionary Learning</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Li</surname> <given-names>Qing</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/511397/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Wu</surname> <given-names>Xia</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/128448/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Xu</surname> <given-names>Lele</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Chen</surname> <given-names>Kewei</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/155711/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Yao</surname> <given-names>Li</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/10457/overview"/>
</contrib>
<contrib contrib-type="author">
<collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Electronics, College of Information Science and Technology, Beijing Normal University</institution>, <addr-line>Beijing</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>State Key Laboratory of Cognitive Neuroscience and Learning, Beijing Normal University</institution>, <addr-line>Beijing</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>Banner Alzheimer&#x00027;s Institute and Banner Good Samaritan PET Center</institution>, <addr-line>Phoenix, AZ</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Tianming Liu, University of Georgia, United States</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Feng Liu, Tianjin Medical University General Hospital, China; Kaiming Li, Sichuan University, China</p></fn>
<fn fn-type="corresp" id="fn001"><p>&#x0002A;Correspondence: Xia Wu <email>wuxia&#x00040;bnu.edu.cn</email></p></fn>
<fn fn-type="corresp" id="fn002"><p>Li Yao <email>yaoli&#x00040;bnu.edu.cn</email></p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>09</day>
<month>01</month>
<year>2018</year>
</pub-date>
<pub-date pub-type="collection">
<year>2017</year>
</pub-date>
<volume>11</volume>
<elocation-id>117</elocation-id>
<history>
<date date-type="received">
<day>15</day>
<month>09</month>
<year>2017</year>
</date>
<date date-type="accepted">
<day>19</day>
<month>12</month>
<year>2017</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2018 Li, Wu, Xu, Chen, Yao and Alzheimer&#x00027;s Disease Neuroimaging Initiative.</copyright-statement>
<copyright-year>2018</copyright-year>
<copyright-holder>Li, Wu, Xu, Chen, Yao and Alzheimer&#x00027;s Disease Neuroimaging Initiative</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) or licensor are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract><p>Accurate classification of either patients with Alzheimer&#x00027;s disease (AD) or patients with mild cognitive impairment (MCI), the prodromal stage of AD, from cognitively unimpaired (CU) individuals is important for clinical diagnosis and adequate intervention. The current study focused on distinguishing AD or MCI from CU based on the multi-feature kernel supervised within-Class-similar discriminative dictionary learning algorithm (MKSCDDL), which we introduced in a previous study, demonstrating that MKSCDDL had superior performance in face recognition. Structural magnetic resonance imaging (sMRI), fluorodeoxyglucose (FDG) positron emission tomography (PET), and florbetapir-PET data from the Alzheimer&#x00027;s Disease Neuroimaging Initiative (ADNI) database were all included for classification of AD vs. CU, MCI vs. CU, as well as AD vs. MCI (113 AD patients, 110 MCI patients, and 117 CU subjects). By adopting MKSCDDL, we achieved a classification accuracy of 98.18% for AD vs. CU, 78.50% for MCI vs. CU, and 74.47% for AD vs. MCI, which in each instance was superior to results obtained using several other state-of-the-art approaches (MKL, JRC, mSRC, and mSCDDL). In addition, testing time results outperformed other high quality methods. Therefore, the results suggested that the MKSCDDL procedure is a promising tool for assisting early diagnosis of diseases using neuroimaging data.</p></abstract>
<kwd-group>
<kwd>Alzheimer&#x00027;s disease (AD)</kwd>
<kwd>mild cognitive impairment (MCI)</kwd>
<kwd>multimodal imaging</kwd>
<kwd>multiple kernel dictionary learning</kwd>
</kwd-group>
<counts>
<fig-count count="8"/>
<table-count count="4"/>
<equation-count count="19"/>
<ref-count count="82"/>
<page-count count="14"/>
<word-count count="9055"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>Introduction</title>
<p>Alzheimer&#x00027;s disease (AD) is a complex multifactorial neurodegenerative disorder and is the most common type of dementia, defined by extensive neuronal and synaptic loss (Tan et al., <xref ref-type="bibr" rid="B55">2013</xref>; Gao et al., <xref ref-type="bibr" rid="B13">2016</xref>). A recent study has shown that AD has a high prevalence, with an estimated 40 million patients worldwide (Selkoe and Hardy, <xref ref-type="bibr" rid="B50">2016</xref>). Mild cognitive impairment (MCI) has been generally viewed as an intermediate state between normal aging and the onset of AD (Petersen et al., <xref ref-type="bibr" rid="B42">2001</xref>; Garc&#x000E9;s et al., <xref ref-type="bibr" rid="B14">2014</xref>). Thus, AD and MCI&#x02014;the latter a transitional stage between healthy aging and dementia that is commonly characterized by slight cognitive deficits but largely intact activities of daily living (Petersen, <xref ref-type="bibr" rid="B41">2004</xref>; Wei et al., <xref ref-type="bibr" rid="B63">2016</xref>)&#x02014;have attracted great interest.</p>
<fig position="float">
<label>Graphical Abstract</label>
<graphic xlink:href="fncom-11-00117-g0008.tif"/>
</fig>
<p>It has been shown that the neuroimaging data, including structural magnetic resonance imaging (sMRI) (Wee et al., <xref ref-type="bibr" rid="B61">2011</xref>; Zhou et al., <xref ref-type="bibr" rid="B78">2011</xref>), functional MRI (fMRI) (Suk et al., <xref ref-type="bibr" rid="B54">2013</xref>), fluorodeoxyglucose positron emission tomography (FDG-PET) (Sanabria-Diaz et al., <xref ref-type="bibr" rid="B48">2013</xref>), and amyloid PETs, such as Pittsburgh compound B (PiB-PET) (Zhang et al., <xref ref-type="bibr" rid="B76">2014</xref>) and florbetapir-PET (Saint-Aubert et al., <xref ref-type="bibr" rid="B47">2013</xref>), can be used to discriminate AD or MCI with promising results when each modality is used individually and separately. It has been speculated that different neuroimaging tools provide complementary information, which, when combined, can be more powerful for diagnosis of AD or MCI (Liu et al., <xref ref-type="bibr" rid="B30">2014b</xref>; Suk et al., <xref ref-type="bibr" rid="B53">2015</xref>; Wang et al., <xref ref-type="bibr" rid="B60">2016</xref>), and combining this potentially complementary information from various modalities would produce more powerful classifiers (Zhang et al., <xref ref-type="bibr" rid="B72">2012a</xref>; Xu et al., <xref ref-type="bibr" rid="B68">2015</xref>).</p>
<p>Several classification methods combining multi-modality data have been used to classify AD or MCI from CU. For example, a weighted multiple kernel learning (MKL) model has been proposed to classify AD or MCI based on combining different modalities (Wee et al., <xref ref-type="bibr" rid="B62">2012</xref>; Zhang et al., <xref ref-type="bibr" rid="B73">2012b</xref>; Liu et al., <xref ref-type="bibr" rid="B30">2014b</xref>). A joint regression and classification (JRC) algorithm was also introduced and has been indicated to diagnose AD or MCI effectively based on multi-modality data (Zhu et al., <xref ref-type="bibr" rid="B79">2014a</xref>,<xref ref-type="bibr" rid="B80">b</xref>). A weighted multi-modality sparse representation-based classification (mSRC) was developed and applied for discriminating AD or MCI based on multi-modalities (Xu et al., <xref ref-type="bibr" rid="B68">2015</xref>). Recently, a multi-modal discriminative dictionary learning (mSCDDL) (Li et al., <xref ref-type="bibr" rid="B27">2017</xref>) algorithm has been proposed for classifying AD or MCI efficiently; it is a weighted multi-modality extension of supervised within-Class-similarity discriminative dictionary learning (SCDDL), a robust and efficient machine learning method for facial recognition proposed by Xu et al. (Xu et al., <xref ref-type="bibr" rid="B69">2016</xref>).</p>
<p>SCDDL is a discriminant dictionary learning (DL) method that combines the classification error term and the within-Class-similarity in the objective function of the DL scheme (Xu et al., <xref ref-type="bibr" rid="B69">2016</xref>). Recently, because the MKL algorithm has been suggested to be effective for feature fusion (G&#x000F6;nen and Alpaydin, <xref ref-type="bibr" rid="B15">2011</xref>), SCDDL was extended to a kernel framework, named multi-feature kernel SCDDL (MKSCDDL), which has been indicated to be an efficient tool in face recognition (Wu et al., <xref ref-type="bibr" rid="B66">2017</xref>).</p>
<p>In this study, MKSCDDL was examined for its robustness and efficiency in classifying AD or MCI vs. CU, based on three modalities of data, i.e., sMRI, FDG-PET, and florbetapir-PET. Our experimental results indicated that the MKSCDDL method combining multiple modalities could outperform SCDDL applied to each single modality alone, and achieve better or comparable classification performance compared with some other state-of-the-art multi-modality classification algorithms, including MKL (Zhang et al., <xref ref-type="bibr" rid="B74">2011</xref>), JRC (Zhu et al., <xref ref-type="bibr" rid="B79">2014a</xref>), mSRC (Xu et al., <xref ref-type="bibr" rid="B68">2015</xref>), and mSCDDL (Li et al., <xref ref-type="bibr" rid="B27">2017</xref>).</p>
</sec>
<sec id="s2">
<title>Image preprocessing</title>
<p>In this work, we used data from the Alzheimer&#x00027;s Disease Neuroimaging Initiative (ADNI) for performance evaluation. The ADNI was launched in 2003 by the National Institute on Aging (NIA), the National Institute of Biomedical Imaging and Bioengineering (NIBIB), the Food and Drug Administration (FDA), private pharmaceutical companies, and non-profit organizations, as a 5-year public-private partnership. For up-to-date information, see <ext-link ext-link-type="uri" xlink:href="http://www.adni-info.org">http://www.adni-info.org</ext-link>.</p>
<sec>
<title>Subjects</title>
<p>In this paper, 113 patients with AD, 110 patients with MCI, and 117 CU individuals with ages ranging from 55 to 99 years were included. All the data, including the sMRI, FDG-PET, and florbetapir-PET, were downloaded from ADNI 1, ADNI GO, or ADNI 2. For each subject, the data-acquisition interval of the three modalities was within four months. Moreover, the subjects were matched in terms of age, years of education, and gender. The subjects we selected satisfied the following criteria: (1) The MMSE score of each AD subject was between 20 and 26, with a CDR of 0.5 or 1.0. The AD group did not significantly differ from the MCI group with respect to the presence of APOE4 alleles (<italic>p</italic> &#x0003D; 0.765), but had significantly lower MMSE scores (compared with the CU group, <italic>p</italic> &#x0003D; 1.24 &#x000D7; 10<sup>&#x02212;90</sup>; MCI group, <italic>p</italic> &#x0003D; 1.61 &#x000D7; 10<sup>&#x02212;40</sup>) and a different presence of APOE4 alleles compared with the CU group (<italic>p</italic> &#x0003D; 0.014). (2) The MMSE score of each MCI subject was between 24 and 30, and the CDR was 0.5. The MCI group had significantly lower MMSE scores (<italic>p</italic> &#x0003D; 4.69 &#x000D7; 10<sup>&#x02212;31</sup>) and a different presence of APOE4 alleles (<italic>p</italic> &#x0003D; 7.34 &#x000D7; 10<sup>&#x02212;04</sup>) compared with the CU group. (3) The MMSE score of each CU subject was between 26 and 30, and their CDR was 0.0. Table <xref ref-type="table" rid="T1">1</xref> shows the demographic information of the subjects.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Demographic information of the subjects; <italic>p</italic>-values were obtained using one-way ANOVA applied to the AD, MCI, and CU groups.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th/>
<th valign="top" align="center"><bold>AD (<italic>n</italic> &#x0003D; 113)</bold></th>
<th valign="top" align="center"><bold>MCI (<italic>n</italic> &#x0003D; 110)</bold></th>
<th valign="top" align="center"><bold>CU (<italic>n</italic> &#x0003D; 117)</bold></th>
<th valign="top" align="center"><bold><italic>p</italic>-value</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Gender</td>
<td valign="top" align="center">62M/51F</td>
<td valign="top" align="center">59M/51F</td>
<td valign="top" align="center">62M/55F</td>
<td valign="top" align="center">0.96</td>
</tr>
<tr>
<td valign="top" align="left">Age</td>
<td valign="top" align="center">75.6 &#x000B1; 7.6</td>
<td valign="top" align="center">75.2 &#x000B1; 7.8</td>
<td valign="top" align="center">75.4 &#x000B1; 7.0</td>
<td valign="top" align="center">0.94</td>
</tr>
<tr>
<td valign="top" align="left">EDU</td>
<td valign="top" align="center">16.10 &#x000B1; 3.00</td>
<td valign="top" align="center">16.57 &#x000B1; 2.76</td>
<td valign="top" align="center">16.44 &#x000B1; 2.41</td>
<td valign="top" align="center">0.65</td>
</tr>
<tr>
<td valign="top" align="left">MMSE</td>
<td valign="top" align="center">22.4 &#x000B1; 2.2</td>
<td valign="top" align="center">27.4 &#x000B1; 1.9</td>
<td valign="top" align="center">28.9 &#x000B1; 1.3</td>
<td valign="top" align="center">7.75 &#x000D7; 10<sup>&#x02212;75</sup></td>
</tr>
<tr>
<td valign="top" align="left">CDR</td>
<td valign="top" align="center">0.8 &#x000B1; 0.2</td>
<td valign="top" align="center">0.5 &#x000B1; 0.0</td>
<td valign="top" align="center">0.0 &#x000B1; 0.0</td>
<td valign="top" align="center">7.00 &#x000D7; 10<sup>&#x02212;151</sup></td>
</tr>
<tr>
<td valign="top" align="left">APOE4 (%)</td>
<td valign="top" align="center">50.00</td>
<td valign="top" align="center">52.73</td>
<td valign="top" align="center">24.49</td>
<td/>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>AD, Alzheimer&#x00027;s disease; MCI, mild cognitive impairment; CU, cognitively unimpaired; M, male; F, female; MMSE, Mini-Mental State Examination; CDR, Clinical Dementia Rating; EDU, years of education; APOE4, percentage of APOE4 alleles</italic>.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec>
<title>Image processing</title>
<p>Images were preprocessed using the VBM8 (Voxel-Based Morphometry 8) Toolbox (<ext-link ext-link-type="uri" xlink:href="http://dbm.neuro.uni-jena.de/vbm8/">http://dbm.neuro.uni-jena.de/vbm8/</ext-link>) in SPM8 (Statistical Parametric Mapping 8) (<ext-link ext-link-type="uri" xlink:href="http://www.fil.ion.ucl.ac.uk/spm/">http://www.fil.ion.ucl.ac.uk/spm/</ext-link>) running on MATLAB 2010b (The MathWorks, Inc., Sherborn, MA, USA). Based on adaptive maximum posterior and partial volume estimation, every structural image was segmented into rigid-body-aligned gray matter (GM), white matter (WM), and cerebrospinal fluid (CSF) for each subject (Rajapakse et al., <xref ref-type="bibr" rid="B44">1997</xref>; Tohka et al., <xref ref-type="bibr" rid="B56">2004</xref>). A spatially adaptive non-local approach was applied to improve the segmentation. The diffeomorphic anatomical registration through exponentiated lie algebra (DARTEL) protocol (Ashburner, <xref ref-type="bibr" rid="B1">2007</xref>), in which template creation and image registration are performed iteratively, was used to normalize the gray-matter images.</p>
<p>All FDG-PET and florbetapir-PET images were co-registered with each individual&#x00027;s sMRI using a rigid body transformation, and subsequently warped to the cohort-specific DARTEL template. Then, the standard uptake value ratio (SUVr) image was calculated for each FDG-PET image and florbetapir-PET image; reference masks for quantification were defined relative to the whole brain (Langbaum et al., <xref ref-type="bibr" rid="B25">2009</xref>; Sabbagh et al., <xref ref-type="bibr" rid="B46">2015</xref>) or cerebellum (Reitan, <xref ref-type="bibr" rid="B45">1958</xref>; Camus et al., <xref ref-type="bibr" rid="B3">2012</xref>), respectively.</p>
<p>Then, based on the Automated Anatomical Labeling (AAL) atlas (Tzourio-Mazoyer et al., <xref ref-type="bibr" rid="B58">2002</xref>), 90 regions of interest (ROIs) (45 for each hemisphere; Table <xref ref-type="supplementary-material" rid="SM1">S1</xref>) were obtained. The features of sMRI, FDG-PET, and florbetapir-PET were obtained by averaging, over all voxels within each ROI for each subject, the GM volume and the SUVr values of FDG-PET and florbetapir-PET, respectively.</p>
</sec>
</sec>
<sec id="s3">
<title>Method</title>
<sec>
<title>Discriminant dictionary learning</title>
<p>Suppose <italic>n</italic> training samples with <italic>d</italic>-dimension from <italic>k</italic> classes are represented by <inline-formula><mml:math id="M1"><mml:mi>A</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>&#x0211C;</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula>, in which, column vector <italic>a</italic><sub><italic>i</italic></sub> is the sample <italic>i</italic> (<italic>i</italic> &#x0003D; 1, &#x02026;, <italic>n</italic>), and submatrix <italic>A</italic><sub><italic>j</italic></sub> consists of column vectors (samples) from class <italic>j</italic> (<italic>j</italic> &#x0003D; 1, &#x02026;, <italic>k</italic>), and there are <italic>m</italic> atoms (each column of the dictionary can be viewed as an atom) in the corresponding dictionary <inline-formula><mml:math 
id="M2"><mml:mi>D</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>&#x0211C;</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>m</mml:mi><mml:mo>&#x02264;</mml:mo><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>. The general supervised DL model can be denoted as follows:</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M3"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo>&#x02329;</mml:mo><mml:mrow><mml:mi>D</mml:mi><mml:mo>,</mml:mo><mml:mi>&#x003B8;</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow><mml:mo>&#x0232A;</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mo>=</mml:mo></mml:mtd><mml:mtd><mml:mtext>arg</mml:mtext><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mtext>min</mml:mtext></mml:mrow><mml:mrow><mml:mi>D</mml:mi><mml:mo>,</mml:mo><mml:mi>&#x003B8;</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>A</mml:mi><mml:mo>-</mml:mo><mml:mi>D</mml:mi><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow></mml:msub><mml:mi>g</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>s</mml:mi><mml:mo>.</mml:mo><mml:mi>t</mml:mi><mml:mo>.</mml:mo><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mtd><mml:mtd><mml:mo>=</mml:mo></mml:mtd><mml:mtd><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>f</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mi>m</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where &#x003B8; is the discriminative parameter and <italic>g</italic>(&#x003B8;) represents the discriminative term, <italic>X</italic> denotes the coding coefficients of training samples <italic>A</italic> on the dictionary <italic>D</italic>. <italic>g</italic>(&#x003B8;) here indicates the linear classification error function (like <inline-formula><mml:math id="M5"><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>H</mml:mi><mml:mo>-</mml:mo><mml:mi>W</mml:mi><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> in the DL methods of D-KSVD (Zhang and Li, <xref ref-type="bibr" rid="B75">2010</xref>) and LC-KSVD (Jiang et al., <xref ref-type="bibr" rid="B23">2013</xref>), where <italic>H</italic> is the class label matrix and <italic>W</italic> is a classifier).</p>
<p>For classification, the classifier may be learned optimally and simultaneously with the dictionary, as in the DL algorithms that incorporate a linear classification error term (Zhang and Li, <xref ref-type="bibr" rid="B75">2010</xref>). However, the inner structure of the representation coefficients between classes has not been considered in such approaches. To further enhance the discriminative power of the dictionary, both the linear classifier and a direct restriction of the within-Class scatter on the coding coefficients were incorporated into the above discriminant DL scheme in our previous study (Xu et al., <xref ref-type="bibr" rid="B69">2016</xref>), which is referred to as the SCDDL algorithm.</p>
</sec>
<sec>
<title>Supervised within-class-similar discriminative dictionary learning</title>
<p>Suppose <inline-formula><mml:math id="M6"><mml:mi>A</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>&#x0211C;</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msup><mml:mtext>&#x000A0;</mml:mtext></mml:math></inline-formula>denotes the <italic>n d</italic>-dimensional training samples from <italic>k</italic> classes, <italic>D</italic> &#x02208; &#x0211C;<sup><italic>d</italic>&#x000D7;<italic>m</italic></sup>(<italic>m</italic> &#x02264; <italic>n</italic>) is the discriminative dictionary with <italic>m</italic> atoms that needs to be derived, and <italic>X</italic> represents the coding coefficients of training samples <italic>A</italic> on the dictionary <italic>D</italic>, denoted as <inline-formula><mml:math 
id="M7"><mml:mi>X</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>&#x0211C;</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula>, same as above. The SCDDL model can be written as follows:</p>
<disp-formula id="E3"><label>(2)</label><mml:math id="M8"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo>&#x02329;</mml:mo><mml:mrow><mml:mi>D</mml:mi><mml:mo>,</mml:mo><mml:mi>W</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow><mml:mo>&#x0232A;</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mo>=</mml:mo></mml:mtd><mml:mtd><mml:mo class="qopname">arg</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mtext>min</mml:mtext></mml:mrow><mml:mrow><mml:mi>D</mml:mi><mml:mo>,</mml:mo><mml:mi>W</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>A</mml:mi><mml:mo>-</mml:mo><mml:mi>D</mml:mi><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:msubsup><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>H</mml:mi><mml:mo>-</mml:mo><mml:mi>W</mml:mi><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003B2;</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>W</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x0002B;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></m
ml:msub><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>M</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003B7;</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>s</mml:mi><mml:mo>.</mml:mo><mml:mi>t</mml:mi><mml:mo>.</mml:mo><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mtd><mml:mtd><mml:mo>=</mml:mo></mml:mtd><mml:mtd><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>f</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mi>m</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <inline-formula><mml:math id="M10"><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mo>&#x000B7;</mml:mo><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> represents the Frobenius norm. <inline-formula><mml:math id="M11"><mml:msubsup><mml:mrow><mml:mo>&#x02225;</mml:mo><mml:mi>A</mml:mi><mml:mo>-</mml:mo><mml:mi>D</mml:mi><mml:mi>X</mml:mi><mml:mo>&#x02225;</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> is the reconstructed error term of the training samples <italic>A</italic> on the newly constructed dictionary <italic>D</italic>, <inline-formula><mml:math id="M12"><mml:mi>&#x003B1;</mml:mi><mml:msubsup><mml:mrow><mml:mo>&#x02225;</mml:mo><mml:mi>H</mml:mi><mml:mo>-</mml:mo><mml:mi>W</mml:mi><mml:mi>X</mml:mi><mml:mo>&#x02225;</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003B2;</mml:mi><mml:msubsup><mml:mrow><mml:mo>&#x02225;</mml:mo><mml:mi>W</mml:mi><mml:mo>&#x02225;</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> is the linear classification error term, and <inline-formula><mml:math id="M13"><mml:mstyle displaystyle='true'><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mo>&#x02225;</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>M</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02225;</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003B7;</mml:mi><mml:msubsup><mml:mrow><mml:mo>&#x02225;</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02225;</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> is the within-Class-similar term. <italic>W</italic> &#x02208; &#x0211C;<sup><italic>k</italic>&#x000D7;<italic>m</italic></sup> is the parameter of the classifier; each column of <italic>H</italic> &#x02208; &#x0211C;<sup><italic>k</italic>&#x000D7;<italic>m</italic></sup> is a vector, corresponds to one training sample with the form as [0, 0, &#x02026;, 1, &#x02026;, 0, 0] &#x02208; &#x0211C;<sup><italic>k</italic></sup>, where 1 locates the corresponding class of the training sample; and each column of <italic>M</italic><sub><italic>i</italic></sub> is the mean vector of the coefficients <italic>X</italic><sub><italic>i</italic></sub> corresponding to class <italic>i</italic>. 
According to the elastic-net theory, the term <inline-formula><mml:math id="M14"><mml:msubsup><mml:mrow><mml:mo>&#x02225;</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02225;</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> combined with the term &#x02225;<italic>X</italic>&#x02225;<sub>1</sub> might make the solution of Equation (2) more stable (Zou and Hastie, <xref ref-type="bibr" rid="B81">2005</xref>); and &#x003B7; is set as &#x003B7; &#x0003D; 1 for simplicity (Yang et al., <xref ref-type="bibr" rid="B70">2014</xref>). Then Equation (2) can be written as:</p>
<disp-formula id="E5"><label>(3)</label><mml:math id="M15"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo>&#x02329;</mml:mo><mml:mrow><mml:mi>D</mml:mi><mml:mo>,</mml:mo><mml:mi>W</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow><mml:mo>&#x0232A;</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mo>=</mml:mo></mml:mtd><mml:mtd><mml:mo class="qopname">arg</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mtext>min</mml:mtext></mml:mrow><mml:mrow><mml:mi>D</mml:mi><mml:mo>,</mml:mo><mml:mi>W</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>A</mml:mi><mml:mo>-</mml:mo><mml:mi>D</mml:mi><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:msubsup><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>H</mml:mi><mml:mo>-</mml:mo><mml:mi>W</mml:mi><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003B2;</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>W</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x0002B;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></
mml:msub><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>M</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>s</mml:mi><mml:mo>.</mml:mo><mml:mi>t</mml:mi><mml:mo>.</mml:mo><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mtd><mml:mtd><mml:mo>=</mml:mo></mml:mtd><mml:mtd><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>f</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mi>m</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>The optimization process of Equation (3) has been discussed in our previous study (Xu et al., <xref ref-type="bibr" rid="B69">2016</xref>). In SCDDL, the directly restricted within-Class-similar term makes the coding coefficients similar within one class and the linear classification error term selects the optimal classifier. This combination has been shown to improve the discriminative classification of the dictionary (Xu et al., <xref ref-type="bibr" rid="B69">2016</xref>).</p>
<p>After obtaining the dictionary <italic>D</italic> and classifier <italic>W</italic> in the SCDDL model, the test samples can be finally classified.</p>
<p>For a given test sample <italic>y</italic>, the representation coefficient on <italic>D</italic> is:</p>
<disp-formula id="E7"><label>(4)</label><mml:math id="M17"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>x</mml:mi><mml:mo>=</mml:mo><mml:mo class="qopname">arg</mml:mo><mml:mstyle displaystyle="true"><mml:munder><mml:mrow><mml:mi>m</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mtext>&#x000A0;</mml:mtext></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>y</mml:mi><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003BB;</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>x</mml:mi><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where &#x003BB; is a scalar constant. The representation coefficient <italic>x</italic> can be simply combined with the linear classifier <italic>W</italic>. Then the final identification of the test sample <italic>y</italic> is obtained in the DL procedure with:</p>
<disp-formula id="E8"><label>(5)</label><mml:math id="M18"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo class="qopname">arg</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">max</mml:mo></mml:mrow><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:msub><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo class="qopname">&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mi>k</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where {&#x000B7;}<sub><italic>l</italic></sub> represents the <italic>l</italic>-th element in the brace, and <italic>x</italic> contains discriminant information for classification.</p>
</sec>
<sec>
<title>Multi-feature kernel SCDDL (MKSCDDL)</title>
<p>The SCDDL model is extended to a kernel framework for the further multi-feature fusion in our previous study (Wu et al., <xref ref-type="bibr" rid="B66">2017</xref>). Suppose &#x003D5;(&#x000B7;) is a mapping function from <italic>R</italic><sup><italic>N</italic></sup> to a higher dimensional feature space. To avoid the explicit high-dimensional mapping procedure, mercer kernels could be helpful. The common mercer kernels include the linear kernel <italic>k</italic>(<italic>x, y</italic>) &#x0003D; &#x02329;<italic>x, y</italic>&#x0232A;, which equals to non-mapping; the Gaussian kernels <inline-formula><mml:math id="M19"><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>e</mml:mi><mml:mi>x</mml:mi><mml:mi>p</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>x</mml:mi><mml:mo>-</mml:mo><mml:mi>y</mml:mi><mml:mo>|</mml:mo><mml:msup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:mfrac></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>; the polynomial kernels <italic>k</italic>(<italic>x, y</italic>) &#x0003D; (&#x02329;<italic>x, y</italic>&#x0232A; &#x0002B; <italic>c</italic>)<sup><italic>d</italic></sup> (<italic>c</italic> and <italic>d</italic> are parameters) and the sigmoid kernels <italic>k</italic>(<italic>x, y</italic>) &#x0003D; <italic>tanh</italic>(<italic>a</italic>(<italic>x</italic><sup><italic>T</italic></sup><italic>y</italic>) &#x0002B; <italic>r</italic>) (<italic>a</italic> and <italic>r</italic> are parameters) (Manevitz and Yousef, <xref ref-type="bibr" rid="B33">2001</xref>; Hussain et al., <xref ref-type="bibr" rid="B21">2011</xref>; Liu et al., <xref 
ref-type="bibr" rid="B29">2013</xref>; Pham and Pagh, <xref ref-type="bibr" rid="B43">2013</xref>; Dyrba et al., <xref ref-type="bibr" rid="B10">2015</xref>).</p>
<p>The training samples <italic>A</italic> and dictionary <italic>D</italic> can be mapped to a higher dimensional space by a function of &#x003D5;(&#x000B7;), then <italic>A</italic> and <italic>D</italic> in the SCDDL model can be replaced by <inline-formula><mml:math id="M20"><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mo>&#x000D7;</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> and <inline-formula><mml:math id="M21"><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mo>&#x000D7;</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> (<italic>d</italic><sub><italic>map</italic></sub> is the dimensional number in the mapping space) respectively for the kernel SCDDL framework as follows:</p>
<disp-formula id="E9"><label>(6)</label><mml:math id="M22"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo>&#x02329;</mml:mo><mml:mrow><mml:mi>D</mml:mi><mml:mo>,</mml:mo><mml:mi>W</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow><mml:mo>&#x0232A;</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mo>=</mml:mo></mml:mtd><mml:mtd><mml:mo class="qopname">arg</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mtext>min</mml:mtext></mml:mrow><mml:mrow><mml:mi>D</mml:mi><mml:mo>,</mml:mo><mml:mi>W</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:msubsup><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>H</mml:mi><mml:mo>-</mml:mo><mml:mi>W</mml:mi><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x0002B;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>&#x003B2;</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>W</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mn
>1</mml:mn></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x0002B;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>M</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>s</mml:mi><mml:mo>.</mml:mo><mml:mi>t</mml:mi><mml:mo>.</mml:mo><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mtd><mml:mtd><mml:mo>=</mml:mo></mml:mtd><mml:mtd><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>f</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mi>m</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>The dictionary can be represented by the training samples as Equation (7), according to the representer theorem (Sch&#x000F6;lkopf et al., <xref ref-type="bibr" rid="B49">2001</xref>):</p>
<disp-formula id="E11"><label>(7)</label><mml:math id="M24"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>V</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>V</italic> &#x02208; <italic>R</italic><sup><italic>n</italic>&#x000D7;<italic>m</italic></sup> is the representation matrix. Equation (6) can be transformed to Equation (8) with Equation (7):</p>
<disp-formula id="E13"><label>(8)</label><mml:math id="M26"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:mrow><mml:mo>&#x02329;</mml:mo><mml:mrow><mml:mi>V</mml:mi><mml:mo>,</mml:mo><mml:mi>W</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow><mml:mo>&#x0232A;</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>arg</mml:mi><mml:munder><mml:mrow><mml:mtext>min</mml:mtext></mml:mrow><mml:mrow><mml:mi>V</mml:mi><mml:mo>,</mml:mo><mml:mi>W</mml:mi><mml:mo>,</mml:mo><mml:mi>X</mml:mi></mml:mrow></mml:munder><mml:msubsup><mml:mrow><mml:mo>&#x02016;</mml:mo><mml:mrow><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>A</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x02212;</mml:mo><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>A</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mi>V</mml:mi><mml:mi>X</mml:mi></mml:mrow><mml:mo>&#x02016;</mml:mo></mml:mrow><mml:mi>F</mml:mi><mml:mn>2</mml:mn></mml:msubsup><mml:mo>+</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:msubsup><mml:mrow><mml:mo>&#x02016;</mml:mo><mml:mrow><mml:mi>H</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mi>W</mml:mi><mml:mi>X</mml:mi></mml:mrow><mml:mo>&#x02016;</mml:mo></mml:mrow><mml:mi>F</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;</mml:mtext><mml:mo>+</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>&#x003B2;</mml:mi><mml:msubsup><mml:mrow><mml:mo>&#x02016;</mml:mo><mml:mi>W</mml:mi><mml:mo>&#x02016;</mml:mo></mml:mrow><mml:mi>F</mml:mi><mml:mn>2</mml:mn></mml:msubsup><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x003BB;</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:msub><mml:mrow><mml:mo>&#x02016;</mml:mo><mml:mi>X</mml:mi><mml:mo>&#x02016;</mml:mo></mml:mrow><mml:mn>1</mml:mn></mml:msub><mml:mo>+</mml:mo><mml:msub>
<mml:mi>&#x003BB;</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>k</mml:mi></mml:munderover><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mrow><mml:mo>&#x02016;</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x02212;</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>&#x02016;</mml:mo></mml:mrow></mml:mrow><mml:mi>F</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mrow></mml:mrow></mml:mrow></mml:mstyle></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mrow><mml:mtext>&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;</mml:mtext><mml:mo>+</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msubsup><mml:mrow><mml:mrow><mml:mo>&#x02016;</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>&#x02016;</mml:mo></mml:mrow></mml:mrow><mml:mi>F</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>The optimization process of Equation (8) has been discussed in our previous study (Wu et al., <xref ref-type="bibr" rid="B66">2017</xref>). Then, the test sample <italic>y</italic> and dictionary <italic>D</italic> in Equation (4) can be replaced by <inline-formula><mml:math id="M27"><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02208;</mml:mo><mml:msup><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:mi>p</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:math></inline-formula> and &#x003D5;(<italic>A</italic>)<italic>V</italic> respectively as:</p>
<disp-formula id="E14"><label>(9)</label><mml:math id="M28"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>x</mml:mi><mml:mo>=</mml:mo><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mi>g</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">min</mml:mo></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>V</mml:mi><mml:mi>x</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003BB;</mml:mi><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where &#x003BB; is a scalar constant as above.</p>
<p>Let <inline-formula><mml:math id="M29"><mml:mi>T</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:munder class="msub"><mml:mrow><mml:mtext>min</mml:mtext></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:munder><mml:mtext>&#x000A0;</mml:mtext><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003D5;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>V</mml:mi><mml:mi>x</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula>, then <italic>T</italic>(<italic>x</italic>) can be simplified as:</p>
<disp-formula id="E15"><label>(10)</label><mml:math id="M30"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>T</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mtext class="textrm" mathvariant="normal">min</mml:mtext></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mi>t</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mi>P</mml:mi><mml:mi>x</mml:mi><mml:mo>-</mml:mo><mml:mn>2</mml:mn><mml:msup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mi>Q</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>S</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>P</italic> &#x0003D; <italic>V</italic><sup><italic>T</italic></sup><italic>k</italic>(<italic>A, A</italic>)<italic>V</italic>, <italic>Q</italic> &#x0003D; <italic>V</italic><sup><italic>T</italic></sup><italic>k</italic>(<italic>y, A</italic>), and <italic>S</italic> &#x0003D; <italic>k</italic>(<italic>y, y</italic>).</p>
<p>Using the conclusions of a previous study (Harandi and Salzmann, <xref ref-type="bibr" rid="B19">2015</xref>), Equation (10) is equivalent to:</p>
<disp-formula id="E16"><label>(11)</label><mml:math id="M31"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>T</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mi>g</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">min</mml:mo></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>&#x01EF9;</mml:mi><mml:mo>-</mml:mo><mml:mover accent="true"><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mo class="qopname">&#x0007E;</mml:mo></mml:mover><mml:mi>x</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <inline-formula><mml:math id="M32"><mml:mi>&#x01EF9;</mml:mi><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003A3;</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>U</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mi>Q</mml:mi></mml:math></inline-formula>, <inline-formula><mml:math id="M33"><mml:mover accent="true"><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mo>&#x0007E;</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003A3;</mml:mi></mml:mrow><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>U</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula>, and <italic>U &#x003A3; U</italic><sup><italic>T</italic></sup> is the SVD of <italic>P</italic> (Nguyen et al., <xref ref-type="bibr" rid="B38">2012</xref>). Then Equation (9) can be denoted as:</p>
<disp-formula id="E17"><label>(12)</label><mml:math id="M34"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo>&#x02329;</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo>&#x0232A;</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mi>g</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">min</mml:mo></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:msub><mml:mrow><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>&#x01EF9;</mml:mi><mml:mo>-</mml:mo><mml:mover accent="true"><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mo class="qopname">&#x0007E;</mml:mo></mml:mover><mml:mi>x</mml:mi><mml:mo>|</mml:mo><mml:msubsup><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x003BB;</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>|</mml:mo><mml:mo>|</mml:mo><mml:mi>X</mml:mi><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>The convex problems in Equation (12) can be efficiently solved by plenty of tools such as the <italic>L</italic><sub>1</sub>-magic software package (Candes and Romberg, <xref ref-type="bibr" rid="B4">2005</xref>), the GPSR package (Figueiredo et al., <xref ref-type="bibr" rid="B12">2007</xref>) and the <italic>L</italic><sub>1</sub>-homotopy package (Asif and Romberg, <xref ref-type="bibr" rid="B2">2010</xref>).</p>
<p>Finally, the identification of the test sample <italic>y</italic> can be employed using Equation (5) as follows:</p>
<disp-formula id="E18"><mml:math id="M35"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo class="qopname">arg</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mtext class="textrm" mathvariant="normal">max</mml:mtext></mml:mrow><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:msub><mml:mrow><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo class="qopname">&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mi>k</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where the {&#x000B7;}<sub><italic>l</italic></sub> represents the <italic>l</italic>-th element in the brace.</p>
<p>As it is shown in the MKL algorithm (Sonnenburg et al., <xref ref-type="bibr" rid="B52">2006</xref>), suppose there are <italic>J</italic> features for each sample, the kernel can be combined by convex combinations of <italic>J</italic> kernels, i.e.,</p>
<disp-formula id="E19"><label>(13)</label><mml:math id="M36"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>k</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>J</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msub><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02265;</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;&#x000A0;</mml:mtext><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>J</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where each sub-kernel <italic>k</italic><sub><italic>j</italic></sub> corresponds to feature <italic>j</italic>.</p>
<p>So far, the kernels involved in the solution of Equation (12) can be replaced by Equation (13) for the multi-feature fusion of MKSCDDL. The combination coefficients can be simply set to be equal across all the features or optimized by cross-validation on the training samples. The sub-kernels can be selected from linear kernel, polynomial kernels, Gaussian kernels and sigmoid kernels etc. After the substitution of the kernels involved in the solution of Equation (12), MKSCDDL is realized (Wu et al., <xref ref-type="bibr" rid="B66">2017</xref>).</p>
</sec>
<sec>
<title>Experimental setting</title>
<p>In the MKSCDDL model and the classification scheme, there are several parameters that need to be set, including the parameter &#x003B1; for the classification error term, &#x003BB; for the sparse coding term, &#x003BB;<sub>1</sub> for the sparsity term, and &#x003BB;<sub>2</sub> for the with-Class-similar term. Here, for simplicity, &#x003B1; was set with &#x003B1; &#x0003D; 1 to make the contribution of the classification error equal (Xu et al., <xref ref-type="bibr" rid="B69">2016</xref>). Furthermore, the parameter in the classification scheme &#x003BB; had little effect on the experimental results. So, &#x003BB; was set with &#x003BB; &#x0003D; 0.001 in the experiment. For the parameters in the optimization model &#x003BB;<sub>1</sub> and &#x003BB;<sub>2</sub>, the optimal values were searched from a small set of {0.001, 0.005, 0.01, 0.05, 0.1} with a 5-fold cross-validation on the training set (Wu et al., <xref ref-type="bibr" rid="B66">2017</xref>). For the AD and CU data set: &#x003BB;<sub>1</sub> &#x0003D; 0.001, &#x003BB;<sub>2</sub> &#x0003D; 0.1. For the MCI and CU data set: &#x003BB;<sub>1</sub> &#x0003D; 0.05, &#x003BB;<sub>2</sub> &#x0003D; 0.05. For the AD and MCI data set: &#x003BB;<sub>1</sub> &#x0003D; 0.05, &#x003BB;<sub>2</sub> &#x0003D; 0.005.</p>
<p>The dictionary size in MKSCDDL, mSCDDL, and SCDDL was set as 20 atoms (equivalent to 10 atoms for each class) for AD/CU, MCI/CU and AD/MCI classification; for MKL and JRC algorithms, all the training samples were trained for the model and classification; and for mSRC, all the training samples were used as a dictionary.</p>
<p>In this study, linear kernel was employed for MKSCDDL in the experiment. The combining weight parameters of three modalities for MKSCDDL were derived based on a grid search approach with the range of [0,1] at a step size of 0.1 with a 5-fold cross-validation on training set (Zhang et al., <xref ref-type="bibr" rid="B74">2011</xref>; Xu et al., <xref ref-type="bibr" rid="B68">2015</xref>, <xref ref-type="bibr" rid="B69">2016</xref>). Particularly, the combining weight parameters optimized corresponding to sMRI, FDG-PET and florbetapir-PET for classifying AD from CU are 0.5, 0.3, and 0.2; for discriminating MCI from CU are 0.2, 0.7, and 0.1; for detecting MCI from AD are 0.3, 0.6, and 0.1.</p>
<p>To evaluate the performance of all competing methods, their accuracy (the ratio of samples correctly classified among the test samples), sensitivity (the ratio of positive classes that were correctly identified), specificity (the ratio of negative classes that were accurately classified), and the areas under the Receiver Operating Characteristic (ROC) curves (AUC) were employed and compared in classification. For each group (AD, MCI, and CU), samples (subjects) were divided randomly into training and test sets. Sixty samples were selected randomly as the training set, and the rest comprised the test set. The division process was then repeated five times for the results of means and standard deviations, which were reported in this paper. Then, a two-sample t-test was carried out for each comparison pair to obtain the <italic>p</italic>-value.</p>
<p>In order to find the biomarkers for AD, MCI and CU classification, the 90 features were ranked according to the significance of the two-sample <italic>t</italic>-test. Then, the classification accuracy with different number (from 1 to 90) of the ranked 90 features has been calculated based on MKSCDDL (Zhang et al., <xref ref-type="bibr" rid="B74">2011</xref>; Xu et al., <xref ref-type="bibr" rid="B69">2016</xref>).</p>
</sec>
</sec>
<sec id="s4">
<title>Results and discussions</title>
<sec>
<title>Comparison with single-modality SCDDL</title>
<p>The performance of using single-modality SCDDL (SCDDL-sMRI, SCDDL-FDG-PET, and SCDDL-florbetapir-PET) and MKSCDDL (sMRI &#x0002B; FDG-PET &#x0002B; florbetapir-PET) were evaluated, as shown in Figures <xref ref-type="fig" rid="F1">1</xref>, <xref ref-type="fig" rid="F2">2</xref> and Table <xref ref-type="table" rid="T2">2</xref>, the MKSCDDL achieved higher accuracy in classifying AD, MCI, and CU than single-modality SCDDL methods.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Comparison of the ROC curves based on SCDDL-sMRI, SCDDL-FDG-PET, SCDDL-florbetapir-PET, and MKSCDDL <bold>(A)</bold> for classification AD and CU; <bold>(B)</bold> for classification MCI and CU; and <bold>(C)</bold> for classification AD and MCI.</p></caption>
<graphic xlink:href="fncom-11-00117-g0001.tif"/>
</fig>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Comparison of the areas under the ROC curves based on SCDDL-sMRI, SCDDL-FDG-PET, SCDDL-florbetapir-PET, and MKSCDDL <bold>(A)</bold> for classification AD and CU; <bold>(B)</bold> for classification MCI and CU; and <bold>(C)</bold> for classification AD and MCI (<sup>&#x0002A;&#x0002A;</sup>indicates 0.01 &#x02264; <italic>p</italic> &#x0003C; 0.05; <sup>&#x0002A;</sup>indicates 0.05 &#x02264; <italic>p</italic> &#x0003C; 0.10).</p></caption>
<graphic xlink:href="fncom-11-00117-g0002.tif"/>
</fig>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Comparison of the performance of single-modality (SCDDL-sMRI, SCDDL-FDG-PET, and SCDDL-florbetapir-PET) and multi-modality methods based on MKSCDDL in classification AD, CU; MCI, CU; and AD, MCI.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th valign="top" align="left"><bold>Algorithm</bold></th>
<th valign="top" align="center" colspan="4" style="border-bottom: thin solid #000000;"><bold>AD vs. CU (%)</bold></th>
<th valign="top" align="center" colspan="4" style="border-bottom: thin solid #000000;"><bold>MCI vs. CU (%)</bold></th>
<th valign="top" align="center" colspan="4" style="border-bottom: thin solid #000000;"><bold>AD vs. MCI (%)</bold></th>
</tr>
<tr>
<th/>
<th valign="top" align="center"><bold>ACC</bold></th>
<th valign="top" align="center"><bold>SE</bold></th>
<th valign="top" align="center"><bold>SP</bold></th>
<th valign="top" align="center"><bold>AUC</bold></th>
<th valign="top" align="center"><bold>ACC</bold></th>
<th valign="top" align="center"><bold>SE</bold></th>
<th valign="top" align="center"><bold>SP</bold></th>
<th valign="top" align="center"><bold>AUC</bold></th>
<th valign="top" align="center"><bold>ACC</bold></th>
<th valign="top" align="center"><bold>SE</bold></th>
<th valign="top" align="center"><bold>SP</bold></th>
<th valign="top" align="center"><bold>AUC</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">SCDDL-sMRI</td>
<td valign="top" align="center">88.27 &#x000B1; 1.51</td>
<td valign="top" align="center">94.53 &#x000B1; 1.88</td>
<td valign="top" align="center">82.46 &#x000B1; 1.85</td>
<td valign="top" align="center">93.90 &#x000B1; 0.80</td>
<td valign="top" align="center">71.96 &#x000B1; 1.08</td>
<td valign="top" align="center">69.80 &#x000B1; 1.63</td>
<td valign="top" align="center">72.11 &#x000B1; 1.54</td>
<td valign="top" align="center">78.70 &#x000B1; 0.76</td>
<td valign="top" align="center">63.69 &#x000B1; 3.45</td>
<td valign="top" align="center">60.85 &#x000B1; 2.85</td>
<td valign="top" align="center">65.21 &#x000B1; 2.58</td>
<td valign="top" align="center">68.70 &#x000B1; 1.26</td>
</tr>
<tr>
<td valign="top" align="left">SCDDL-FDG-PET</td>
<td valign="top" align="center">91.18 &#x000B1; 1.72</td>
<td valign="top" align="center">86.42 &#x000B1; 1.64</td>
<td valign="top" align="center">95.61 &#x000B1; 1.24</td>
<td valign="top" align="center">97.00 &#x000B1; 0.65</td>
<td valign="top" align="center">72.50 &#x000B1; 1.24</td>
<td valign="top" align="center">62.20 &#x000B1; 1.35</td>
<td valign="top" align="center"><bold>81.23 &#x000B1; 1.81</bold></td>
<td valign="top" align="center">76.20 &#x000B1; 0.53</td>
<td valign="top" align="center">72.23 &#x000B1; 2.85</td>
<td valign="top" align="center">65.20 &#x000B1; 2.95</td>
<td valign="top" align="center">75.60 &#x000B1; 3.23</td>
<td valign="top" align="center">74.20 &#x000B1; 1.44</td>
</tr>
<tr>
<td valign="top" align="left">SCDDL-florbetapir-PET</td>
<td valign="top" align="center">85.64 &#x000B1; 1.87</td>
<td valign="top" align="center">85.51 &#x000B1; 1.95</td>
<td valign="top" align="center">85.61 &#x000B1; 1.29</td>
<td valign="top" align="center">93.70 &#x000B1; 1.09</td>
<td valign="top" align="center">70.09 &#x000B1; 0.89</td>
<td valign="top" align="center">66.00 &#x000B1; 0.97</td>
<td valign="top" align="center">73.68 &#x000B1; 1.02</td>
<td valign="top" align="center">74.20 &#x000B1; 0.45</td>
<td valign="top" align="center">63.50 &#x000B1; 3.71</td>
<td valign="top" align="center">64.00 &#x000B1; 2.96</td>
<td valign="top" align="center">71.64 &#x000B1; 3.02</td>
<td valign="top" align="center">69.40 &#x000B1; 1.58</td>
</tr>
<tr>
<td valign="top" align="left">MKSCDDL</td>
<td valign="top" align="center"><bold>98.18 &#x000B1; 0.29</bold></td>
<td valign="top" align="center"><bold>99.81 &#x000B1; 0.35</bold></td>
<td valign="top" align="center"><bold>96.49 &#x000B1; 0.41</bold></td>
<td valign="top" align="center"><bold>99.10 &#x000B1; 0.60</bold></td>
<td valign="top" align="center"><bold>78.50 &#x000B1; 0.39</bold></td>
<td valign="top" align="center"><bold>76.00 &#x000B1; 0.42</bold></td>
<td valign="top" align="center">81.06 &#x000B1; 0.48</td>
<td valign="top" align="center"><bold>83.90 &#x000B1; 0.55</bold></td>
<td valign="top" align="center"><bold>74.47 &#x000B1; 1.02</bold></td>
<td valign="top" align="center"><bold>72.44 &#x000B1; 1.53</bold></td>
<td valign="top" align="center"><bold>78.99 &#x000B1; 1.52</bold></td>
<td valign="top" align="center"><bold>79.10 &#x000B1; 1.70</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>AD, Alzheimer&#x00027;s disease; CU, cognitively unimpaired; MCI, Mild Cognitive Impairment; ACC, classification accuracy; SE, classification sensitivity; SP, classification specificity; AUC, the area under the ROC curve. All the results had 5 folder cross-validations. The bold values mean the best performance in the corresponding column</italic>.</p>
</table-wrap-foot>
</table-wrap>
<p>For discriminating AD from CU, MKSCDDL achieved an accuracy of 98.18% (with 99.81% sensitivity and 96.49% specificity) that was much better than the best accuracy of 91.18% with single-modality method (using SCDDL-FDG-PET). Further, the comparison of the ROC curves for classification of AD and CU is shown in Figure <xref ref-type="fig" rid="F1">1A</xref>, and the comparison of AUCs is shown in Table <xref ref-type="table" rid="T2">2</xref>. The ROC curve of MKSCDDL was closer to the top-left corner than that of SCDDL-FDG-PET, SCDDL-florbetapir-PET, and SCDDL-sMRI. The AUC of MKSCDDL was 0.991, which was better than the single-modality methods (AUC &#x0003D; 0.939, <italic>p</italic> &#x0003D; 0.046 for SCDDL-sMRI; AUC &#x0003D; 0.937, <italic>p</italic> &#x0003D; 0.028 for SCDDL-florbetapir-PET; and AUC &#x0003D; 0.970, <italic>p</italic> &#x0003D; 0.151 for SCDDL-FDG-PET, which was not significant in validation, but was numerically greater) as shown in Figure <xref ref-type="fig" rid="F2">2A</xref>.</p>
<p>For classifying MCI from CU, MKSCDDL achieved an accuracy of 78.50% (with sensitivity of 76.00% and specificity of 81.06%), which was greater than all three single-modality methods (the best classification accuracy was 72.50% when using SCDDL-FDG-PET). The comparison of the ROC curves for classification of MCI and CU are shown in Figure <xref ref-type="fig" rid="F1">1B</xref> and the comparison of AUCs is shown in Table <xref ref-type="table" rid="T2">2</xref>. The ROC curve of MKSCDDL was closer to the top-left corner than that of SCDDL-sMRI, SCDDL-florbetapir-PET, and SCDDL-FDG-PET. Further, based on the significance validation, MKSCDDL was significantly much better than the single-modality methods with AUC, which was 0.839 for the multi-modality method compared with that of the single-modality methods (AUC &#x0003D; 0.762, <italic>p</italic> &#x0003D; 0.094 for SCDDL-FDG-PET; AUC &#x0003D; 0.742, <italic>p</italic> &#x0003D; 0.076 for SCDDL-florbetapir-PET; AUC &#x0003D; 0.787, <italic>p</italic> &#x0003D; 0.315 for SCDDL-sMRI, which were numerically better, though were not significant in validation) as shown in Figure <xref ref-type="fig" rid="F2">2B</xref>.</p>
<p>For classifying AD from MCI, MKSCDDL achieved an accuracy of 74.47% (with sensitivity of 72.44% and specificity of 78.99%), which was greater than all three single-modality methods (the best classification accuracy was 72.23% when using SCDDL-FDG-PET). The comparison of the ROC curves for classification of AD and MCI are shown in Figure <xref ref-type="fig" rid="F1">1C</xref> and the comparison of AUCs is shown in Table <xref ref-type="table" rid="T2">2</xref>. The ROC curve of MKSCDDL was closer to the top-left corner than that of SCDDL-sMRI, SCDDL-florbetapir-PET, and SCDDL-FDG-PET. Further, based on significant validation, MKSCDDL was significantly much better than the single-modality methods with AUC, which was 0.791 for the multi-modality method compared with that of the single-modality methods (AUC &#x0003D; 0.687, <italic>p</italic> &#x0003D; 0.091 for SCDDL-sMRI; AUC &#x0003D; 0.694, <italic>p</italic> &#x0003D; 0.107 for SCDDL-florbetapir-PET; and AUC &#x0003D; 0.742, <italic>p</italic> &#x0003D; 0.198 for SCDDL-FDG-PET, which was numerically better, though were not significant in validation) as shown in Figure <xref ref-type="fig" rid="F2">2C</xref>.</p>
<p>The MKSCDDL achieved better classification accuracy and AUC for AD, MCI, and CU classification than the methods based on single-modality SCDDL (SCDDL-sMRI, SCDDL-FDG-PET, and SCDDL-florbetapir-PET), as seen in the results above, either statistically or numerically. The results we derived here were also consistent with those of other studies that have reported fusing multiple modalities could obtain better classification accuracy (Zhang et al., <xref ref-type="bibr" rid="B74">2011</xref>; Westman et al., <xref ref-type="bibr" rid="B64">2012</xref>; Xu et al., <xref ref-type="bibr" rid="B69">2016</xref>).</p>
<p>Notably, on differentiating between MCI and CU, the classification specificity based on SCDDL-FDG-PET was 81.23%, which was slightly higher than that based on MKSCDDL (81.06%), whereas the classification sensitivity based on SCDDL-FDG-PET (62.20%) was much lower than that of MKSCDDL (76.00%). Lower sensitivity with only marginally higher specificity (which could be due to random noise) would result in underdiagnosis. The MKSCDDL method had higher sensitivity and outstanding specificity that was comparable with that of SCDDL-FDG-PET, and much higher than that of the other methods. Therefore, the results suggest the feasibility of using MKSCDDL for neuroimaging classification tasks. These meant that the MKSCDDL method was much or slightly better than SCDDL-florbetapir-PET, SCDDL-sMRI and SCDDL-FDG-PET in differentiating AD or MCI from CU.</p>
</sec>
<sec>
<title>Comparison with several other multi-modality methods</title>
<p>The performance of using MKL, JRC, mSRC, mSCDDL, and MKSCDDL were evaluated and compared, including recognition rate, ROC curve and testing time. As shown in Figures <xref ref-type="fig" rid="F3">3</xref>&#x02013;<xref ref-type="fig" rid="F5">5</xref> and Table <xref ref-type="table" rid="T3">3</xref>, the MKSCDDL achieved higher accuracy in classifying AD or MCI from CU than other multimodal methods, and outperformed them in testing time.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Comparison of the ROC curves based on JRC, MKL, mSRC, mSCDDL, and MKSCDDL <bold>(A)</bold> for classification AD and CU; <bold>(B)</bold> for classification MCI and CU; and <bold>(C)</bold> for classification AD and MCI.</p></caption>
<graphic xlink:href="fncom-11-00117-g0003.tif"/>
</fig>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Comparison of the performance of MKL, JRC, mSRC, mSCDDL, and MKSCDDL in classification AD, CU; MCI, CU; and AD, MCI.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th valign="top" align="left"><bold>Algorithm</bold></th>
<th valign="top" align="center" colspan="4" style="border-bottom: thin solid #000000;"><bold>AD vs. CU (%)</bold></th>
<th valign="top" align="center" colspan="4" style="border-bottom: thin solid #000000;"><bold>MCI vs. CU (%)</bold></th>
<th valign="top" align="center" colspan="4" style="border-bottom: thin solid #000000;"><bold>AD vs. MCI (%)</bold></th>
</tr>
<tr>
<th/>
<th valign="top" align="center"><bold>ACC</bold></th>
<th valign="top" align="center"><bold>SE</bold></th>
<th valign="top" align="center"><bold>SP</bold></th>
<th valign="top" align="center"><bold>AUC</bold></th>
<th valign="top" align="center"><bold>ACC</bold></th>
<th valign="top" align="center"><bold>SE</bold></th>
<th valign="top" align="center"><bold>SP</bold></th>
<th valign="top" align="center"><bold>AUC</bold></th>
<th valign="top" align="center"><bold>ACC</bold></th>
<th valign="top" align="center"><bold>SE</bold></th>
<th valign="top" align="center"><bold>SP</bold></th>
<th valign="top" align="center"><bold>AUC</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">MKL</td>
<td valign="top" align="center">93.64 &#x000B1; 0.87</td>
<td valign="top" align="center">96.23 &#x000B1; 0.69</td>
<td valign="top" align="center">91.23 &#x000B1; 1.07</td>
<td valign="top" align="center">96.30 &#x000B1; 1.90</td>
<td valign="top" align="center">74.77 &#x000B1; 0.74</td>
<td valign="top" align="center">74.00 &#x000B1; 1.01</td>
<td valign="top" align="center">75.44 &#x000B1; 1.14</td>
<td valign="top" align="center">80.40 &#x000B1; 1.03</td>
<td valign="top" align="center">72.94 &#x000B1; 1.87</td>
<td valign="top" align="center">72.04 &#x000B1; 1.56</td>
<td valign="top" align="center">74.10 &#x000B1; 1.95</td>
<td valign="top" align="center">77.90 &#x000B1; 1.60</td>
</tr>
<tr>
<td valign="top" align="left">JRC</td>
<td valign="top" align="center">94.55 &#x000B1; 1.18</td>
<td valign="top" align="center">98.11 &#x000B1; 1.85</td>
<td valign="top" align="center">91.23 &#x000B1; 1.42</td>
<td valign="top" align="center">97.10 &#x000B1; 1.45</td>
<td valign="top" align="center">73.83 &#x000B1; 1.02</td>
<td valign="top" align="center">72.00 &#x000B1; 1.40</td>
<td valign="top" align="center">75.44 &#x000B1; 1.25</td>
<td valign="top" align="center">79.30 &#x000B1; 1.77</td>
<td valign="top" align="center">72.05 &#x000B1; 1.98</td>
<td valign="top" align="center">70.68 &#x000B1; 2.03</td>
<td valign="top" align="center">73.23 &#x000B1; 2.23</td>
<td valign="top" align="center">77.20 &#x000B1; 1.83</td>
</tr>
<tr>
<td valign="top" align="left">mSRC</td>
<td valign="top" align="center">94.55 &#x000B1; 1.35</td>
<td valign="top" align="center">96.23 &#x000B1; 1.64</td>
<td valign="top" align="center">92.98 &#x000B1; 1.57</td>
<td valign="top" align="center">97.80 &#x000B1; 1.92</td>
<td valign="top" align="center">75.70 &#x000B1; 1.44</td>
<td valign="top" align="center">66.00 &#x000B1; 1.67</td>
<td valign="top" align="center"><bold>84.21 &#x000B1; 2.12</bold></td>
<td valign="top" align="center">78.50 &#x000B1; 2.04</td>
<td valign="top" align="center">68.55 &#x000B1; 2.01</td>
<td valign="top" align="center">64.26 &#x000B1; 2.44</td>
<td valign="top" align="center">74.66 &#x000B1; 2.54</td>
<td valign="top" align="center">69.30 &#x000B1; 2.03</td>
</tr>
<tr>
<td valign="top" align="left">mSCDDL</td>
<td valign="top" align="center">97.36 &#x000B1; 1.00</td>
<td valign="top" align="center">99.25 &#x000B1; 1.32</td>
<td valign="top" align="center">95.61 &#x000B1; 1.49</td>
<td valign="top" align="center">98.50 &#x000B1; 1.33</td>
<td valign="top" align="center">77.66 &#x000B1; 1.12</td>
<td valign="top" align="center">75.00 &#x000B1; 1.46</td>
<td valign="top" align="center">80.70 &#x000B1; 1.29</td>
<td valign="top" align="center">82.80 &#x000B1; 1.31</td>
<td valign="top" align="center">73.20 &#x000B1; 1.00</td>
<td valign="top" align="center">69.31 &#x000B1; 1.85</td>
<td valign="top" align="center">75.62 &#x000B1; 1.61</td>
<td valign="top" align="center">78.00 &#x000B1; 1.22</td>
</tr>
<tr>
<td valign="top" align="left">MKSCDDL</td>
<td valign="top" align="center"><bold>98.18 &#x000B1; 0.29</bold></td>
<td valign="top" align="center"><bold>99.81 &#x000B1; 0.35</bold></td>
<td valign="top" align="center"><bold>96.49 &#x000B1; 0.41</bold></td>
<td valign="top" align="center"><bold>99.10 &#x000B1; 0.60</bold></td>
<td valign="top" align="center"><bold>78.50 &#x000B1; 0.39</bold></td>
<td valign="top" align="center"><bold>76.00 &#x000B1; 0.42</bold></td>
<td valign="top" align="center">81.06 &#x000B1; 0.48</td>
<td valign="top" align="center"><bold>83.90 &#x000B1; 0.55</bold></td>
<td valign="top" align="center"><bold>74.47 &#x000B1; 1.02</bold></td>
<td valign="top" align="center"><bold>72.44 &#x000B1; 1.53</bold></td>
<td valign="top" align="center"><bold>78.99 &#x000B1; 1.52</bold></td>
<td valign="top" align="center"><bold>79.10 &#x000B1; 1.70</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>The bold values mean the best performance in the corresponding column</italic>.</p>
</table-wrap-foot>
</table-wrap>
<p>For differentiating AD from CU, MKSCDDL achieved an accuracy of 98.18%, which was higher than MKL (93.64%), JRC (94.55%), mSRC (94.55%), and mSCDDL (97.36%). The comparison of the ROC curves for classification of AD and CU is shown in Figure <xref ref-type="fig" rid="F3">3A</xref> and the comparison of AUCs is shown in Table <xref ref-type="table" rid="T3">3</xref>. The ROC curve of MKSCDDL was closer to the top-left corner than that of MKL, JRC, mSRC, and mSCDDL. The areas under the ROC curves for differentiation of AD and CU based on the five different methods are displayed in Figure <xref ref-type="fig" rid="F4">4A</xref>, in which the MKSCDDL method (AUC &#x0003D; 0.991) performed equally well statistically or numerically better than the other four multi-modality methods (AUC &#x0003D; 0.963, <italic>p</italic> &#x0003D; 0.095 for MKL; AUC &#x0003D; 0.971, <italic>p</italic> &#x0003D; 0.291 for JRC; AUC &#x0003D; 0.978, <italic>p</italic> &#x0003D; 0.429 for mSRC; and AUC &#x0003D; 0.985, <italic>p</italic> &#x0003D; 0.603 for mSCDDL). Figure <xref ref-type="fig" rid="F5">5</xref> shows the computational time for classification per test sample with the corresponding methods. As shown, MKSCDDL consumed much less testing time than JRC (<italic>p</italic> &#x0003D; 0.007), mSRC (<italic>p</italic> &#x0003D; 0.010), and mSCDDL (<italic>p</italic> &#x0003D; 0.036), and was comparable with the MKL (<italic>p</italic> &#x0003D; 0.208) method.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Comparison of the areas under the ROC curves based on MKL, JRC, mSRC, mSCDDL, and MKSCDDL <bold>(A)</bold> for classification AD and CU; <bold>(B)</bold> for classification MCI and CU; and <bold>(C)</bold> for classification AD and MCI (<sup>&#x0002A;</sup>indicates 0.05 &#x02264; <italic>p</italic> &#x0003C; 0.10).</p></caption>
<graphic xlink:href="fncom-11-00117-g0004.tif"/>
</fig>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>Comparison of testing time of different multi-modality methods for classification AD, MCI, and CU based on MKL, JRC, mSRC, mSCDDL, and MKSCDDL (<sup>&#x0002A;&#x0002A;</sup>indicates 0.01&#x02264; <italic>p</italic> &#x0003C; 0.05; <sup>&#x0002A;</sup>indicates 0.05 &#x02264; <italic>p</italic> &#x0003C; 0.10).</p></caption>
<graphic xlink:href="fncom-11-00117-g0005.tif"/>
</fig>
<p>For classifying MCI from CU, MKSCDDL achieved an accuracy of 78.50% (with sensitivity of 76.00% and specificity of 81.06%), which was greater than MKL (74.77%), JRC (73.83%), mSRC (75.70%), and mSCDDL (77.66%). The comparison of the ROC curves for classification of MCI and CU are shown in Figure <xref ref-type="fig" rid="F3">3B</xref> and the comparison of AUCs is shown in Table <xref ref-type="table" rid="T3">3</xref>. The ROC curve of MKSCDDL was closer to the top-left corner than that of MKL, JRC, mSRC, and mSCDDL. Further, based on significant validation, MKSCDDL was numerically better than the corresponding methods with AUC, which was 0.839 for the MKSCDDL method compared with that of the corresponding methods (AUC &#x0003D; 0.804, <italic>p</italic> &#x0003D; 0.534 for MKL; AUC &#x0003D; 0.793, <italic>p</italic> &#x0003D; 0.331 for JRC; AUC &#x0003D; 0.785, <italic>p</italic> &#x0003D; 0.223 for mSRC; and AUC &#x0003D; 0.828, <italic>p</italic> &#x0003D; 0.843 for mSCDDL), as shown in Figure <xref ref-type="fig" rid="F4">4B</xref>. As shown in Figure <xref ref-type="fig" rid="F5">5</xref>, MKSCDDL consumed much less testing time than JRC (<italic>p</italic> &#x0003D; 0.009), mSRC (<italic>p</italic> &#x0003D; 0.015) and mSCDDL (<italic>p</italic> &#x0003D; 0.047), and was comparable with the MKL (<italic>p</italic> &#x0003D; 0.389) method.</p>
<p>For classifying AD from MCI, MKSCDDL achieved an accuracy of 74.47% (with sensitivity of 72.44% and specificity of 78.99%), which was greater than MKL (72.94%), JRC (72.05%), mSRC (68.55%), and mSCDDL (73.20%). The comparison of the ROC curves for classification of AD and MCI are shown in Figure <xref ref-type="fig" rid="F3">3C</xref> and the comparison of AUCs is shown in Table <xref ref-type="table" rid="T3">3</xref>. The ROC curve of MKSCDDL was closer to the top-left corner than that of MKL, JRC, mSRC, and mSCDDL. Further, based on significance validation, MKSCDDL was numerically better than the corresponding methods with AUC, which was 0.791 for the MKSCDDL method compared with that of the corresponding methods (AUC &#x0003D; 0.779, <italic>p</italic> &#x0003D; 0.600 for MKL; AUC &#x0003D; 0.772, <italic>p</italic> &#x0003D; 0.477 for JRC; AUC &#x0003D; 0.693, <italic>p</italic> &#x0003D; 0.120 for mSRC; and AUC &#x0003D; 0.780, <italic>p</italic> &#x0003D; 0.593 for mSCDDL), as shown in Figure <xref ref-type="fig" rid="F4">4C</xref>. As shown in Figure <xref ref-type="fig" rid="F5">5</xref>, MKSCDDL consumed much less testing time than JRC (<italic>p</italic> &#x0003D; 0.011), mSRC (<italic>p</italic> &#x0003D; 0.019) and mSCDDL (<italic>p</italic> &#x0003D; 0.059), and was comparable with the MKL (<italic>p</italic> &#x0003D; 0.352) method.</p>
</sec>
<sec>
<title>Biomarkers for AD, MCI, and CU classification</title>
<p>To characterize the classification performance for AD, MCI, and CU with all 90 features (without feature selection), the classification accuracy has been investigated under feature selection with 1, 2, 3, &#x02026;, or 90 features for each of the ranked 90 features. The results of classification performance for different numbers of ranked features are shown in Figure <xref ref-type="fig" rid="F6">6</xref>.</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>Classification accuracy for AD, MCI, and CU with different feature dimensions based on MKSCDDL.</p></caption>
<graphic xlink:href="fncom-11-00117-g0006.tif"/>
</fig>
<p>The figure shows that the MKSCDDL method could reach strong classification accuracy even with fewer than 5 features (the top 5% ranked features on sMRI, FDG-PET, and florbetapir-PET) for AD/MCI/CU classification. In particular, there was higher than 90% accuracy for classifying AD from CU, higher than 78% accuracy for distinguishing MCI from CU, and higher than 61% accuracy for discriminating AD and MCI. The MKSCDDL method was stable (with fewer ups and downs) for the classification of AD/MCI from CU, which indicated that redundant features likely introduced little interference of classification. For classification of AD and MCI, though the accuracy was also acceptable, it was not as stable as the classification accuracy for AD/MCI with CU, which may be due to the biomarkers for AD and MCI having very high similarity. When the top 10% features were used, the accuracy for classification of AD and MCI was higher than 64%.</p>
<p>As shown in Figure <xref ref-type="fig" rid="F6">6</xref>, the MKSCDDL could achieve a promising or acceptable accuracy even with less than 5 features (the top 5% ranked features). Thus, for convenience, one could apply a small set of features to effectively discriminate AD, MCI, and CU. Here, the top 5&#x02013;10% ranked features (4&#x02013;9 features) consisted of sMRI, FDG-PET, and florbetapir-PET data and could be chosen as biomarkers for further classification (Xu et al., <xref ref-type="bibr" rid="B69">2016</xref>).</p>
<p>The biomarkers of different modalities for classification of the AD, MCI, and CU groups are displayed in Table <xref ref-type="table" rid="T4">4</xref> and Figure <xref ref-type="fig" rid="F7">7</xref>. For classification of AD and CU, the Hippocampus, Inferior Temporal, and ParaHippocampal may be the discriminating biomarkers on sMRI; the Angular, Posterior Cingulum, and Inferior Parietal may be the important regions on FDG-PET; and the Hippocampus and ParaHippocampal may be the key regions on florbetapir-PET. For discriminating MCI from CU, the Hippocampus, Middle Temporal, and ParaHippocampal may be the discriminating biomarkers on sMRI; the Angular and Posterior Cingulum may be the important regions on FDG-PET; and the Hippocampus, Posterior Cingulum, and Middle Frontal (Orbital part) may be the key regions on florbetapir-PET. For differentiating AD and MCI, the SupraMarginal, Angular, and left Superior Frontal (Orbital part) were the discriminating biomarkers on sMRI; the Angular, Inferior Parietal, and SupraMarginal may be the important regions on FDG-PET; and the Calcarine, Heschl, and Lingual may be the key regions on florbetapir-PET.</p>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>The most discriminating regions for classification of AD, MCI, and CU based on sMRI, FDG-PET, and florbetapir-PET.</p></caption>
<table frame="hsides" rules="groups">
<thead><tr>
<th valign="top" align="left"><bold>sMRI</bold></th>
<th valign="top" align="left"><bold>FDG-PET</bold></th>
<th valign="top" align="left"><bold>Florbetapir PET</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" colspan="3" style="background-color:#bbbdc0"><bold>AD vs. CU</bold></td>
</tr>
<tr>
<td valign="top" align="left">Left hippocampus</td>
<td valign="top" align="left">Left angular</td>
<td valign="top" align="left">Left hippocampus</td>
</tr>
<tr>
<td valign="top" align="left">Right hippocampus</td>
<td valign="top" align="left">Left posterior cingulum</td>
<td valign="top" align="left">Right hippocampus</td>
</tr>
<tr>
<td valign="top" align="left">Left inferior temporal</td>
<td valign="top" align="left">Right angular</td>
<td valign="top" align="left">Left parahippocampal</td>
</tr>
<tr>
<td valign="top" align="left">Right parahippocampal</td>
<td valign="top" align="left">Right inferior parietal</td>
<td valign="top" align="left">Right parahippocampal</td>
</tr>
<tr>
<td valign="top" align="left" colspan="3" style="background-color:#bbbdc0"><bold>MCI vs. CU</bold></td>
</tr>
<tr>
<td valign="top" align="left">Left hippocampus</td>
<td valign="top" align="left">Left posterior cingulum</td>
<td valign="top" align="left">Left hippocampus</td>
</tr>
<tr>
<td valign="top" align="left">Right hippocampus</td>
<td valign="top" align="left">Left angular</td>
<td valign="top" align="left">Right hippocampus</td>
</tr>
<tr>
<td valign="top" align="left">Left middle temporal</td>
<td valign="top" align="left">Right posterior cingulum</td>
<td valign="top" align="left">Right posterior cingulum</td>
</tr>
<tr>
<td valign="top" align="left">Right parahippocampal</td>
<td valign="top" align="left">Right angular</td>
<td valign="top" align="left">Left middle frontal (orbital part)</td>
</tr>
<tr>
<td valign="top" align="left" colspan="3" style="background-color:#bbbdc0"><bold>AD vs. MCI</bold></td>
</tr>
<tr>
<td valign="top" align="left">Left supramarginal</td>
<td valign="top" align="left">Right angular</td>
<td valign="top" align="left">Left calcarine</td>
</tr>
<tr>
<td valign="top" align="left">Right angular</td>
<td valign="top" align="left">Left angular</td>
<td valign="top" align="left">Left heschl</td>
</tr>
<tr>
<td valign="top" align="left">Right supramarginal</td>
<td valign="top" align="left">Right inferior parietal</td>
<td valign="top" align="left">Right lingual</td>
</tr>
<tr>
<td valign="top" align="left">Left superior frontal (orbital part)</td>
<td valign="top" align="left">Left supramarginal</td>
<td valign="top" align="left">Right heschl</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>Biomarkers with sMRI, FDG-PET, and florbetapir-PET <bold>(A)</bold> for classification of AD and CU; <bold>(B)</bold> for classification of MCI and CU; and <bold>(C)</bold> for classification of AD and MCI.</p></caption>
<graphic xlink:href="fncom-11-00117-g0007.tif"/>
</fig>
<p>For AD and CU classification, the Hippocampus (Wisse et al., <xref ref-type="bibr" rid="B65">2014</xref>; de Flores et al., <xref ref-type="bibr" rid="B7">2015</xref>; Voineskos et al., <xref ref-type="bibr" rid="B59">2015</xref>), Inferior Temporal (Seo et al., <xref ref-type="bibr" rid="B51">2017</xref>), ParaHippocampal (Guo et al., <xref ref-type="bibr" rid="B17">2014</xref>; Peng et al., <xref ref-type="bibr" rid="B40">2016</xref>), Angular (Sanabria-Diaz et al., <xref ref-type="bibr" rid="B48">2013</xref>), Posterior Cingulum (Nakata et al., <xref ref-type="bibr" rid="B37">2009</xref>; Demirhan et al., <xref ref-type="bibr" rid="B8">2015</xref>), and Inferior Parietal (Murray et al., <xref ref-type="bibr" rid="B36">2015</xref>; Zhang et al., <xref ref-type="bibr" rid="B77">2015</xref>) have been proposed in several studies to be effective biomarkers. The Hippocampus (Wee et al., <xref ref-type="bibr" rid="B61">2011</xref>; Zhou et al., <xref ref-type="bibr" rid="B78">2011</xref>; Liu et al., <xref ref-type="bibr" rid="B31">2014a</xref>), Middle Temporal (Lenzi et al., <xref ref-type="bibr" rid="B26">2011</xref>; Jiang et al., <xref ref-type="bibr" rid="B22">2014</xref>), ParaHippocampal (Cerami et al., <xref ref-type="bibr" rid="B5">2015</xref>; Kato et al., <xref ref-type="bibr" rid="B24">2016</xref>), Angular (Nobili et al., <xref ref-type="bibr" rid="B39">2010</xref>; Martlno et al., <xref ref-type="bibr" rid="B34">2013</xref>; Zu et al., <xref ref-type="bibr" rid="B82">2015</xref>), Posterior Cingulum (Choo et al., <xref ref-type="bibr" rid="B6">2010</xref>; Yu et al., <xref ref-type="bibr" rid="B71">2017</xref>), and Middle Frontal (Orbital part) (Xiang et al., <xref ref-type="bibr" rid="B67">2013</xref>) have been reported as the important regions for discriminating MCI and CU. 
For differentiating AD and MCI, the SupraMarginal (Esposito et al., <xref ref-type="bibr" rid="B11">2013</xref>; Moretti, <xref ref-type="bibr" rid="B35">2015</xref>), Angular (Hirao et al., <xref ref-type="bibr" rid="B20">2005</xref>; Griffith et al., <xref ref-type="bibr" rid="B16">2010</xref>; Li et al., <xref ref-type="bibr" rid="B28">2016</xref>), Superior Frontal (Orbital part) (Liu et al., <xref ref-type="bibr" rid="B32">2012</xref>), Inferior Parietal (Desikan et al., <xref ref-type="bibr" rid="B9">2009</xref>; Triplett et al., <xref ref-type="bibr" rid="B57">2016</xref>), Calcarine (Liu et al., <xref ref-type="bibr" rid="B32">2012</xref>), Heschl (Hanggi et al., <xref ref-type="bibr" rid="B18">2011</xref>), and Lingual (Li et al., <xref ref-type="bibr" rid="B28">2016</xref>) may be the key biomarkers for diagnosis.</p>
<p>Therefore, MKSCDDL proved to be a very efficient method for classifying AD or MCI from CU, and had potential to discriminate AD from MCI, as compared to the single-modality method and several state-of-the-art multi-modality methods. The MKSCDDL method performed better than MKL, JRC, mSRC, and mSCDDL in terms of accuracy rate and AUC, often significantly on validation but at least numerically for AD, MCI, and CU classification. In addition, the MKSCDDL method took less computation time than did JRC, mSRC, and mSCDDL, and was comparable to MKL in terms of computation time. Together, this indicates that the MKSCDDL method could potentially play an important role in AD and MCI diagnosis.</p>
</sec>
</sec>
<sec sec-type="conclusions" id="s5">
<title>Conclusions</title>
<p>In this study, a novel DL method, named MKSCDDL, with previous successful application to face recognition, was introduced combining sMRI, FDG-PET, and florbetapir-PET for differentiating AD, MCI, and CU. The results suggested that MKSCDDL is promising for classifying and diagnosing diseases with neuroimaging data.</p>
</sec>
<sec id="s6">
<title>Ethics statement</title>
<p>All procedures performed in studies involving human participants were in accordance with the ethical standards of the institutional and/or national research committee and with the 1964 Helsinki declaration and its later amendments or comparable ethical standards.</p>
<sec>
<title>Informed consent</title>
<p>Informed consent was obtained from all individual participants included in the study.</p>
</sec>
</sec>
<sec id="s7">
<title>Author contributions</title>
<p>XW, LY: designed the study. LX, KC: collected the original imaging data. QL, XW, KC: managed and analyzed the imaging data. QL and XW: wrote the manuscript. All authors contributed to and have approved the final manuscript.</p>
<sec>
<title>Conflict of interest statement</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
</sec>
</body>
<back>
<ack><p>The data set used in preparation of this paper was obtained from the Alzheimer&#x00027;s Disease Neuroimaging Initiative (ADNI) database (adni.loni.ucla.edu). As such, the investigators within the ADNI contributed to the design and implementation of ADNI and/or provided data but did not participate in the analysis or writing of this report. A complete listing of ADNI investigators can be found at: <ext-link ext-link-type="uri" xlink:href="https://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Data_Use_Agreement.pdf">https://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Data_Use_Agreement.pdf</ext-link>.</p>
</ack>
<sec sec-type="supplementary-material" id="s8">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fncom.2017.00117/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fncom.2017.00117/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Table1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ashburner</surname> <given-names>J.</given-names></name></person-group> (<year>2007</year>). <article-title>A fast diffeomorphic image registration algorithm</article-title>. <source>Neuroimage</source> <volume>38</volume>, <fpage>95</fpage>&#x02013;<lpage>113</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2007.07.007</pub-id><pub-id pub-id-type="pmid">17761438</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Asif</surname> <given-names>M. S.</given-names></name> <name><surname>Romberg</surname> <given-names>J.</given-names></name></person-group> (<year>2010</year>). <article-title>Dynamic updating for L1 minimization</article-title>. <source>IEEE J. Select. Top. Signal Process</source>. <volume>4</volume>, <fpage>421</fpage>&#x02013;<lpage>434</lpage>. <pub-id pub-id-type="doi">10.1109/JSTSP.2009.2039174</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Camus</surname> <given-names>V.</given-names></name> <name><surname>Payoux</surname> <given-names>P.</given-names></name> <name><surname>Barr&#x000E9;</surname> <given-names>L.</given-names></name> <name><surname>Desgranges</surname> <given-names>B.</given-names></name> <name><surname>Voisin</surname> <given-names>T.</given-names></name> <name><surname>Tauber</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Using PET with 18F-AV-45 (florbetapir) to quantify brain amyloid load in a clinical environment</article-title>. <source>Eur. J. Nucl. Med. Mol. Imaging</source> <volume>39</volume>, <fpage>621</fpage>&#x02013;<lpage>631</lpage>. <pub-id pub-id-type="doi">10.1007/s00259-011-2021-8</pub-id><pub-id pub-id-type="pmid">22252372</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Candes</surname> <given-names>E.</given-names></name> <name><surname>Romberg</surname> <given-names>J.</given-names></name></person-group> (<year>2005</year>). <source>l1-MAGIC: Recovery of Sparse Signals via Convex Programming</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://www.acm.caltech.edu/l1magic/downloads/l1magic.pdf">www.acm.caltech.edu/l1magic/downloads/l1magic.pdf</ext-link></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cerami</surname> <given-names>C.</given-names></name> <name><surname>Della Rosa</surname> <given-names>P. A.</given-names></name> <name><surname>Magnani</surname> <given-names>G.</given-names></name> <name><surname>Santangelo</surname> <given-names>R.</given-names></name> <name><surname>Marcone</surname> <given-names>A.</given-names></name> <name><surname>Cappa</surname> <given-names>S. F.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Brain metabolic maps in Mild Cognitive Impairment predict heterogeneity of progression to dementia</article-title>. <source>Neuroimage Clin</source>. <volume>7</volume>(<supplement>Suppl. C</supplement>), <fpage>187</fpage>&#x02013;<lpage>194</lpage>. <pub-id pub-id-type="doi">10.1016/j.nicl.2014.12.004</pub-id><pub-id pub-id-type="pmid">25610780</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Choo</surname> <given-names>I. H.</given-names></name> <name><surname>Lee</surname> <given-names>D. Y.</given-names></name> <name><surname>Oh</surname> <given-names>J. S.</given-names></name> <name><surname>Lee</surname> <given-names>J. S.</given-names></name> <name><surname>Lee</surname> <given-names>D. S.</given-names></name> <name><surname>Song</surname> <given-names>I. C.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>Posterior cingulate cortex atrophy and regional cingulum disruption in mild cognitive impairment and Alzheimer&#x00027;s disease</article-title>. <source>Neurobiol. Aging</source> <volume>31</volume>, <fpage>772</fpage>&#x02013;<lpage>779</lpage>. <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2008.06.015</pub-id><pub-id pub-id-type="pmid">18687503</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>de Flores</surname> <given-names>R.</given-names></name> <name><surname>La Joie</surname> <given-names>R.</given-names></name> <name><surname>Ch&#x000E9;telat</surname> <given-names>G.</given-names></name></person-group> (<year>2015</year>). <article-title>Structural imaging of hippocampal subfields in healthy aging and Alzheimer&#x00027;s disease</article-title>. <source>Neuroscience</source> <volume>309</volume>, <fpage>29</fpage>&#x02013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroscience.2015.08.033</pub-id><pub-id pub-id-type="pmid">26306871</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Demirhan</surname> <given-names>A.</given-names></name> <name><surname>Nir</surname> <given-names>T. M.</given-names></name> <name><surname>Zavaliangos-Petropulu</surname> <given-names>A.</given-names></name> <name><surname>Jack</surname> <given-names>C. R.</given-names></name> <name><surname>Weiner</surname> <given-names>M. W.</given-names></name> <name><surname>Bernstein</surname> <given-names>M. A.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Feature selection improves the accuracy of classifying alzheimer disease using diffusion tensor images</article-title>. <source>Proc. IEEE Int. Symp. Biomed. Imaging</source> <volume>2015</volume>, <fpage>126</fpage>&#x02013;<lpage>130</lpage>. <pub-id pub-id-type="doi">10.1109/ISBI.2015.7163832</pub-id><pub-id pub-id-type="pmid">26413201</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Desikan</surname> <given-names>R. S.</given-names></name> <name><surname>Cabral</surname> <given-names>H. J.</given-names></name> <name><surname>Fischl</surname> <given-names>B.</given-names></name> <name><surname>Guttmann</surname> <given-names>C. R. G.</given-names></name> <name><surname>Blacker</surname> <given-names>D.</given-names></name> <name><surname>Hyman</surname> <given-names>B. T.</given-names></name></person-group> (<year>2009</year>). <article-title>Temporoparietal MR imaging measures of atrophy in subjects with mild cognitive impairment that predict subsequent diagnosis of Alzheimer Disease</article-title>. <source>Am. J. Neuroradiol</source>. <volume>30</volume>, <fpage>532</fpage>&#x02013;<lpage>538</lpage>. <pub-id pub-id-type="doi">10.3174/ajnr.A1397</pub-id><pub-id pub-id-type="pmid">19112067</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dyrba</surname> <given-names>M.</given-names></name> <name><surname>Grothe</surname> <given-names>M.</given-names></name> <name><surname>Kirste</surname> <given-names>T.</given-names></name> <name><surname>Teipel</surname> <given-names>S. J.</given-names></name></person-group> (<year>2015</year>). <article-title>Multimodal analysis of functional and structural disconnection in Alzheimer&#x00027;s disease using multiple kernel SVM</article-title>. <source>Hum. Brain Mapp</source>. <volume>36</volume>, <fpage>2118</fpage>&#x02013;<lpage>2131</lpage>. <pub-id pub-id-type="doi">10.1002/hbm.22759</pub-id><pub-id pub-id-type="pmid">25664619</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Esposito</surname> <given-names>R.</given-names></name> <name><surname>Mosca</surname> <given-names>A.</given-names></name> <name><surname>Pieramico</surname> <given-names>V.</given-names></name> <name><surname>Cieri</surname> <given-names>F.</given-names></name> <name><surname>Cera</surname> <given-names>N.</given-names></name> <name><surname>Sensi</surname> <given-names>S. L.</given-names></name></person-group> (<year>2013</year>). <article-title>Characterization of resting state activity in MCI individuals</article-title>. <source>PeerJ</source>. <volume>1</volume>:<fpage>e135</fpage>. <pub-id pub-id-type="doi">10.7717/peerj.135</pub-id><pub-id pub-id-type="pmid">24010015</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Figueiredo</surname> <given-names>M. A.</given-names></name> <name><surname>Nowak</surname> <given-names>R. D.</given-names></name> <name><surname>Wright</surname> <given-names>S. J.</given-names></name></person-group> (<year>2007</year>). <article-title>Gradient projection for sparse reconstruction: application to compressed sensing and other inverse problems</article-title>. <source>IEEE J. Sel. Top. Signal Process</source>. <volume>1</volume>, <fpage>586</fpage>&#x02013;<lpage>597</lpage>. <pub-id pub-id-type="doi">10.1109/JSTSP.2007.910281</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gao</surname> <given-names>Y.</given-names></name> <name><surname>Tan</surname> <given-names>M. S.</given-names></name> <name><surname>Wang</surname> <given-names>H. F.</given-names></name> <name><surname>Zhang</surname> <given-names>W.</given-names></name> <name><surname>Wang</surname> <given-names>Z. X.</given-names></name> <name><surname>Jiang</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>ZCWPW is associated with late-onset Alzheimer&#x00027;s disease in Han Chinese: a replication study and meta-analyses</article-title>. <source>Oncotarget</source> <volume>7</volume>, <fpage>20305</fpage>&#x02013;<lpage>20311</lpage>. <pub-id pub-id-type="doi">10.18632/oncotarget.7945</pub-id><pub-id pub-id-type="pmid">26958812</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Garc&#x000E9;s</surname> <given-names>P.</given-names></name> <name><surname>Pineda-Pardo</surname> <given-names>J. A.</given-names></name> <name><surname>Canuet</surname> <given-names>L.</given-names></name> <name><surname>Aurtenetxe</surname> <given-names>S.</given-names></name> <name><surname>Lopez</surname> <given-names>M. E.</given-names></name> <name><surname>Marcos</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>The default mode network is functionally and structurally disrupted in amnestic mild cognitive impairment &#x02013; A bimodal MEG-DTI study</article-title>. <source>Neruoimage Clin</source>. <volume>6</volume>, <fpage>214</fpage>&#x02013;<lpage>221</lpage>. <pub-id pub-id-type="doi">10.1016/j.nicl.2014.09.004</pub-id><pub-id pub-id-type="pmid">25379433</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>G&#x000F6;nen</surname> <given-names>M.</given-names></name> <name><surname>Alpaydin</surname> <given-names>E.</given-names></name></person-group> (<year>2011</year>). <article-title>Multiple kernel learning algorithms</article-title>. <source>J. Mach. Learn. Res</source>. <volume>12</volume>, <fpage>2211</fpage>&#x02013;<lpage>2268</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://www.jmlr.org/papers/volume12/gonen11a/gonen11a.pdf">http://www.jmlr.org/papers/volume12/gonen11a/gonen11a.pdf</ext-link></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Griffith</surname> <given-names>H. R.</given-names></name> <name><surname>Stewart</surname> <given-names>C. C.</given-names></name> <name><surname>Stoeckel</surname> <given-names>L. E.</given-names></name> <name><surname>Okonkwo</surname> <given-names>O. C.</given-names></name> <name><surname>den Hollander</surname> <given-names>J. A.</given-names></name> <name><surname>Martin</surname> <given-names>R. C.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>MRI volume of the angular gyri predicts financial skill deficits in patients with amnestic mild cognitive impairment</article-title>. <source>J. Am. Geriatr. Soc</source>. <volume>58</volume>, <fpage>265</fpage>&#x02013;<lpage>274</lpage>. <pub-id pub-id-type="doi">10.1111/j.1532-5415.2009.02679.x</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guo</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>Z.</given-names></name> <name><surname>Zhou</surname> <given-names>B.</given-names></name> <name><surname>Wang</surname> <given-names>P.</given-names></name> <name><surname>Yao</surname> <given-names>H.</given-names></name> <name><surname>Yuan</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Grey-matter volume as a potential feature for the classification of Alzheimer&#x00027;s disease and mild cognitive impairment: an exploratory study</article-title>. <source>Neurosci. Bull.</source> <volume>30</volume>, <fpage>477</fpage>&#x02013;<lpage>489</lpage>. <pub-id pub-id-type="doi">10.1007/s12264-013-1432-x</pub-id><pub-id pub-id-type="pmid">24760581</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hanggi</surname> <given-names>J.</given-names></name> <name><surname>Streffer</surname> <given-names>J.</given-names></name> <name><surname>Jancke</surname> <given-names>L.</given-names></name> <name><surname>Hock</surname> <given-names>C.</given-names></name></person-group> (<year>2011</year>). <article-title>Volumes of lateral temporal and parietal structures distinguish between healthy aging, mild cognitive impairment, and Alzheimer&#x00027;s disease</article-title>. <source>J. Alzheimers Dis</source>. <volume>26</volume>, <fpage>719</fpage>&#x02013;<lpage>734</lpage>. <pub-id pub-id-type="doi">10.3233/JAD-2011-101260</pub-id><pub-id pub-id-type="pmid">21709375</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Harandi</surname> <given-names>M.</given-names></name> <name><surname>Salzmann</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>Riemannian coding and dictionary learning: kernels to the rescue</article-title>, in <source>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</source> (<publisher-loc>Boston, MA</publisher-loc>).</citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hirao</surname> <given-names>K.</given-names></name> <name><surname>Ohnishi</surname> <given-names>T.</given-names></name> <name><surname>Hirata</surname> <given-names>Y.</given-names></name> <name><surname>Yamashita</surname> <given-names>F.</given-names></name> <name><surname>Mori</surname> <given-names>T.</given-names></name> <name><surname>Moriguchi</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2005</year>). <article-title>The prediction of rapid conversion to Alzheimer&#x00027;s disease in mild cognitive impairment using regional cerebral blood flow SPECT</article-title>. <source>Neuroimage</source> <volume>28</volume>, <fpage>1014</fpage>&#x02013;<lpage>1021</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2005.06.066</pub-id><pub-id pub-id-type="pmid">16129627</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Hussain</surname> <given-names>M.</given-names></name> <name><surname>Wajid</surname> <given-names>S. K.</given-names></name> <name><surname>Elzaart</surname> <given-names>A.</given-names></name> <name><surname>Berbar</surname> <given-names>M.</given-names></name></person-group> (<year>2011</year>). <article-title>A comparison of SVM kernel functions for breast cancer detection, in Computer Graphics</article-title>, in <source>2011 Eighth International Conference on Imaging and Visualization (CGIV)</source> (<publisher-loc>Singapore</publisher-loc>).</citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname> <given-names>X.</given-names></name> <name><surname>Zhu</surname> <given-names>D.</given-names></name> <name><surname>Li</surname> <given-names>K.</given-names></name> <name><surname>Zhang</surname> <given-names>T.</given-names></name> <name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Predictive models of resting state networks for assessment of altered functional connectivity in mild cognitive impairment</article-title>. <source>Brain Imaging Behav</source>. <volume>8</volume>, <fpage>542</fpage>&#x02013;<lpage>557</lpage>. <pub-id pub-id-type="doi">10.1007/s11682-013-9280-x</pub-id><pub-id pub-id-type="pmid">24293138</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname> <given-names>Z.</given-names></name> <name><surname>Lin</surname> <given-names>Z.</given-names></name> <name><surname>Davis</surname> <given-names>L. S.</given-names></name></person-group> (<year>2013</year>). <article-title>Label consistent K-SVD: learning a discriminative dictionary for recognition</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell.</source> <volume>35</volume>, <fpage>2651</fpage>&#x02013;<lpage>2664</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2013.88</pub-id><pub-id pub-id-type="pmid">24051726</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kato</surname> <given-names>T.</given-names></name> <name><surname>Inui</surname> <given-names>Y.</given-names></name> <name><surname>Nakamura</surname> <given-names>A.</given-names></name> <name><surname>Ito</surname> <given-names>K.</given-names></name></person-group> (<year>2016</year>). <article-title>Brain fluorodeoxyglucose (FDG) PET in dementia</article-title>. <source>Ageing Res. Rev</source>. <volume>30</volume>(<supplement>Suppl. C</supplement>), <fpage>73</fpage>&#x02013;<lpage>84</lpage>. <pub-id pub-id-type="doi">10.1016/j.arr.2016.02.003</pub-id><pub-id pub-id-type="pmid">26876244</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Langbaum</surname> <given-names>J. B. S.</given-names></name> <name><surname>Chen</surname> <given-names>K.</given-names></name> <name><surname>Lee</surname> <given-names>W.</given-names></name> <name><surname>Reschke</surname> <given-names>C.</given-names></name> <name><surname>Bandy</surname> <given-names>D.</given-names></name> <name><surname>Fleisher</surname> <given-names>A. S.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>Categorical and correlational analyses of baseline fluorodeoxyglucose positron emission tomography images from the Alzheimer&#x00027;s Disease Neuroimaging Initiative (ADNI)</article-title>. <source>Neuroimage</source> <volume>45</volume>, <fpage>1107</fpage>&#x02013;<lpage>1116</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2008.12.072</pub-id><pub-id pub-id-type="pmid">19349228</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lenzi</surname> <given-names>D.</given-names></name> <name><surname>Serra</surname> <given-names>L.</given-names></name> <name><surname>Perri</surname> <given-names>R.</given-names></name> <name><surname>Pantano</surname> <given-names>P.</given-names></name> <name><surname>Lenzi</surname> <given-names>G. L.</given-names></name> <name><surname>Paulesu</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Single domain amnestic MCI: A multiple cognitive domains fMRI investigation</article-title>. <source>Neurobio. Aging</source> <volume>32</volume>, <fpage>1542</fpage>&#x02013;<lpage>1557</lpage>. <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2009.09.006</pub-id><pub-id pub-id-type="pmid">19880216</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Q.</given-names></name> <name><surname>Wu</surname> <given-names>X.</given-names></name> <name><surname>Xu</surname> <given-names>L.</given-names></name> <name><surname>Chen</surname> <given-names>K.</given-names></name> <name><surname>Yao</surname> <given-names>L.</given-names></name> <name><surname>Li</surname> <given-names>R.</given-names></name></person-group> (<year>2017</year>). <article-title>Multi-modal discriminative dictionary learning for Alzheimer&#x00027;s disease and mild cognitive impairment</article-title>. <source>Comput. Methods Programs Biomed.</source> <volume>150</volume>, <fpage>1</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/j.cmpb.2017.07.003</pub-id><pub-id pub-id-type="pmid">28859825</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Sun</surname> <given-names>Y.</given-names></name> <name><surname>Sheng</surname> <given-names>C.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Abnormal resting-state functional connectivity strength in mild cognitive impairment and its conversion to Alzheimer&#x00027;s Disease</article-title>. <source>Neural Plast.</source> <volume>2016</volume>:<fpage>4680972</fpage>. <pub-id pub-id-type="doi">10.1155/2016/4680972</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Suk</surname> <given-names>H. I.</given-names></name> <name><surname>Wee</surname> <given-names>C. Y.</given-names></name> <name><surname>Chen</surname> <given-names>H.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name></person-group> (<year>2013</year>). <article-title>High-order graph matching based feature selection for Alzheimer&#x00027;s Disease identification</article-title>. <source>Med. Image Comput. Comput Assist. Interv.</source> <volume>16</volume>(<issue>Pt 2</issue>), <fpage>311</fpage>&#x02013;<lpage>318</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-642-40763-5_39</pub-id><pub-id pub-id-type="pmid">24579155</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Wee</surname> <given-names>C. Y.</given-names></name> <name><surname>Chen</surname> <given-names>H.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name></person-group> (<year>2014b</year>). <article-title>Inter-modality relationship constrained multi-modality multi-task feature selection for Alzheimer&#x00027;s Disease and mild cognitive impairment identification</article-title>. <source>Neuroimage</source> <volume>84</volume>, <fpage>466</fpage>&#x02013;<lpage>475</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.09.015</pub-id><pub-id pub-id-type="pmid">24045077</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Zhou</surname> <given-names>L.</given-names></name> <name><surname>Shen</surname> <given-names>C.</given-names></name> <name><surname>Yin</surname> <given-names>J.</given-names></name></person-group> (<year>2014a</year>). <article-title>Multiple kernel learning in the primal for multimodal Alzheimer&#x00027;s disease classification</article-title>. <source>IEEE J. Biomed. Health Inform</source>. <volume>18</volume>, <fpage>984</fpage>&#x02013;<lpage>990</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2013.2285378</pub-id><pub-id pub-id-type="pmid">24132030</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>Z.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Yan</surname> <given-names>H.</given-names></name> <name><surname>Bai</surname> <given-names>L.</given-names></name> <name><surname>Dai</surname> <given-names>R.</given-names></name> <name><surname>Wei</surname> <given-names>W.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Altered topological patterns of brain networks in mild cognitive impairment and Alzheimer&#x00027;s disease: a resting-state fMRI study</article-title>. <source>Psychiatry Res. Neuroimaging</source> <volume>202</volume>, <fpage>118</fpage>&#x02013;<lpage>125</lpage>. <pub-id pub-id-type="doi">10.1016/j.pscychresns.2012.03.002</pub-id><pub-id pub-id-type="pmid">22695315</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Manevitz</surname> <given-names>L. M.</given-names></name> <name><surname>Yousef</surname> <given-names>M.</given-names></name></person-group> (<year>2001</year>). <article-title>One-class SVMs for document classification</article-title>. <source>J. Mach. Learn. Res</source>. <volume>2</volume>, <fpage>139</fpage>&#x02013;<lpage>154</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://www.jmlr.org/papers/volume2/manevitz01a/manevitz01a.pdf">http://www.jmlr.org/papers/volume2/manevitz01a/manevitz01a.pdf</ext-link></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Martino</surname> <given-names>M. E.</given-names></name> <name><surname>de Villoria</surname> <given-names>J. G.</given-names></name> <name><surname>Lacalle-Aurioles</surname> <given-names>M.</given-names></name> <name><surname>Olazar&#x000E1;n</surname> <given-names>J.</given-names></name> <name><surname>Cruz</surname> <given-names>I.</given-names></name> <name><surname>Navarro</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Comparison of different methods of spatial normalization of FDG-PET brain images in the voxel-wise analysis of MCI patients and controls</article-title>. <source>Ann. Nucl. Med</source>. <volume>27</volume>, <fpage>600</fpage>&#x02013;<lpage>609</lpage>. <pub-id pub-id-type="doi">10.1007/s12149-013-0723-7</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Moretti</surname> <given-names>D. V.</given-names></name></person-group> (<year>2015</year>). <article-title>Theta and alpha EEG frequency interplay in subjects with mild cognitive impairment: evidence from EEG, MRI, and SPECT brain modifications</article-title>. <source>Front. Aging Neurosci.</source> <volume>7</volume>:<fpage>31</fpage>. <pub-id pub-id-type="doi">10.3389/fnagi.2015.00031</pub-id><pub-id pub-id-type="pmid">25926789</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Murray</surname> <given-names>M. E.</given-names></name> <name><surname>Lowe</surname> <given-names>V. J.</given-names></name> <name><surname>Graff-Radford</surname> <given-names>N. R.</given-names></name> <name><surname>Liesinger</surname> <given-names>A. M.</given-names></name> <name><surname>Cannon</surname> <given-names>A.</given-names></name> <name><surname>Przybelski</surname> <given-names>S. A.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Clinicopathologic and 11C-Pittsburgh compound B implications of Thal amyloid phase across the Alzheimer&#x00027;s disease spectrum</article-title>. <source>Brain</source> <volume>138</volume>, <fpage>1370</fpage>&#x02013;<lpage>1381</lpage>. <pub-id pub-id-type="doi">10.1093/brain/awv050</pub-id><pub-id pub-id-type="pmid">25805643</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nakata</surname> <given-names>Y.</given-names></name> <name><surname>Sato</surname> <given-names>N.</given-names></name> <name><surname>Nemoto</surname> <given-names>K.</given-names></name> <name><surname>Abe</surname> <given-names>O.</given-names></name> <name><surname>Shikakura</surname> <given-names>S.</given-names></name> <name><surname>Arima</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>Diffusion abnormality in the posterior cingulum and hippocampal volume: correlation with disease progression in Alzheimer&#x00027;s disease</article-title>. <source>Magn. Reson. Imaging</source> <volume>27</volume>, <fpage>347</fpage>&#x02013;<lpage>354</lpage>. <pub-id pub-id-type="doi">10.1016/j.mri.2008.07.013</pub-id><pub-id pub-id-type="pmid">18771871</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Nguyen</surname> <given-names>H.</given-names></name> <name><surname>Patel</surname> <given-names>V. M.</given-names></name> <name><surname>Nasrabadi</surname> <given-names>N. M.</given-names></name> <name><surname>Chellappa</surname> <given-names>R.</given-names></name></person-group> (<year>2012</year>). <article-title>Kernel dictionary learning</article-title>, in <source>2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</source> (<publisher-loc>Kyoto</publisher-loc>), <fpage>2021</fpage>&#x02013;<lpage>2024</lpage>.</citation></ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nobili</surname> <given-names>F.</given-names></name> <name><surname>Mazzei</surname> <given-names>D.</given-names></name> <name><surname>Dessi</surname> <given-names>B.</given-names></name> <name><surname>Morbelli</surname> <given-names>S.</given-names></name> <name><surname>Brugnolo</surname> <given-names>A.</given-names></name> <name><surname>Barbieri</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>Unawareness of memory deficit in amnestic MCI: FDG-PET findings</article-title>. <source>J. Alzheimers Dis</source>. <volume>22</volume>, <fpage>993</fpage>&#x02013;<lpage>1003</lpage>. <pub-id pub-id-type="doi">10.3233/JAD-2010-100423</pub-id><pub-id pub-id-type="pmid">20858977</pub-id></citation></ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peng</surname> <given-names>J.</given-names></name> <name><surname>An</surname> <given-names>L.</given-names></name> <name><surname>Zhu</surname> <given-names>X.</given-names></name> <name><surname>Jin</surname> <given-names>Y.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name></person-group> (<year>2016</year>). <article-title>Structured sparse kernel learning for imaging genetics based Alzheimer&#x00027;s Disease diagnosis</article-title>. <source>Med. Image Comput. Comput. Assist. Interv</source>. <volume>9901</volume>, <fpage>70</fpage>&#x02013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-46723-8_9</pub-id><pub-id pub-id-type="pmid">28580458</pub-id></citation></ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Petersen</surname> <given-names>R. C.</given-names></name></person-group> (<year>2004</year>). <article-title>Mild cognitive impairment as a diagnostic entity</article-title>. <source>J. Intern. Med</source>. <volume>256</volume>, <fpage>183</fpage>&#x02013;<lpage>194</lpage>. <pub-id pub-id-type="doi">10.1111/j.1365-2796.2004.01388.x</pub-id><pub-id pub-id-type="pmid">15324362</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Petersen</surname> <given-names>R. C.</given-names></name> <name><surname>Stevens</surname> <given-names>J. C.</given-names></name> <name><surname>Ganguli</surname> <given-names>M.</given-names></name> <name><surname>Tangalos</surname> <given-names>E. G.</given-names></name> <name><surname>Cummings</surname> <given-names>J. L.</given-names></name> <name><surname>DeKosky</surname> <given-names>S. T.</given-names></name></person-group> (<year>2001</year>). <article-title>Practice parameter: early detection of dementia: mild cognitive impairment (an evidence-based review) Report of the Quality Standards Subcommittee of the American Academy of Neurology</article-title>. <source>Neurology</source> <volume>56</volume>, <fpage>1133</fpage>&#x02013;<lpage>1142</lpage>. <pub-id pub-id-type="doi">10.1212/WNL.56.9.1133</pub-id><pub-id pub-id-type="pmid">11342677</pub-id></citation></ref>
<ref id="B43">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Pham</surname> <given-names>N.</given-names></name> <name><surname>Pagh</surname> <given-names>R.</given-names></name></person-group> (<year>2013</year>). <article-title>Fast and scalable polynomial kernels via explicit feature maps</article-title>, in <source>Proceedings of the 19th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</source> (<publisher-loc>Chicago, IL</publisher-loc>), <fpage>239</fpage>&#x02013;<lpage>247</lpage>.</citation></ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rajapakse</surname> <given-names>J. C.</given-names></name> <name><surname>Giedd</surname> <given-names>J. N.</given-names></name> <name><surname>Rapoport</surname> <given-names>J. L.</given-names></name></person-group> (<year>1997</year>). <article-title>Statistical approach to segmentation of single-channel cerebral MR images</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>16</volume>, <fpage>176</fpage>&#x02013;<lpage>186</lpage>. <pub-id pub-id-type="doi">10.1109/42.563663</pub-id><pub-id pub-id-type="pmid">9101327</pub-id></citation></ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Reitan</surname> <given-names>R.</given-names></name></person-group> (<year>1958</year>). <article-title>Validity of the Trail Making Test as an indicator of organic brain damage</article-title>. <source>Percept. Mot. Skills</source> <volume>8</volume>, <fpage>271</fpage>&#x02013;<lpage>276</lpage>. <pub-id pub-id-type="doi">10.2466/pms.1958.8.3.271</pub-id></citation></ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sabbagh</surname> <given-names>M. N.</given-names></name> <name><surname>Chen</surname> <given-names>K.</given-names></name> <name><surname>Rogers</surname> <given-names>J.</given-names></name> <name><surname>Fleisher</surname> <given-names>A. S.</given-names></name> <name><surname>Liebsack</surname> <given-names>C.</given-names></name> <name><surname>Bandy</surname> <given-names>D.</given-names></name></person-group> (<year>2015</year>). <article-title>Florbetapir PET, FDG PET, and MRI in Down syndrome individuals with and without Alzheimer&#x00027;s dementia</article-title>. <source>Alzheimers Dement</source>. <volume>11</volume>, <fpage>994</fpage>&#x02013;<lpage>1004</lpage>. <pub-id pub-id-type="doi">10.1016/j.jalz.2015.01.006</pub-id><pub-id pub-id-type="pmid">25849033</pub-id></citation></ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Saint-Aubert</surname> <given-names>L.</given-names></name> <name><surname>Barbeau</surname> <given-names>E. J.</given-names></name> <name><surname>P&#x000E9;ran</surname> <given-names>P.</given-names></name> <name><surname>Nemmi</surname> <given-names>F.</given-names></name> <name><surname>Vervueren</surname> <given-names>C.</given-names></name> <name><surname>Mirabel</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Cortical florbetapir-PET amyloid load in prodromal Alzheimer&#x00027;s disease patients</article-title>. <source>EJNMMI Res.</source> <volume>3</volume>:<fpage>43</fpage>. <pub-id pub-id-type="doi">10.1186/2191-219X-3-43</pub-id><pub-id pub-id-type="pmid">23731789</pub-id></citation></ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sanabria-Diaz</surname> <given-names>G.</given-names></name> <name><surname>Martinez-Montes</surname> <given-names>E.</given-names></name> <name><surname>Melie-Garcia</surname> <given-names>L.</given-names></name></person-group> (<year>2013</year>). <article-title>Glucose metabolism during resting state reveals abnormal brain networks organization in the Alzheimer&#x00027;s Disease and mild cognitive impairment</article-title>. <source>PLoS ONE</source> <volume>8</volume>:<fpage>e68860</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0068860</pub-id><pub-id pub-id-type="pmid">23894356</pub-id></citation></ref>
<ref id="B49">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Sch&#x000F6;lkopf</surname> <given-names>B.</given-names></name> <name><surname>Herbrich</surname> <given-names>R.</given-names></name> <name><surname>Smola</surname> <given-names>A. J.</given-names></name></person-group> (<year>2001</year>). <article-title>A generalized representer theorem</article-title>, in <source>International Conference on Computational Learning Theory</source> (<publisher-loc>Amsterdam</publisher-loc>), <fpage>416</fpage>&#x02013;<lpage>426</lpage>.</citation></ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Selkoe</surname> <given-names>D. J.</given-names></name> <name><surname>Hardy</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>The amyloid hypothesis of Alzheimer&#x00027;s disease at 25 years</article-title>. <source>EMBO Mol. Med</source>. <volume>8</volume>, <fpage>595</fpage>&#x02013;<lpage>608</lpage>. <pub-id pub-id-type="doi">10.15252/emmm.201606210</pub-id><pub-id pub-id-type="pmid">27025652</pub-id></citation></ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seo</surname> <given-names>S. W.</given-names></name> <name><surname>Ayakta</surname> <given-names>N.</given-names></name> <name><surname>Grinberg</surname> <given-names>L. T.</given-names></name> <name><surname>Villeneuve</surname> <given-names>S.</given-names></name> <name><surname>Lehmann</surname> <given-names>M.</given-names></name> <name><surname>Reed</surname> <given-names>B.</given-names></name> <name><surname>Rabinovici</surname> <given-names>G. D.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Regional correlations between [11C]PIB PET and post-mortem burden of amyloid-beta pathology in a diverse neuropathological cohort</article-title>. <source>Neuroimage Clin.</source> <volume>13</volume>(<supplement>Suppl. C</supplement>), <fpage>130</fpage>&#x02013;<lpage>137</lpage>. <pub-id pub-id-type="doi">10.1016/j.nicl.2016.11.008</pub-id></citation></ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sonnenburg</surname> <given-names>S.</given-names></name> <name><surname>R&#x000E4;tsch</surname> <given-names>G.</given-names></name> <name><surname>Sch&#x000E4;fer</surname> <given-names>C.</given-names></name> <name><surname>Sch&#x000F6;lkopf</surname> <given-names>B.</given-names></name></person-group> (<year>2006</year>). <article-title>Large scale multiple kernel learning</article-title>. <source>J. Mach. Learn. Res</source>. <volume>7</volume>, <fpage>1531</fpage>&#x02013;<lpage>1565</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://www.jmlr.org/papers/volume7/sonnenburg06a/sonnenburg06a.pdf">http://www.jmlr.org/papers/volume7/sonnenburg06a/sonnenburg06a.pdf</ext-link></citation></ref>
<ref id="B53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Suk</surname> <given-names>H. I.</given-names></name> <name><surname>Lee</surname> <given-names>S. W.</given-names></name> <name><surname>Shen</surname> <given-names>D. G.</given-names></name></person-group> (<year>2015</year>). <article-title>Latent feature representation with stacked auto-encoder for AD/MCI diagnosis</article-title>. <source>Brain Struct. Funct.</source> <volume>220</volume>, <fpage>841</fpage>&#x02013;<lpage>859</lpage>. <pub-id pub-id-type="doi">10.1007/s00429-013-0687-3</pub-id><pub-id pub-id-type="pmid">24363140</pub-id></citation></ref>
<ref id="B54">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Suk</surname> <given-names>H. I.</given-names></name> <name><surname>Wee</surname> <given-names>C. Y.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name></person-group> (<year>2013</year>). <article-title>Discriminative group sparse representation for mild cognitive impairment classification</article-title>, in <source>International Workshop on Machine Learning in Medical Imaging</source> (<publisher-loc>Nagoya</publisher-loc>), <volume>8184</volume>, <fpage>131</fpage>&#x02013;<lpage>138</lpage>.</citation></ref>
<ref id="B55">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tan</surname> <given-names>M. S.</given-names></name> <name><surname>Yu</surname> <given-names>J. T.</given-names></name> <name><surname>Jiang</surname> <given-names>T.</given-names></name> <name><surname>Zhu</surname> <given-names>X. C.</given-names></name> <name><surname>Wang</surname> <given-names>H. F.</given-names></name> <name><surname>Zhang</surname> <given-names>W.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>NLRP3 polymorphisms are associated with late-onset Alzheimer&#x00027;s disease in Han Chinese</article-title>. <source>J. Neuroimmunol</source>. <volume>265</volume>, <fpage>91</fpage>&#x02013;<lpage>95</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneuroim.2013.10.002</pub-id><pub-id pub-id-type="pmid">24144834</pub-id></citation></ref>
<ref id="B56">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tohka</surname> <given-names>J.</given-names></name> <name><surname>Zijdenbos</surname> <given-names>A.</given-names></name> <name><surname>Evans</surname> <given-names>A.</given-names></name></person-group> (<year>2004</year>). <article-title>Fast and robust parameter estimation for statistical partial volume models in brain MRI</article-title>. <source>Neuroimage</source> <volume>23</volume>, <fpage>84</fpage>&#x02013;<lpage>97</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2004.05.007</pub-id><pub-id pub-id-type="pmid">15325355</pub-id></citation></ref>
<ref id="B57">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Triplett</surname> <given-names>J. C.</given-names></name> <name><surname>Swomley</surname> <given-names>A. M.</given-names></name> <name><surname>Cai</surname> <given-names>J.</given-names></name> <name><surname>Klein</surname> <given-names>J. B.</given-names></name> <name><surname>Butterfield</surname> <given-names>D. A.</given-names></name></person-group> (<year>2016</year>). <article-title>Quantitative phosphoproteomic analyses of the inferior parietal lobule from three different pathological stages of Alzheimer&#x00027;s Disease</article-title>. <source>J. Alzheimers Dis</source>. <volume>49</volume>, <fpage>45</fpage>&#x02013;<lpage>62</lpage>. <pub-id pub-id-type="doi">10.3233/JAD-150417</pub-id><pub-id pub-id-type="pmid">26444780</pub-id></citation></ref>
<ref id="B58">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tzourio-Mazoyer</surname> <given-names>N.</given-names></name> <name><surname>Landeau</surname> <given-names>B.</given-names></name> <name><surname>Papathanassiou</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2002</year>). <article-title>Automated anatomical labeling of activations in SPM using a macroscopic anatomical parcellation of the MNI MRI single-subject brain</article-title>. <source>Neuroimage</source> <volume>15</volume>, <fpage>273</fpage>&#x02013;<lpage>289</lpage>. <pub-id pub-id-type="doi">10.1006/nimg.2001.0978</pub-id><pub-id pub-id-type="pmid">11771995</pub-id></citation></ref>
<ref id="B59">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Voineskos</surname> <given-names>A. N.</given-names></name> <name><surname>Winterburn</surname> <given-names>J. L.</given-names></name> <name><surname>Felsky</surname> <given-names>D.</given-names></name> <name><surname>Pipitone</surname> <given-names>J.</given-names></name> <name><surname>Rajji</surname> <given-names>T. K.</given-names></name> <name><surname>Mulsant</surname> <given-names>B. H.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Hippocampal (subfield) volume and shape in relation to cognitive performance across the adult lifespan</article-title>. <source>Hum. Brain Mapp</source>. <volume>36</volume>, <fpage>3020</fpage>&#x02013;<lpage>3037</lpage>. <pub-id pub-id-type="doi">10.1002/hbm.22825</pub-id><pub-id pub-id-type="pmid">25959503</pub-id></citation></ref>
<ref id="B60">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>P.</given-names></name> <name><surname>Chen</surname> <given-names>K.</given-names></name> <name><surname>Yao</surname> <given-names>L.</given-names></name> <name><surname>Hu</surname> <given-names>B.</given-names></name> <name><surname>Wu</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Alzheimer&#x00027;s Disease neuroimaging initiative, multimodal classification of mild cognitive impairment based on partial least squares</article-title>. <source>J. Alzheimers Dis.</source> <volume>54</volume>, <fpage>359</fpage>&#x02013;<lpage>371</lpage>. <pub-id pub-id-type="doi">10.3233/JAD-160102</pub-id></citation></ref>
<ref id="B61">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wee</surname> <given-names>C. Y.</given-names></name> <name><surname>Yap</surname> <given-names>P. T.</given-names></name> <name><surname>Li</surname> <given-names>W.</given-names></name> <name><surname>Denny</surname> <given-names>K.</given-names></name> <name><surname>Browndyke</surname> <given-names>J. N.</given-names></name> <name><surname>Potter</surname> <given-names>G. G.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Enriched white-matter connectivity networks for accurate identification of mci patients</article-title>. <source>Neuroimage</source> <volume>54</volume>, <fpage>1812</fpage>&#x02013;<lpage>1822</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2010.10.026</pub-id><pub-id pub-id-type="pmid">20970508</pub-id></citation></ref>
<ref id="B62">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wee</surname> <given-names>C. Y.</given-names></name> <name><surname>Yap</surname> <given-names>P. T.</given-names></name> <name><surname>Zhang</surname> <given-names>D.</given-names></name> <name><surname>Denny</surname> <given-names>K.</given-names></name> <name><surname>Browndyke</surname> <given-names>J. N.</given-names></name> <name><surname>Potter</surname> <given-names>G. G.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Identification of MCI individuals using structural and functional connectivity networks</article-title>. <source>Neuroimage</source> <volume>59</volume>, <fpage>2045</fpage>&#x02013;<lpage>2056</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.10.015</pub-id><pub-id pub-id-type="pmid">22019883</pub-id></citation></ref>
<ref id="B63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wei</surname> <given-names>R.</given-names></name> <name><surname>Li</surname> <given-names>C.</given-names></name> <name><surname>Fogelson</surname> <given-names>N.</given-names></name> <name><surname>Li</surname> <given-names>L.</given-names></name> <collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab></person-group> (<year>2016</year>). <article-title>Prediction of conversion from mild cognitive impairment to Alzheimer&#x00027;s Disease using MRI and structural network features</article-title>. <source>Front. Aging Neurosci.</source> <volume>8</volume>:<fpage>76</fpage>. <pub-id pub-id-type="doi">10.3389/fnagi.2016.00076</pub-id><pub-id pub-id-type="pmid">27148045</pub-id></citation></ref>
<ref id="B64">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Westman</surname> <given-names>E.</given-names></name> <name><surname>Muehlboeck</surname> <given-names>J.</given-names></name> <name><surname>Simmons</surname> <given-names>A.</given-names></name></person-group> (<year>2012</year>). <article-title>Combining MRI and CSF measures for classification of Alzheimer&#x00027;s disease and prediction of mild cognitive impairment conversion</article-title>. <source>Neuroimage</source> <volume>62</volume>, <fpage>229</fpage>&#x02013;<lpage>238</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.04.056</pub-id><pub-id pub-id-type="pmid">22580170</pub-id></citation></ref>
<ref id="B65">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wisse</surname> <given-names>L. E.</given-names></name> <name><surname>Biessels</surname> <given-names>G. J.</given-names></name> <name><surname>Heringa</surname> <given-names>S. M.</given-names></name> <name><surname>Kuijf</surname> <given-names>H. J.</given-names></name> <name><surname>Koek</surname> <given-names>D. K.</given-names></name> <name><surname>Luijten</surname> <given-names>P. R.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Hippocampal subfield volumes at 7T in early Alzheimer&#x00027;s disease and normal aging</article-title>. <source>Neurobiol. Aging</source> <volume>35</volume>, <fpage>2039</fpage>&#x02013;<lpage>2045</lpage>. <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2014.02.021</pub-id><pub-id pub-id-type="pmid">24684788</pub-id></citation></ref>
<ref id="B66">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>X.</given-names></name> <name><surname>Li</surname> <given-names>Q.</given-names></name> <name><surname>Xu</surname> <given-names>L.</given-names></name> <name><surname>Chen</surname> <given-names>K.</given-names></name> <name><surname>Yao</surname> <given-names>L.</given-names></name></person-group> (<year>2017</year>). <article-title>Multi-feature kernel discriminant dictionary learning for face recognition</article-title>. <source>Pattern Recogn</source>. <volume>66</volume>, <fpage>404</fpage>&#x02013;<lpage>411</lpage>. <pub-id pub-id-type="doi">10.1016/j.patcog.2016.12.001</pub-id></citation></ref>
<ref id="B67">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xiang</surname> <given-names>J.</given-names></name> <name><surname>Guo</surname> <given-names>H.</given-names></name> <name><surname>Cao</surname> <given-names>R.</given-names></name> <name><surname>Liang</surname> <given-names>H.</given-names></name> <name><surname>Chen</surname> <given-names>J.</given-names></name></person-group> (<year>2013</year>). <article-title>An abnormal resting-state functional brain network indicates progression towards Alzheimer&#x00027;s disease</article-title>. <source>Neural Regen. Res</source>. <volume>8</volume>, <fpage>2789</fpage>&#x02013;<lpage>2799</lpage>. <pub-id pub-id-type="doi">10.3969/j.issn.1673-5374.2013.30.001</pub-id><pub-id pub-id-type="pmid">25206600</pub-id></citation></ref>
<ref id="B68">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>L.</given-names></name> <name><surname>Wu</surname> <given-names>X.</given-names></name> <name><surname>Chen</surname> <given-names>K.</given-names></name> <name><surname>Yao</surname> <given-names>L.</given-names></name></person-group> (<year>2015</year>). <article-title>Multi-modality sparse representation-based classification for Alzheimer&#x00027;s disease and mild cognitive impairment</article-title>. <source>Comput. Methods Programs Biomed.</source> <volume>122</volume>, <fpage>182</fpage>&#x02013;<lpage>190</lpage>. <pub-id pub-id-type="doi">10.1016/j.cmpb.2015.08.004</pub-id><pub-id pub-id-type="pmid">26298855</pub-id></citation></ref>
<ref id="B69">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>L.</given-names></name> <name><surname>Wu</surname> <given-names>X.</given-names></name> <name><surname>Li</surname> <given-names>R.</given-names></name> <name><surname>Chen</surname> <given-names>K.</given-names></name> <name><surname>Long</surname> <given-names>Z.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Prediction of progressive mild cognitive impairment by multi-modal neuroimaging biomarkers</article-title>. <source>J. Alzheimers Dis</source>. <volume>51</volume>, <fpage>1045</fpage>&#x02013;<lpage>1056</lpage>. <pub-id pub-id-type="doi">10.3233/JAD-151010</pub-id><pub-id pub-id-type="pmid">26923024</pub-id></citation></ref>
<ref id="B70">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>M.</given-names></name> <name><surname>Zhang</surname> <given-names>L.</given-names></name> <name><surname>Feng</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>D.</given-names></name></person-group> (<year>2014</year>). <article-title>Sparse representation based Fisher discrimination dictionary learning for image classification</article-title>. <source>Int. J. Comput. Vis.</source> <volume>109</volume>, <fpage>209</fpage>&#x02013;<lpage>232</lpage>. <pub-id pub-id-type="doi">10.1007/s11263-014-0722-8</pub-id></citation></ref>
<ref id="B71">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yu</surname> <given-names>E.</given-names></name> <name><surname>Liao</surname> <given-names>Z.</given-names></name> <name><surname>Mao</surname> <given-names>D.</given-names></name> <name><surname>Zhang</surname> <given-names>Q.</given-names></name> <name><surname>Ji</surname> <given-names>G.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Directed functional connectivity of posterior cingulate cortex and whole brain in Alzheimer&#x00027;s Disease and mild cognitive impairment</article-title>. <source>Curr. Alzheimer Res</source>. <volume>14</volume>, <fpage>628</fpage>&#x02013;<lpage>635</lpage>. <pub-id pub-id-type="doi">10.2174/1567205013666161201201000</pub-id><pub-id pub-id-type="pmid">27915993</pub-id></citation></ref>
<ref id="B72">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>D.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name> <collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab></person-group> (<year>2012a</year>). <article-title>Multi-modal multi-task learning for joint prediction of multiple regression and classification variables in Alzheimer&#x00027;s disease</article-title>. <source>Neuroimage</source> <volume>59</volume>, <fpage>895</fpage>&#x02013;<lpage>907</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.09.069</pub-id><pub-id pub-id-type="pmid">21992749</pub-id></citation></ref>
<ref id="B73">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>D.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name> <collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab></person-group> (<year>2012b</year>). <article-title>Predicting future clinical changes of MCI patients using longitudinal and multimodal biomarkers</article-title>. <source>PLoS ONE</source> <volume>7</volume>:<fpage>e33182</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0033182</pub-id><pub-id pub-id-type="pmid">22457741</pub-id></citation></ref>
<ref id="B74">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>D.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Zhou</surname> <given-names>L.</given-names></name> <name><surname>Yuan</surname> <given-names>H.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name> <collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab></person-group> (<year>2011</year>). <article-title>Multimodal classification of Alzheimer&#x00027;s disease and mild cognitive impairment</article-title>. <source>Neuroimage</source> <volume>55</volume>, <fpage>856</fpage>&#x02013;<lpage>867</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.01.008</pub-id><pub-id pub-id-type="pmid">21236349</pub-id></citation></ref>
<ref id="B75">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Q.</given-names></name> <name><surname>Li</surname> <given-names>B.</given-names></name></person-group> (<year>2010</year>). <article-title>Discriminative K-SVD for dictionary learning in face recognition</article-title>, in <source>IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</source> (<publisher-loc>San Francisco, CA</publisher-loc>), <fpage>2691</fpage>&#x02013;<lpage>2698</lpage>.</citation></ref>
<ref id="B76">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>S.</given-names></name> <name><surname>Smailagic</surname> <given-names>N.</given-names></name> <name><surname>Hyde</surname> <given-names>C.</given-names></name> <name><surname>Noel-Storr</surname> <given-names>A. H.</given-names></name> <name><surname>Takwoingi</surname> <given-names>Y.</given-names></name> <name><surname>McShane</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title><sup>11</sup>C-PIB-PET for the early diagnosis of Alzheimer&#x00027;s disease dementia and other dementias in people with mild cognitive impairment (MCI)</article-title>. <source>Cochrane Database Syst. Rev</source>. <volume>7</volume>:<fpage>CD010386</fpage>. <pub-id pub-id-type="doi">10.1002/14651858.CD010386.pub2</pub-id><pub-id pub-id-type="pmid">25052054</pub-id></citation></ref>
<ref id="B77">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Dong</surname> <given-names>Z.</given-names></name> <name><surname>Phillips</surname> <given-names>P.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name> <name><surname>Ji</surname> <given-names>G.</given-names></name> <name><surname>Yang</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Detection of subjects and brain regions related to Alzheimer&#x00027;s disease using 3D MRI scans based on eigenbrain and machine learning</article-title>. <source>Front. Comput. Neurosci.</source> <volume>9</volume>:<fpage>66</fpage>. <pub-id pub-id-type="doi">10.3389/fncom.2015.00066</pub-id><pub-id pub-id-type="pmid">26082713</pub-id></citation></ref>
<ref id="B78">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>L.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Yap</surname> <given-names>P. T.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name></person-group> (<year>2011</year>). <article-title>Hierarchical anatomical brain networks for MCI prediction: revisiting volumetric measures</article-title>. <source>PLoS ONE</source> <volume>6</volume>:<fpage>e21935</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0021935</pub-id><pub-id pub-id-type="pmid">21818280</pub-id></citation></ref>
<ref id="B79">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhu</surname> <given-names>X.</given-names></name> <name><surname>Suk</surname> <given-names>H.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name></person-group> (<year>2014a</year>). <article-title>A novel matrix-similarity based loss function for joint regression and classification in AD diagnosis</article-title>. <source>Neuroimage</source> <volume>100</volume>, <fpage>91</fpage>&#x02013;<lpage>105</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2014.05.078</pub-id><pub-id pub-id-type="pmid">24911377</pub-id></citation></ref>
<ref id="B80">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhu</surname> <given-names>X.</given-names></name> <name><surname>Suk</surname> <given-names>H.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name></person-group> (<year>2014b</year>). <article-title>A novel multi-relation regularization method for regression and classification in AD diagnosis</article-title>. <source>Med. Image Comput. Comput. Assist. Interv.</source> <volume>17</volume>, <fpage>401</fpage>&#x02013;<lpage>408</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-10443-0_51</pub-id><pub-id pub-id-type="pmid">25320825</pub-id></citation></ref>
<ref id="B81">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zou</surname> <given-names>H.</given-names></name> <name><surname>Hastie</surname> <given-names>T.</given-names></name></person-group> (<year>2005</year>). <article-title>Regularization and variable selection via the elastic net</article-title>. <source>J. R. Stat. Soc</source>. <volume>67</volume>, <fpage>301</fpage>&#x02013;<lpage>320</lpage>. <pub-id pub-id-type="doi">10.1111/j.1467-9868.2005.00503.x</pub-id></citation></ref>
<ref id="B82">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zu</surname> <given-names>C.</given-names></name> <name><surname>Jie</surname> <given-names>B.</given-names></name> <name><surname>Liu</surname> <given-names>M.</given-names></name> <name><surname>Chen</surname> <given-names>S.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name> <name><surname>Zhang</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Label-aligned multi-task feature learning for multimodal classification of Alzheimer&#x00027;s disease and mild cognitive impairment</article-title>. <source>Brain Imaging Behav</source>. <volume>10</volume>, <fpage>1148</fpage>&#x02013;<lpage>1159</lpage>. <pub-id pub-id-type="doi">10.1007/s11682-015-9480-7</pub-id><pub-id pub-id-type="pmid">26572145</pub-id></citation></ref>
</ref-list>
<fn-group>
<fn fn-type="financial-disclosure"><p><bold>Funding.</bold> This work was supported by the Funds for International Cooperation and Exchange of the National Natural Science Foundation of China [grant number 61210001], the General Program of National Natural Science Foundation of China [grant number 61571047], the Fundamental Research Funds for the Central Universities [grant number 2017STUD34], and the Fundamental Research Funds for the Central Universities [grant number 2017EYT36].</p>
</fn>
</fn-group>
</back>
</article>