<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2024.1387196</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Comparison of deep learning architectures for predicting amyloid positivity in Alzheimer&#x2019;s disease, mild cognitive impairment, and healthy aging, from T1-weighted brain structural MRI</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Chattopadhyay</surname> <given-names>Tamoghna</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2657295/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ozarkar</surname> <given-names>Saket S.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Buwa</surname> <given-names>Ketaki</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Joshy</surname> <given-names>Neha Ann</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Komandur</surname> <given-names>Dheeraj</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Naik</surname> <given-names>Jayati</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Thomopoulos</surname> <given-names>Sophia I.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ver Steeg</surname> <given-names>Greg</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ambite</surname> <given-names>Jose Luis</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/19639/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Thompson</surname> <given-names>Paul M.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/6851/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<on-behalf-of>for the Alzheimer&#x2019;s Disease Neuroimaging Initiative (ADNI)</on-behalf-of>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Imaging Genetics Center, Mark and Mary Stevens Neuroimaging and Informatics Institute, Keck School of Medicine, University of Southern California</institution>, <addr-line>Marina del Rey, CA</addr-line>, <country>United States</country></aff>
<aff id="aff2"><sup>2</sup><institution>University of California</institution>, <addr-line>Riverside, CA</addr-line>, <country>United States</country></aff>
<aff id="aff3"><sup>3</sup><institution>Information Sciences Institute, University of Southern California</institution>, <addr-line>Marina del Rey, CA</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0003">
<p>Edited by: Da Ma, Wake Forest University, United States</p>
</fn>
<fn fn-type="edited-by" id="fn0004">
<p>Reviewed by: Yuchuan Zhuang, AbbVie, United States</p>
<p>Robel Kebede Gebre, Mayo Clinic, United States</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Tamoghna Chattopadhyay, <email>tchattop@usc.edu</email></corresp>
<corresp id="c002">Paul M. Thompson, <email>pthomp@usc.edu</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>02</day>
<month>07</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>18</volume>
<elocation-id>1387196</elocation-id>
<history>
<date date-type="received">
<day>16</day>
<month>02</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>14</day>
<month>06</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2024 Chattopadhyay, Ozarkar, Buwa, Joshy, Komandur, Naik, Thomopoulos, Ver Steeg, Ambite and Thompson.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Chattopadhyay, Ozarkar, Buwa, Joshy, Komandur, Naik, Thomopoulos, Ver Steeg, Ambite and Thompson</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Abnormal &#x03B2;-amyloid (A&#x03B2;) accumulation in the brain is an early indicator of Alzheimer&#x2019;s disease (AD) and is typically assessed through invasive procedures such as PET (positron emission tomography) or CSF (cerebrospinal fluid) assays. As new anti-Alzheimer&#x2019;s treatments can now successfully target amyloid pathology, there is a growing interest in predicting A&#x03B2; positivity (A&#x03B2;+) from less invasive, more widely available types of brain scans, such as T1-weighted (T1w) MRI. Here we compare multiple approaches to infer A&#x03B2;&#x2009;+&#x2009;from standard anatomical MRI: (1) classical machine learning algorithms, including logistic regression, XGBoost, and shallow artificial neural networks, (2) deep learning models based on 2D and 3D convolutional neural networks (CNNs), (3) a hybrid ANN-CNN, combining the strengths of shallow and deep neural networks, (4) transfer learning models based on CNNs, and (5) 3D Vision Transformers. All models were trained on paired MRI/PET data from 1,847 elderly participants (mean age: 75.1&#x2009;yrs. &#x00B1; 7.6SD; 863 females/984 males; 661 healthy controls, 889 with mild cognitive impairment (MCI), and 297 with Dementia), scanned as part of the Alzheimer&#x2019;s Disease Neuroimaging Initiative. We evaluated each model&#x2019;s balanced accuracy and F1 scores. While further tests on more diverse data are warranted, deep learning models trained on standard MRI showed promise for estimating A&#x03B2;&#x2009;+&#x2009;status, at least in people with MCI. This may offer a potential screening option before resorting to more invasive procedures.</p>
</abstract>
<kwd-group>
<kwd>Alzheimer&#x2019;s disease</kwd>
<kwd>amyloid</kwd>
<kwd>3D convolutional neural networks</kwd>
<kwd>deep learning</kwd>
<kwd>transfer learning</kwd>
<kwd>vision transformers</kwd>
</kwd-group>
<contract-num rid="cn1">R01AG058854</contract-num>
<contract-num rid="cn1">U01AG068057</contract-num>
<contract-num rid="cn1">RF1AG057892</contract-num>
<contract-sponsor id="cn1">NIH</contract-sponsor>
<contract-sponsor id="cn2">National Institutes of Health<named-content content-type="fundref-id">10.13039/100000002</named-content></contract-sponsor>
<contract-sponsor id="cn3">Department of Defense<named-content content-type="fundref-id">10.13039/100000005</named-content></contract-sponsor>
<counts>
<fig-count count="5"/>
<table-count count="6"/>
<equation-count count="0"/>
<ref-count count="86"/>
<page-count count="13"/>
<word-count count="10705"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Brain Imaging Methods</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>According to the <xref ref-type="bibr" rid="ref80">World Health Organization (2022)</xref>, approximately 55 million individuals are now affected by dementia&#x2014;a number expected to rise to 78 million by the year 2030. Alzheimer&#x2019;s disease (AD)&#x2014;the most prevalent type of dementia&#x2014;accounts for around 60&#x2013;70% of the overall number of cases (<xref ref-type="bibr" rid="ref80">World Health Organization, 2022</xref>). The underlying cause of AD is linked to the abnormal accumulation of specific proteins in the brain, including beta-amyloid plaques (<xref ref-type="bibr" rid="ref31">Jack et al., 2018</xref>). These plaques are insoluble and toxic to brain cells (<xref ref-type="bibr" rid="ref52">Masters and Selkoe, 2012</xref>). Additionally, abnormal tau proteins aggregate within neurons, in the form of neurofibrillary tangles, disrupting molecular transport within cells (<xref ref-type="bibr" rid="ref36">Johnson and Hartigan, 1999</xref>). To visualize the distribution of A&#x03B2; in the brain, positron emission tomography (PET) has been used, but radioactive tracers that are sensitive to amyloid and tau proteins must be injected into the bloodstream, and this is invasive. Amyloid-sensitive PET can map the spatial distribution of A&#x03B2; in the brain, revealing the extent of AD pathology. As amyloid, tau, and neurodegeneration (A/T/N) are all considered to be the defining biological characteristics of AD, a recent NIA-AA task force recommended (<xref ref-type="bibr" rid="ref31">Jack et al., 2018</xref>; <xref ref-type="bibr" rid="ref62">Revised Again: Alzheimer&#x2019;s Diagnostic Criteria Get Another Makeover | ALZFORUM, 2023</xref>) that future AD research studies should measure these processes.</p>
<p>In line with <italic>post mortem</italic> maps of pathology, PET scans show a distinctive trajectory of pathology in AD, usually starting in the entorhinal cortex, hippocampus, and medial temporal lobes, and then spreading throughout the brain as the disease advances. Early neuropathological work by Braak and colleagues pieced together the typical progression patterns for amyloid and tau in the brain (leading to the so-called &#x2018;Braak staging&#x2019; system; <xref ref-type="bibr" rid="ref10">Braak and Braak, 1991</xref>; <xref ref-type="bibr" rid="ref11">Braak and Braak, 1997</xref>; <xref ref-type="bibr" rid="ref8">Braak, 2000</xref>; <xref ref-type="bibr" rid="ref71">Thompson et al., 2004</xref>; <xref ref-type="bibr" rid="ref9">Braak et al., 2006</xref>). This progression is associated with gradual clinical and cognitive decline. Although amyloid levels can be measured in living individuals using PET imaging with amyloid-sensitive ligands such as Pittsburgh compound B (PiB; <xref ref-type="bibr" rid="ref41">Klunk et al., 2004</xref>) or florbetapir (<xref ref-type="bibr" rid="ref16">Clark et al., 2011</xref>), amyloid-PET is expensive, not widely available, and involves an invasive procedure, as it requires the injection of radioactive compounds into the participant. Ground truth measures can be obtained by directly measuring amyloid levels in the cerebrospinal fluid (CSF) through a spinal tap or lumbar puncture. The efficiency of A&#x03B2; protein aggregate clearance can be assessed in cerebrospinal fluid (CSF; <xref ref-type="bibr" rid="ref69">Tarasoff-Conway et al., 2015</xref>). CSF peptides, such as A&#x03B2;1-42, and hyperphosphorylated tau show correlations with amyloid plaques and neuronal tangles observed in brain autopsies (<xref ref-type="bibr" rid="ref55">Nelson et al., 2007</xref>). These biomarkers are linked to cognitive decline, providing insights for early detection of AD. 
Despite providing accurate information, these procedures are highly invasive. Thus, there is a significant interest in developing a less invasive test for abnormal amyloid to screen individuals before resorting to more invasive testing methods. Standard anatomical MRI cannot directly detect amyloid, but the accumulation of A&#x03B2; leads to widespread brain cell loss, which manifests as atrophy on T1-weighted (T1w) MRI. This process is evident through the expansion of the ventricles and widening of the cortical sulci, and the pattern of A&#x03B2; accumulation closely matches the trajectory of cortical gray matter loss detectable on brain MRI (<xref ref-type="bibr" rid="ref70">Thompson, 2007</xref>). As such, MRI markers may offer a potential avenue for less invasive screening of abnormal amyloid levels in individuals.</p>
<p>In <xref ref-type="bibr" rid="ref60">Petrone and Casamitjana (2019)</xref>, Petrone et al. conducted a study where they used neuroimaging to predict amyloid positivity in cerebrospinal fluid (CSF), using an established cutoff of &#x003E;192&#x2009;pg./mL. They studied 403 elderly participants scanned with MRI and PET. Brain tissue loss rates were longitudinally mapped using the SPM12 (<xref ref-type="bibr" rid="ref67">SPM12 software - Statistical Parametric Mapping, 2014</xref>) software. A machine learning classifier was then applied to the Jacobian determinant maps, representing local rates of atrophy, to predict amyloid levels in cognitively unimpaired individuals. The longitudinal voxel-based classifier demonstrated a promising Area Under the Curve (AUC) of 0.87 (95% CI, 0.72&#x2013;0.97). Even so, this prediction required longitudinal scans from the same individual, and was not applicable when a patient had only a baseline scan. The brain regions with the greatest discriminative power included the temporal lobes, basal forebrain, and lateral ventricles. In <xref ref-type="bibr" rid="ref58">Pan et al. (2018)</xref>, Pan et al. developed a cycle-consistent generative adversarial network (Cycle-GAN) to generate synthetic 3D PET images from brain MRI (i.e., cross-modal image synthesis). Cycle-GANs build on the GAN concept introduced by <xref ref-type="bibr" rid="ref25">Goodfellow et al. (2014)</xref> and perform a form of &#x2018;neural style transfer&#x2019; by learning the statistical relationship between two imaging modalities. In related work (<xref ref-type="bibr" rid="ref34">Jin et al., 2023</xref>), we developed a multimodal contrastive GAN to synthesize amyloid PET scans from T1w MRI and FLAIR scans. For more details on image-to-image translation and the underlying mathematics, readers are referred to <xref ref-type="bibr" rid="ref61">Qu et al. (2021)</xref> and <xref ref-type="bibr" rid="ref78">Wang et al. (2020)</xref>. 
Cross-modal synthesis is an innovative use of deep learning to generate synthetic PET images, offering potential applications in cases where PET scans may be challenging or costly to obtain.</p>
<p>In <xref ref-type="bibr" rid="ref64">Shan et al. (2021)</xref>, Shan et al. used Monte Carlo simulations with <italic>k</italic>-fold cross validation to predict A&#x03B2; positivity using domain scores from cognitive tests, obtaining an accuracy of 0.90 and 0.86 on men and women, respectively, with subjective memory complaints. In <xref ref-type="bibr" rid="ref21">Ezzati et al. (2020)</xref>, Ezzati et al. used an ensemble linear discriminant model to predict A&#x03B2; positivity using demographic information, ApoE4 genotype (as this is the major risk gene for late onset AD), MRI volumetrics and CSF biomarkers, yielding AUCs between 0.89 and 0.92 in participants with amnestic mild cognitive impairment (aMCI). In <xref ref-type="bibr" rid="ref39">Kim S, et al. (2021)</xref>, Kim et al. used a 2.5-D CNN (a convolutional neural network that operates on a set of 2D slices from a 3D volume) to predict A&#x03B2; positivity from [<sup>18</sup>F]-fluorodeoxyglucose (FDG) PET scans, with an accuracy of 0.75 and an AUC of 0.86. In <xref ref-type="bibr" rid="ref66">Son et al. (2020)</xref>, Son et al. used 2D CNNs to classify A&#x03B2;-PET images. They showed that in cases where scans present visual ambiguity, deep learning algorithms correlated better with ground truth measures than visual assessments. This underscores the potential of such algorithms for clinical diagnosis and prognostic assessment, particularly in scenarios where visual interpretation is challenging or uncertain. In <xref ref-type="bibr" rid="ref5">Bae et al. (2023)</xref>, Bae et al. used a deep learning based classification system (DLCS) to classify A&#x03B2;-positive AD patients vs. A&#x03B2;-negative controls using T1w brain MRI, and reported an AUC of 0.937. In <xref ref-type="bibr" rid="ref81">Yasuno et al. (2017)</xref>, Yasuno et al. 
conducted a correlation analysis between the T1w/T2w ratio and PiB-BP<sub>ND</sub> values and found a significant positive relationship between the regional T1w/T2w ratio and A&#x03B2; accumulation. Their study concluded that the T1w/T2w ratio is a prospective, stable biological marker of early A&#x03B2; accumulation in cognitively normal individuals.</p>
<p>In our current study, we aimed to assess the effectiveness of a diverse range of deep learning architectures for predicting A&#x03B2;&#x2009;+&#x2009;from 3D T1w structural MRI. 3D convolutional neural networks (CNNs) have demonstrated success in detecting Alzheimer&#x2019;s disease and in &#x2018;brain age&#x2019; estimation from brain MRI (<xref ref-type="bibr" rid="ref45">Lam and Zhu, 2020</xref>; <xref ref-type="bibr" rid="ref51">Lu et al., 2022</xref>). CNNs learn predictive features directly from raw images, eliminating the need for extensive pre-processing, or visual interpretation of images. As A&#x03B2;&#x2009;+&#x2009;is weakly associated with age and regional morphometric measures (such as the volume of the entorhinal cortex), we incorporated these features as predictors as well. To achieve this, we compared the performance of classical machine learning algorithms&#x2014;logistic regression, XGBoost, and shallow artificial neural networks&#x2014;for the amyloid prediction task. We also evaluated a hybrid network that combines a CNN with a shallow artificial neural network. This merges numeric features, often called &#x2018;tabular data&#x2019;, with entire images, weighting each input type in proportion to its added value for the prediction task.</p>
<p>In our tests, we separately report accuracy for A&#x03B2;&#x2009;+&#x2009;prediction in healthy people vs. those who already show signs of clinical impairment (MCI and AD), as A&#x03B2;&#x2009;+&#x2009;prediction may be more challenging in controls. The now-standard biomarker model by <xref ref-type="bibr" rid="ref31">Jack et al. (2018)</xref> posits that amyloid levels may begin to rise before neurodegeneration is apparent on MRI, although some researchers have challenged this sequence of events, noting that it may not be universal (<xref ref-type="bibr" rid="ref15">Cho et al., 2024</xref>), especially in populations of non-European ancestry.</p>
<p>As deep learning models are often enhanced by &#x201C;pre-training&#x201D; (first training networks on related tasks), we evaluated the performance of the models when pre-training them to predict age and sex, using data from 19,839 subjects from the UK Biobank dataset (<xref ref-type="bibr" rid="ref68">Sudlow et al., 2015</xref>). Transfer learning&#x2014;an artificial intelligence/deep learning approach&#x2014;has previously been shown to enhance MRI-based Alzheimer&#x2019;s disease (AD) classification performance (<xref ref-type="bibr" rid="ref51">Lu et al., 2022</xref>; <xref ref-type="bibr" rid="ref18">Dhinagar and Thomopoulos, 2023</xref>). In transfer learning, network weights are first optimized on previous tasks and then some network layers have their weights &#x2018;frozen&#x2019;&#x2014;held constant&#x2014;while others are adjusted when training the network on the new task. There is a debate about when such pre-training techniques enhance performance on downstream tasks, especially when the tasks differ. Our study aimed to investigate whether these pre-training techniques help in predicting amyloid positivity. We examined whether the amount of data used for the pretraining task impacts the accuracy of the downstream task after fine-tuning. This evaluation assessed transfer learning for predicting A&#x03B2;&#x2009;+&#x2009;from structural MRI.</p>
<p>Finally, Vision Transformers (ViTs) have shown enormous success in computer vision, and more recently in medical imaging (<xref ref-type="bibr" rid="ref53">Matsoukas, 2021</xref>). Unlike CNNs, ViTs employ a self-attention mechanism to capture long-range spatial dependencies in an image, providing a more comprehensive global perspective (<xref ref-type="bibr" rid="ref49">Li, 2022</xref>). This property can help in medical imaging tasks, where anatomical context and spatial patterns can be crucial. Even so, effective training of ViTs typically requires a very large number of MRI scans (<xref ref-type="bibr" rid="ref6">Bi, 2022</xref>; <xref ref-type="bibr" rid="ref33">Jang and Hwang, 2022</xref>; <xref ref-type="bibr" rid="ref79">Willemink et al., 2022</xref>). In <xref ref-type="bibr" rid="ref19">Dhinagar et al. (2023)</xref>, the ViT architecture was used to classify AD vs. healthy aging, achieving an AUC of 0.89. Building on this, our investigation aimed to assess the performance of the ViT architecture in predicting A&#x03B2;&#x2009;+&#x2009;from T1w MRI. We conducted a benchmark comparison with the commonly used CNNs, to compare these two architectures for A&#x03B2;&#x2009;+&#x2009;prediction.</p>
<p>With the advent of new anti-Alzheimer&#x2019;s treatments effectively targeting amyloid pathology, there is increasing interest in predicting A&#x03B2;&#x2009;+&#x2009;using less invasive and more accessible brain imaging techniques, such as T1-weighted MRI. In this work, we compare multiple machine learning and deep learning architectures, including, (1) classical machine learning algorithms, such as logistic regression, XGBoost, and shallow artificial neural networks, (2) deep learning models based on 2D and 3D convolutional neural networks (CNNs), (3) a hybrid ANN-CNN, combining the strengths of shallow and deep neural networks, (4) transfer learning models based on CNNs, and (5) 3D Vision Transformers, to infer A&#x03B2; status from standard anatomical MRI. We hypothesize that methods (1), (3) and (5) will perform best.</p>
</sec>
<sec id="sec2">
<label>2</label>
<title>Imaging data and preprocessing steps</title>
<p>The Alzheimer&#x2019;s Disease Neuroimaging Initiative (ADNI) is a comprehensive, multisite study initiated in 2004, at 58 locations across North America. It aims to collect and analyze neuroimaging, clinical, and genetic data to identify and better understand biomarkers associated with healthy aging and AD (<xref ref-type="bibr" rid="ref75">Veitch et al., 2019</xref>). In our analysis, we examined data from 1,847 ADNI participants with a mean age of 74.04&#x2009;&#x00B1;&#x2009;7.40&#x2009;years (863 females and 984 males). We included participants from all phases of ADNI (1, 2, GO and 3) who had both MRI and PET scans. The data was acquired across 58 sites with (both 1.5 and 3&#x2009;T) GE, Siemens or Philips scanners. Forty of these sites had a change in scanner manufacturer or model across the scanning time of our subset. The distribution of participants included 661 cognitively normal (CN) individuals, 889 with mild cognitive impairment (MCI), and 297 with dementia. Overall, the dataset included 954 individuals classified as A&#x03B2;&#x2009;+&#x2009;(amyloid positive) and 893 as A&#x03B2;- (amyloid negative). A detailed table with the subject demographic breakdown can be found in <xref ref-type="table" rid="tab1">Table 1</xref>.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Demographic data of individual train, validation and test set.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Individual distribution</th>
<th align="center" valign="top" rowspan="2">Total N</th>
<th align="center" valign="top" colspan="2">Sex</th>
<th align="center" valign="top" rowspan="2">Mean age&#x2009;&#x00B1;&#x2009;St. Dev.</th>
<th align="center" valign="top" colspan="2">Amyloid classification</th>
<th align="center" valign="top" colspan="3">Diagnosis</th>
</tr>
<tr>
<th align="center" valign="top">M</th>
<th align="center" valign="top">F</th>
<th align="center" valign="top">+ve</th>
<th align="center" valign="top">-ve</th>
<th align="center" valign="top">CN</th>
<th align="center" valign="top">MCI</th>
<th align="center" valign="top">Dem</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Train</td>
<td align="center" valign="middle">1,292</td>
<td align="center" valign="middle">680</td>
<td align="center" valign="middle">612</td>
<td align="center" valign="middle">73.99&#x2009;&#x00B1;&#x2009;7.43</td>
<td align="center" valign="middle">662</td>
<td align="center" valign="middle">630</td>
<td align="center" valign="middle">465</td>
<td align="center" valign="middle">630</td>
<td align="center" valign="middle">197</td>
</tr>
<tr>
<td align="left" valign="middle">Validation</td>
<td align="center" valign="middle">278</td>
<td align="center" valign="middle">154</td>
<td align="center" valign="middle">124</td>
<td align="center" valign="middle">74.12&#x2009;&#x00B1;&#x2009;6.95</td>
<td align="center" valign="middle">146</td>
<td align="center" valign="middle">132</td>
<td align="center" valign="middle">105</td>
<td align="center" valign="middle">126</td>
<td align="center" valign="middle">47</td>
</tr>
<tr>
<td align="left" valign="middle">Test</td>
<td align="center" valign="middle">277</td>
<td align="center" valign="middle">150</td>
<td align="center" valign="middle">127</td>
<td align="center" valign="middle">74.20&#x2009;&#x00B1;&#x2009;7.74</td>
<td align="center" valign="middle">146</td>
<td align="center" valign="middle">131</td>
<td align="center" valign="middle">91</td>
<td align="center" valign="middle">133</td>
<td align="center" valign="middle">53</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In ADNI1, participants initially underwent PiB scans instead of florbetapir scans (<xref ref-type="bibr" rid="ref2">ADNI, n.d.</xref>). However, the protocol was amended before the study&#x2019;s conclusion to transition to florbetapir scans due to processing time constraints. Consequently, PiB scans were only collected from ADNI1 participants. For participants in ADNI1 who transitioned into ADNIGO and then ADNI2, initial PET scans occurred 2&#x2009;years from the date of the last successful florbetapir and FDG-PET scan conducted under ADNIGO. Additionally, in ADNI1, only a subset of participants received FDG scans. In ADNI2, subjects underwent up to 3 florbetapir scans and up to 2 FDG scans, with each scan acquired at 2-year intervals. These scans were conducted within a two-week window before or after the in-clinic assessments at Baseline and at 24&#x2009;months after Baseline. In ADNI3, both Tau and Amyloid imaging were conducted on all participants during their initial ADNI3 visit. Amyloid PET imaging was carried out every 2&#x2009;years using florbetapir for participants continuing from ADNI2 or florbetaben for newly enrolled participants (<xref ref-type="bibr" rid="ref3">ADNI, n.d.</xref>). ADNI does not perform partial volume correction for amyloid PET analysis. It also does not account for off-target binding.</p>
<p>Mild cognitive impairment (MCI) is an intermediate state between normal aging and AD (<xref ref-type="bibr" rid="ref59">Petersen et al., 1999</xref>), and is a significant focus in clinical trials, as many trials enroll individuals with MCI as they are assumed to be more likely to respond to therapy than people already diagnosed with AD. In the construction of the final dataset, we excluded participants who lacked basic clinical information or had poor-quality imaging data, such as scans with severe motion, distortion, or ringing artifacts.</p>
<p>ADNI has more participants with MCI compared to those with AD or CN. This is partly due to the initiative&#x2019;s focus on the early stages of cognitive decline and the progression to Alzheimer&#x2019;s disease. From ADNI phase 1 onward, twice as many MCI subjects were enrolled as AD cases or controls, with a target enrolment ratio of 1:2:1 for controls:MCI:AD. This higher proportion of MCI participants aligns with ADNI&#x2019;s objective to study factors that influence disease progression from MCI to AD, which is critical for early diagnosis and intervention.</p>
<p>Having a balanced number of participants in each diagnostic class and repeating the experiments could in principle lead to more reliable and generalizable models, reducing the bias toward the more prevalent class, MCI. But balancing the datasets can come with its own set of challenges. One issue might be the reduced amount of training data if undersampling is used to balance the classes, which can lead to loss of information, especially as the dataset is not large to begin with. Alternatively, oversampling/differential sampling methods such as SMOTE, or generative models such as latent diffusion models, denoising diffusion probabilistic models (DDPMs), or VAEs might be used to generate synthetic data for the underrepresented classes, to augment the training set, but this might also introduce noise and overfitting.</p>
<p>T1w MRI scans were further processed using the automated segmentation software package FreeSurfer (<xref ref-type="bibr" rid="ref23">Fischl, 2012</xref>), following the ENIGMA standardized protocol for brain segmentation and quality assurance (<xref ref-type="bibr" rid="ref73">Van Erp and Hibar, 2016</xref>; <xref ref-type="bibr" rid="ref74">van Erp et al., 2018</xref>).<xref ref-type="fn" rid="fn0001"><sup>1</sup></xref> The segmentations of subcortical regions (including lateralized hippocampus) and cortical regions [based on the Desikan-Killiany (DK) atlas regions (<xref ref-type="bibr" rid="ref17">Desikan et al., 2006</xref>); including entorhinal cortex] were extracted and visually inspected for accuracy. The CSF, white and gray matter segmentations were extracted and visually inspected for each subject using FSL&#x2019;s Fast function.<xref ref-type="fn" rid="fn0002"><sup>2</sup>
</xref></p>
<p>For training the CNN architectures, we used part of this dataset, so that an independent subset of the data could be reserved for testing. We focused on 3D T1w brain MRI scans (see <xref ref-type="fig" rid="fig1">Figure 1</xref>) from 762 subjects, with a mean age of 75.1&#x2009;&#x00B1;&#x2009;7.6&#x2009;years (394 females, 368 males). This subset included 459 cognitively normal controls, 67 individuals with MCI, and 236 with AD. These participants were selected as they also had amyloid-sensitive PET scans collected close to the time of the T1w MRI acquisition, with a maximum interval between scans set to 180&#x2009;days (We note that one could consider an extension of the current problem, where the interval from the MRI to the amyloid assessment is considered as a variable, <italic>t</italic>, and used as input in the model, where <italic>t</italic> may be positive or negative). No repeated scans were used for the CNNs. The restriction on the time interval between scans was intended to help in estimating the relation between MRI features and amyloid positivity. As ViTs are more data intensive architectures, the whole dataset - with repeated scans - was used to train them. The test dataset in that case was designed to not have repeated scans, or scans from subjects in training or validation sets. Thus, the training dataset had 1,290 T1w MRI scans from 845 individual subjects, the validation dataset had 276 T1w MRI scans, and the test dataset had 275 T1w MRI scans. For the transfer learning experiments, we used data from 19,839 subjects from the UK Biobank dataset (age: 64.6&#x2009;&#x00B1;&#x2009;7.6&#x2009;years) comprising 10,294 females and 9,545 males.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>MRI scans of three amyloid positive participants: <bold>(A)</bold> a cognitively normal control, and participants diagnosed with <bold>(B)</bold> MCI, and <bold>(C)</bold> dementia.</p>
</caption>
<graphic xlink:href="fnins-18-1387196-g001.tif"/>
</fig>
<p>As is customary when benchmarking deep learning methods, the 3D T1w brain MRI scans underwent a series of pre-processing steps (<xref ref-type="bibr" rid="ref45">Lam and Zhu, 2020</xref>). These steps included nonparametric intensity normalization using N4 bias field correction, &#x2018;skull stripping&#x2019; for brain extraction, registration to a template using 6 degrees of freedom (rigid-body) registration, and isometric voxel resampling to 2&#x2009;mm. The resulting pre-processed images were of size 91x109x91. Furthermore, the T1w images underwent min-max scaling so that all values ranged between 0 and 1. This normalization process is common in image processing (and is similar to batch or instance normalization in deep learning), allowing standardized and consistent representation of image intensity values, which may aid in subsequent analyses and model training. The preprocessing pipeline applied to the 3D T1w MRI images ensures that the background of the scans is 0 intensity, and due to the normalization of input before CNN model, ideally, the effect of the original background or intensity range of the scan on performance of convolution models is negligible. To ensure a direct correspondence with the patch sizes used for the ViT models, the T1w input scans were resized to dimensions of both 64x64x64 and 128x128x128 for the ViT experiments. This resizing ensures compatibility between the image dimensions and the patch sizes employed in the ViT models, and allowed us to consistently integrate the T1w images into the analysis pipeline.</p>
<p>As is the convention in the ADNI dataset, two cut-off values were employed, providing alternative definitions of amyloid positivity, based on PET cortical <italic>standardized uptake value ratio</italic> (SUVR; denoted A&#x03B2;_1 by ADNI). For the 18F-florbetapir tracer, amyloid positivity was determined using mean 18F-florbetapir, with A&#x03B2;&#x2009;+&#x2009;defined as &#x003E;1.11 for cutoff_1 and&#x2009;&#x003E;&#x2009;0.79 for cutoff_2. When florbetaben was used, A&#x03B2;&#x2009;+&#x2009;was defined as &#x003E;1.20 for cutoff_1 and&#x2009;&#x003E;&#x2009;1.33 for cutoff_2. The SUVR values were normalized by using a whole cerebellum reference region (<xref ref-type="bibr" rid="ref28">Hansson et al., 2018</xref>; <xref ref-type="bibr" rid="ref7">Blennow et al., 2019</xref>). Each of these two cutoffs has been employed in the literature to define amyloid positivity, and to establish eligibility criteria for anti-amyloid drug treatments (<xref ref-type="bibr" rid="ref72">van Dyck et al., 2023</xref>).</p>
</sec>
<sec id="sec3">
<label>3</label>
<title>Models and experiments</title>
<sec id="sec4">
<label>3.1</label>
<title>Classical machine learning algorithms</title>
<p>As the first set of methods to evaluate for predicting A&#x03B2;&#x2009;+&#x2009;from anatomical MRI, we employed the following three classical machine learning algorithms: logistic regression, XGBoost, and a fully-connected artificial neural network (ANN) with 7 hidden layers. The ANN incorporated a Rectified Linear Unit (ReLU) activation function between layers. As predictors, we used measures that have previously been associated with amyloid levels in the literature: age, sex, clinical diagnosis, ApoE4 genotype values (2 for two copies of the ApoE4 allele and 1 for one E4 allele, 0 otherwise), overall volumes of cerebrospinal fluid (CSF), gray and white matter (all estimated from the brain MRI scan), as well as the left and right hippocampal and entorhinal cortex volumes. Regional volumes were extracted from the T1w MRI using FreeSurfer and were available for the entire brain. Previous studies like Kai et al. (<xref ref-type="bibr" rid="ref29">Hu et al., 2019</xref>) and <xref ref-type="bibr" rid="ref71">Thompson et al. (2004)</xref> show that hippocampal and entorhinal cortex volumes are among the most consistently affected in Alzheimer&#x2019;s disease, and as a result we focused on those two regional volumes in our study. The dataset was partitioned into independent training, validation, and testing sets, approximately in the ratio of 70:20:10. Standard performance metrics for the three algorithms (balanced accuracy and F1 Score on the test dataset), were computed to assess their effectiveness in predicting amyloid positivity.</p>
</sec>
<sec id="sec5">
<label>3.2</label>
<title>2D CNN architecture</title>
<p>We implemented the 2D CNN architecture that we proposed in <xref ref-type="bibr" rid="ref27">Gupta et al. (2021)</xref>. In this model, 3D scans are used as the input, but each slice is encoded using a 2D CNN encoder (see <xref ref-type="fig" rid="fig2">Figure 2</xref>), which makes the training faster, requires less RAM, and allows pre-training using foundation models trained on large datasets of 2D photographic images, such as ImageNet. The encoded slices are then combined through an aggregation module that employs permutation-invariant layers, ultimately producing a single embedding for the entire scan. This embedding was then passed through feed-forward layers to predict whether the individual was amyloid positive or negative. This architecture allows for effective representation learning from 3D scans, and the aggregation module captures information from individual slices to predict amyloid status.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Model architecture with mean-based aggregation. The two pink blocks include trainable parameters; the purple block is a deterministic operation.</p>
</caption>
<graphic xlink:href="fnins-18-1387196-g002.tif"/>
</fig>
<p>The 2D CNN encoder processes a single 2D slice as input and generates a <italic>d</italic>-dimensional embedding for each slice. The number of filters in the last layer of the architecture is <italic>d</italic>, determined by the dimension of the output from the aggregation module. The aggregation module incorporates permutation-invariant layers, ensuring that the output remains independent of the slice order. Specifically, the element-wise mean of all slice encodings is computed and used as the permutation-invariant layer. The value of <italic>d</italic> is fixed at 32, and a feed-forward layer with one hidden layer containing 64 activations is used. The slices in this context are sagittal. This model was trained for 100 epochs using the Adam optimizer (<xref ref-type="bibr" rid="ref40">Kingma and Ba, 2015</xref>), a weight decay of 1&#x00D7;10<sup>&#x2212;4</sup>, a learning rate of 1&#x00D7;10<sup>&#x2212;4</sup>, and a batch size of 8. Mean squared error loss was employed as the optimization function during training. Model performance was measured using balanced accuracy.</p>
</sec>
<sec id="sec6">
<label>3.3</label>
<title>3D CNN architecture</title>
<p>The 3D CNN was composed of four 3D Convolution layers with a filter size of 3 &#x00D7; 3, followed by one 3D Convolution layer with a 1 &#x00D7; 1 filter, and a final Dense layer with a sigmoid activation function (see <xref ref-type="fig" rid="fig3">Figure 3</xref>). A ReLU activation function and Instance normalization were applied to all layers. Dropout layers (with a dropout rate of 0.5) and a 3D Average Pooling layer with a 2 &#x00D7; 2 filter size were introduced into the 2nd, 3rd, and 4th layers. During training, models were optimized with a learning rate of 1&#x00D7;10<sup>&#x2212;4</sup>. Test performance was evaluated using balanced accuracy and F1 Score. To address overfitting, both L1 and L2 regularizers were employed, along with dropouts between layers and early stopping. Youden&#x2019;s <italic>J</italic> index (<xref ref-type="bibr" rid="ref82">Youden, 1950</xref>) was used to determine the threshold for binary classification of A&#x03B2;&#x2009;+&#x2009;during testing, allowing comparison with the true cutoff values. Hyperparameter tuning was conducted through <italic>k</italic>-fold cross-validation to optimize model performance.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>3D CNN model architecture.</p>
</caption>
<graphic xlink:href="fnins-18-1387196-g003.tif"/>
</fig>
</sec>
<sec id="sec7">
<label>3.4</label>
<title>Hybrid CNN architecture</title>
<p>The hybrid model (<xref ref-type="fig" rid="fig4">Figure 4</xref>) combines a 3D CNN using T1w images as input with an ANN that takes discrete, tabular data (which consists of simple values that are numeric or categorical) including age, sex, diagnosis, APOE4 values (2 for two copies of E4, 1 for one E4, and 0 for none), overall volumes of CSF, white and gray matter, and left and right hippocampal and entorhinal cortex volumes. The 3D images and the derived discrete data were fed into individual models, separately. After passing through flattening layers in the 3D CNN, the layers from the ANN are stacked with the tensors from the 3D CNN. Subsequently, the combined data passes through further Dense layers to predict A&#x03B2;+. The learning rate was set to 0.001, and the Adam Optimizer was used, with a batch size of 2. The model was trained for 200 epochs. The 3D CNN model consisted of 3 convolution blocks with increasing filter sizes (32, 64, 128, and 256) along with Batch Normalization and Max Pooling. The final convolution layer, before concatenation, had a filter size of 256 and used average pooling. The ANN had three layers with hidden layer sizes of 1,024, 512, and 64, along with instance normalization and the ReLU activation function.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Hybrid 3D CNN model architecture.</p>
</caption>
<graphic xlink:href="fnins-18-1387196-g004.tif"/>
</fig>
<p>This hybrid model was executed separately for both entorhinal cortex and hippocampus volumes, as well as in combination. In the combined case, we also considered the case where APOE genotype values were excluded from the discrete features input. Performance was evaluated using balanced accuracy and F1 Score, to compare the four models.</p>
</sec>
<sec id="sec8">
<label>3.5</label>
<title>Vision transformers</title>
<p>We trained two variations of the ViT architecture: (i) the neuroimage transformer (NiT) and (ii) the multiple instance NiT (MINiT; <xref ref-type="bibr" rid="ref65">Singla et al., 2022</xref>), as illustrated in <xref ref-type="fig" rid="fig5">Figure 5</xref>. These architectures involve several key steps. Initially, the input image is split into fixed-sized patch embeddings. These patches are then combined with learnable position embeddings and a class token. The resulting sequence of vectors is fed into a transformer encoder, consisting of alternating layers of multi-head attention and a multi-layer perceptron (MLP; <italic>top right</italic>, <xref ref-type="fig" rid="fig5">Figure 5</xref>). This architecture has been adapted to accommodate patches (cubes) from 3D scans. The NiT model was configured with a patch size of 8x8x8, without any overlap, a hidden dimension size of 256, six transformer encoder layers, and between 2 and 12 self-attention heads, with a dropout rate of 0.3.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Overview of the vision transformer architecture: reproduced from <xref ref-type="bibr" rid="ref65">Singla et al. (2022)</xref>.</p>
</caption>
<graphic xlink:href="fnins-18-1387196-g005.tif"/>
</fig>
<p>Based on MiNiT (<xref ref-type="bibr" rid="ref65">Singla et al., 2022</xref>), the input image, represented as M &#x2208; &#x211D;<italic><sup>L&#x2009;&#x00D7;&#x2009;W&#x2009;&#x00D7;&#x2009;H</sup></italic>, is transformed into a sequence of flattened blocks. If (<italic>B,B,B</italic>) denotes the shape of each block, the number of blocks is <italic>LWH/B</italic><sup>3</sup>. Non-overlapping cubiform patches are extracted from the input volume and flattened. These patches are then projected to <italic>D</italic> dimensions, the inner dimension of the transformer layers, using a learned linear projection. The generated sequence of input patches is augmented with learned positional embeddings for positional information and a learned classification token. Subsequently, this sequence is fed into a transformer encoder comprising <italic>L</italic> transformer layers. Each layer consists of a multi-head self-attention block and a multi-layer perceptron (MLP) block, which incorporates two linear projections, with a Gaussian Error Gated Linear Unit (GEGLU) nonlinearity applied between them. Layer normalization is applied before - and residual connections are added after - every block in each transformer layer. Finally, a layer normalization and an MLP head consisting of a single <italic>D&#x2009;&#x00D7; C</italic> linear layer project the classification token to &#x211D;<italic><sup>C</sup></italic>, where <italic>C</italic> represents the number of classes (<xref ref-type="bibr" rid="ref65">Singla et al., 2022</xref>).</p>
<p>The NiT architecture served as the primary model in our experiments, and we fine-tuned the default values for the number of transformer encoder layers and attention heads. In the case of MINiT, in addition to adding a learned positional embedding to the patches and a learned classification token to their sequence, a learned block embedding was also introduced (<xref ref-type="bibr" rid="ref65">Singla et al., 2022</xref>). This embedding was included to retain the positional information of the block within the neuroimage of each patch. MINiT adopted similar parameters to those described for NiT.</p>
<p>We also performed hyperparameter selection for both models through a random search within specified upper and lower bounds. These parameters included the learning rate (chosen from a uniform distribution between 0.00001 to 0.001), weight decay (selected from a uniform distribution between 0.00001 to 0.001), the number of warm-up epochs (options included 1, 5, 16), the number of attention heads (options included 2, 4, 8, and 12), and the number of encoder layers (choices were 3, 4, and 6). These hyperparameters were defined based on the bounds typically used in ViT architectures (<xref ref-type="bibr" rid="ref6">Bi, 2022</xref>; <xref ref-type="bibr" rid="ref33">Jang and Hwang, 2022</xref>). We used the Adam optimizer (<xref ref-type="bibr" rid="ref40">Kingma and Ba, 2015</xref>).</p>
<p>After training, we tested the model on the hold-out test dataset. We evaluated model performance with several metrics including the receiver-operator characteristic curve-area under the curve (ROC-AUC), accuracy, and F1-score. The threshold for these metrics was determined using Youden&#x2019;s Index (<xref ref-type="bibr" rid="ref82">Youden, 1950</xref>).</p>
</sec>
</sec>
<sec sec-type="results" id="sec9">
<label>4</label>
<title>Results</title>
<p>In the comparison of classical machine learning models for predicting amyloid positivity, the best results were achieved with the artificial neural network (ANN), yielding a balanced accuracy of 0.771 and an F1 score of 0.771. The balanced accuracy values for the classical models ranged from 0.69 to 0.77, indicating predominantly similar classification performances across these models (<xref ref-type="table" rid="tab2">Table 2</xref>).</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Balanced accuracy (BA) and F1 scores for classical machine learning models.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th/>
<th align="center" valign="top">XGBoost</th>
<th align="center" valign="top">Logistic regression</th>
<th align="center" valign="top">ANN</th>
</tr>
<tr>
<th/>
<th align="center" valign="middle">BA / F1 score</th>
<th align="center" valign="middle">BA / F1 score</th>
<th align="center" valign="middle">BA / F1 score</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Data except for EC volume</td>
<td align="center" valign="middle">0.742 / 0.678</td>
<td align="center" valign="middle">0.770 / 0.734</td>
<td align="center" valign="middle">0.711 / 0.696</td>
</tr>
<tr>
<td align="left" valign="middle">Data except for HP volume</td>
<td align="center" valign="middle">0.742 / 0.689</td>
<td align="center" valign="middle">0.770 / 0.734</td>
<td align="center" valign="middle">0.711 / 0.696</td>
</tr>
<tr>
<td align="left" valign="middle">Data except for GM, WM and CSF volumes</td>
<td align="center" valign="middle">0.697 / 0.656</td>
<td align="center" valign="middle">0.770 / 0.734</td>
<td align="center" valign="middle">0.771 / 0.771</td>
</tr>
<tr>
<td align="left" valign="middle">Data with all features</td>
<td align="center" valign="middle">0.756 / 0.701</td>
<td align="center" valign="middle">0.770 / 0.734</td>
<td align="center" valign="middle">0.725 / 0.716</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The best performance was obtained with the ANN where all data except GM, WM and CSF volumes are considered, giving a balanced accuracy of 0.771.</p>
</table-wrap-foot>
</table-wrap>
<p>The 2D CNN performed worse than the classical machine learning algorithms. Across an average of three runs, the model achieved a test accuracy of 0.543. In contrast, the 3D CNN architecture performed better, as indicated in <xref ref-type="table" rid="tab3">Table 3</xref>. The Youden&#x2019;s <italic>J</italic> Index, used to determine the threshold for classifying A&#x03B2;&#x2009;+&#x2009;as 0/1 based on MRI scans, varied across different subject groups. Specifically, it was found to be 0.605 when considering only MCI and AD participants, 0.509 for cognitively unimpaired controls (CN), and 0.494 when considering all subjects. A balanced accuracy score of 0.760 was achieved for classification when all subjects were included. The accuracy increased to 0.850 when classifying individuals with only MCI or AD. In the case of CN, the balanced accuracy was 0.631. This observation aligns with expectations, as classifying A&#x03B2;&#x2009;+&#x2009;is more challenging in the earlier stages of the disease. According to the now-accepted Jack et al. model of the sequence of biomarker elevation in AD (<xref ref-type="bibr" rid="ref31">Jack et al., 2018</xref>), abnormal amyloid accumulation typically precedes extensive brain atrophy, although individuals may vary in the order and relative intensities of these processes.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>3D CNN results for all subjects, and with CN and MCI/AD groups considered separately.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th/>
<th align="center" valign="top">All subjects</th>
<th align="center" valign="top">CN</th>
<th align="center" valign="top">MCI and AD</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Balanced accuracy</td>
<td align="center" valign="middle">0.760</td>
<td align="center" valign="middle">0.631</td>
<td align="center" valign="middle">0.850</td>
</tr>
<tr>
<td align="left" valign="middle">F1 score</td>
<td align="center" valign="middle">0.746</td>
<td align="center" valign="middle">0.480</td>
<td align="center" valign="middle">0.824</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The hybrid model performed better than the 3D CNN model (<xref ref-type="table" rid="tab4">Table 4</xref>). The hybrid model gave the best balanced accuracy of 0.815, when using hippocampal volume in the predictor set. Considering the CN, MCI and AD subjects in the test set separately for this model, the balanced accuracies are 0.616, 0.75 and 0.85 respectively, while the F1 Scores are 0.4, 0.969 and 0.863, respectively. This observation aligns with expectations, as classifying A&#x03B2;&#x2009;+&#x2009;is more challenging at the earlier stages of the disease.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Balanced accuracy and F1 score for the hybrid model architecture.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th/>
<th align="center" valign="top">Entorhinal cortex volume</th>
<th align="center" valign="top">Hippocampus volume</th>
<th align="center" valign="top">Entorhinal cortex and hippocampus volume</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Balanced accuracy</td>
<td align="center" valign="middle">0.759</td>
<td align="center" valign="middle">0.815</td>
<td align="center" valign="middle">0.787</td>
</tr>
<tr>
<td align="left" valign="middle">F1 score</td>
<td align="center" valign="middle">0.746</td>
<td align="center" valign="middle">0.793</td>
<td align="center" valign="middle">0.769</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The results comparing various hyperparameters for both NiT and MINiT model architectures are summarized in <xref ref-type="table" rid="tab5">Table 5</xref>. Four different hyperparameter tunings were evaluated for both image sizes. The NiT architecture performed poorly, with classification accuracies close to chance (ranging between 0.5 and 0.6) across different hyperparameters and two image sizes. The MINiT architecture outperformed the NiT architectures, particularly for the image size of 64x64x64, with a test accuracy of 0.791 and a test ROC-AUC of 0.857. Therefore, the MINiT architecture improved upon the NiT architecture.</p>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>Experimental results for NiT and MINiT models.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Arch.</th>
<th align="center" valign="top" rowspan="2">Image size</th>
<th align="center" valign="top" colspan="4">Hyperparameters of transformer architectures</th>
<th align="center" valign="top" rowspan="2">Test ROC-AUC</th>
<th align="center" valign="top" rowspan="2">Test balanced accuracy</th>
<th align="center" valign="top" rowspan="2">Test F1 score</th>
</tr>
<tr>
<th align="center" valign="top">Transformer layers</th>
<th align="center" valign="top">Attention heads</th>
<th align="center" valign="top">Dimension</th>
<th align="center" valign="top">MLP dimension</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle" rowspan="6">NiT</td>
<td align="center" valign="middle" rowspan="3">(64)<sup>3</sup></td>
<td align="center" valign="middle">512</td>
<td align="center" valign="middle">3</td>
<td align="center" valign="middle">12</td>
<td align="center" valign="middle">175</td>
<td align="center" valign="middle">0.494</td>
<td align="center" valign="middle">0.541</td>
<td align="center" valign="middle">0.614</td>
</tr>
<tr>
<td align="center" valign="middle">256</td>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">64</td>
<td align="center" valign="middle">0.579</td>
<td align="center" valign="middle">0.592</td>
<td align="center" valign="middle">0.609</td>
</tr>
<tr>
<td align="center" valign="middle">256</td>
<td align="center" valign="middle">4</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">234</td>
<td align="center" valign="middle">0.485</td>
<td align="center" valign="middle">0.516</td>
<td align="center" valign="middle">0.221</td>
</tr>
<tr>
<td align="center" valign="middle" rowspan="3">(128)<sup>3</sup></td>
<td align="center" valign="middle">512</td>
<td align="center" valign="middle">3</td>
<td align="center" valign="middle">12</td>
<td align="center" valign="middle">175</td>
<td align="center" valign="middle">0.569</td>
<td align="center" valign="middle">0.581</td>
<td align="center" valign="middle">0.600</td>
</tr>
<tr>
<td align="center" valign="middle">256</td>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">64</td>
<td align="center" valign="middle">0.692</td>
<td align="center" valign="middle">0.590</td>
<td align="center" valign="middle">0.584</td>
</tr>
<tr>
<td align="center" valign="middle">256</td>
<td align="center" valign="middle">4</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">234</td>
<td align="center" valign="middle">0.692</td>
<td align="center" valign="middle">0.468</td>
<td align="center" valign="middle">0.495</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="8">MINiT</td>
<td align="center" valign="middle" rowspan="4">(64)<sup>3</sup></td>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">12</td>
<td align="center" valign="middle">256</td>
<td align="center" valign="middle">309</td>
<td align="center" valign="middle"><bold>0.857</bold></td>
<td align="center" valign="middle"><bold>0.791</bold></td>
<td align="center" valign="middle"><bold>0.793</bold></td>
</tr>
<tr>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">256</td>
<td align="center" valign="middle">309</td>
<td align="center" valign="middle">0.755</td>
<td align="center" valign="middle">0.697</td>
<td align="center" valign="middle">0.674</td>
</tr>
<tr>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">128</td>
<td align="center" valign="middle">128</td>
<td align="center" valign="middle">0.585</td>
<td align="center" valign="middle">0.599</td>
<td align="center" valign="middle">0.686</td>
</tr>
<tr>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">12</td>
<td align="center" valign="middle">258</td>
<td align="center" valign="middle">128</td>
<td align="center" valign="middle">0.794</td>
<td align="center" valign="middle">0.776</td>
<td align="center" valign="middle">0.782</td>
</tr>
<tr>
<td align="center" valign="middle" rowspan="4">(128)<sup>3</sup></td>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">12</td>
<td align="center" valign="middle">256</td>
<td align="center" valign="middle">309</td>
<td align="center" valign="middle">0.503</td>
<td align="center" valign="middle">0.534</td>
<td align="center" valign="middle">0.557</td>
</tr>
<tr>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">256</td>
<td align="center" valign="middle">309</td>
<td align="center" valign="middle">0.668</td>
<td align="center" valign="middle">0.649</td>
<td align="center" valign="middle">0.688</td>
</tr>
<tr>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">128</td>
<td align="center" valign="middle">128</td>
<td align="center" valign="middle">0.799</td>
<td align="center" valign="middle">0.747</td>
<td align="center" valign="middle">0.766</td>
</tr>
<tr>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">12</td>
<td align="center" valign="middle">258</td>
<td align="center" valign="middle">128</td>
<td align="center" valign="middle">0.476</td>
<td align="center" valign="middle">0.527</td>
<td align="center" valign="middle">0.584</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Columns 3 to 6 show the hyperparameters of the transformer architectures, namely Transformer Layers, No. of Attention Heads, Dimension and MLP Dimension. The experiments are compared using test ROC-AUC, accuracy and F1 Score. Bold numbers indicate the best results.</p>
</table-wrap-foot>
</table-wrap>
<p>Hyperparameter tuning of attention heads, learning rate, encoder layer, and weight decay all enhanced model performance. Notably, the performance for the downscaled image of size 64 &#x00D7; 64 &#x00D7; 64 was superior to that for the upsampled image of size 128 &#x00D7; 128 &#x00D7; 128, in our experiments.</p>
</sec>
<sec sec-type="discussion" id="sec10">
<label>5</label>
<title>Discussion</title>
<p>This work, and several more recent amyloid-PET studies, show that the pattern of A&#x03B2; accumulation closely matches the anatomical trajectory of cortical gray matter loss detectable on brain MRI, a process that is also evident through the widening of the cortical sulci over time. Although the now widely accepted biomarker model by <xref ref-type="bibr" rid="ref32">Jack et al. (2013)</xref> suggests that amyloid levels become statistically abnormal earlier than MRI measures of atrophy, all the processes occur, to some extent, simultaneously. The order in which we detect them with imaging also depends, to some extent, on the sensitivity of our measurement techniques. Magnetic resonance imaging (MRI) measures of atrophy may not be as sensitive as amyloid positron emission tomography (PET) in detecting early changes, as amyloid levels typically become statistically abnormal earlier than structural atrophy becomes abnormal on MRI. The sensitivity of the imaging modality used plays a role in determining the order in which the pathological changes are observed, in addition to the temporal ordering of the underlying biological processes. There have been successful attempts to predict amyloid positivity in patients with MCI using radiomics and structural MRI (<xref ref-type="bibr" rid="ref60">Petrone and Casamitjana, 2019</xref>; <xref ref-type="bibr" rid="ref38">Kim J P, et al., 2021</xref>). To the best of our knowledge, we are the first to focus on predicting brain amyloid using deep learning architectures and T1-weighted structural MRIs. 
We know from work on related diseases (<xref ref-type="bibr" rid="ref42">Kochunov et al., 2022</xref>) that even linear multivariate measures pick up disease effects with greater effect sizes than univariate measures, so a deep learning model could in theory produce a biomarker of atrophy that becomes abnormal or offers earlier anomaly detection and greater group differentiation than univariate measures such as hippocampal volume. As the amyloid accumulation and atrophy co-occur in the brain, it is plausible that our deep learning models could pick up on these signals to predict A&#x03B2;+. Thus, in early-stage patients who are A&#x03B2;+, the models attempt to detect any MRI-based anomalies that might separate them from healthy A&#x03B2;- subjects and combine them into a more accurate discriminator.</p>
<p>One potential issue with using amyloid and tau PET for molecular characterization of AD is off-target binding. While this may be a greater issue for tau PET than amyloid PET (<xref ref-type="bibr" rid="ref83">Young et al., 2021</xref>), it is still an area of active research (<xref ref-type="bibr" rid="ref48">Lemoine et al., 2018</xref>), because off-target binding may increase with age, affecting the SUVR metrics.</p>
<p>From our experiments, we can see that both deep and shallow neural networks, along with traditional classical machine learning models, showed promise in predicting amyloid positivity from standard structural brain MRI. Classical machine learning models, including XGBoost, logistic regression, and ANNs, exhibited promising balanced accuracy and F1 scores: best scores reached around 0.77. There is potential for further improvement with larger training samples and additional data modalities like Diffusion Tensor Images, which have shown significant associations with amyloid (<xref ref-type="bibr" rid="ref14">Chattopadhyay and Singh, 2023a</xref>; <xref ref-type="bibr" rid="ref56">Nir et al., 2023</xref>). Deep learning models, such as the 3D CNN tested, showed slightly better performance than classical machine learning models. The 2D CNN, while inferior to the 3D CNN architecture, may perform better with pre-training.</p>
<p>In the Alzheimer&#x2019;s disease (AD) progression model proposed by <xref ref-type="bibr" rid="ref32">Jack et al. (2013)</xref>, brain amyloid typically accumulates before pervasive brain atrophy is visible on MRI. As such, predicting A&#x03B2;&#x2009;+&#x2009;may be more challenging in controls than in individuals with mild cognitive impairment (MCI) and AD, where abnormalities are already evident on both PET and MRI scans. The hybrid model achieved the highest balanced accuracy of 0.815 when incorporating hippocampal volume in the predictor set. Further enhancements may be possible by increasing the size and diversity of the training data and incorporating data from additional cohorts. The now-standard biomarker model of Alzheimer&#x2019;s disease, proposed by <xref ref-type="bibr" rid="ref32">Jack et al. (2013)</xref>, notes that structural MRI is typically one of the last biomarkers to show detectable changes - after CSF Abeta42, Amyloid PET, and CSF Tau. Because of this sequence, it is reasonable that an amyloid classifier based on T1w may not work as well in the very early stages of AD, and may work better when all of the biomarkers are somewhat elevated.</p>
<p>The MINiT architecture performed better than the other architecture considered&#x2014;NiT. The results are promising. The performance we obtained may even improve with more training data, as the model has a large number of parameters; increasing the training dataset size may enhance model accuracy. In conclusion, the best performing models for the experiments are as summarized in <xref ref-type="table" rid="tab6">Table 6</xref>.</p>
<table-wrap position="float" id="tab6">
<label>Table 6</label>
<caption>
<p>Best performing models for amyloid classification.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Model</th>
<th align="center" valign="top">Balanced accuracy</th>
<th align="center" valign="top">F1 score</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Hybrid Model using Hippocampus Volume in Predictor Set</td>
<td align="center" valign="middle">0.815</td>
<td align="center" valign="middle">0.793</td>
</tr>
<tr>
<td align="left" valign="middle">MINiT with image size (64)<sup>3</sup>, 6 Transformer Layers and 12 Attention Heads</td>
<td align="center" valign="middle">0.791</td>
<td align="center" valign="middle">0.793</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The performance can improve by increasing the amount of training data.</p>
</table-wrap-foot>
</table-wrap>
<p>A key goal of deep learning methods applied to neuroimaging data is that their performance remains robust even if the scanning protocol changes. In ADNI, the MRI scanning protocols do allow different scanner vendors (Siemens, Philips, and GE), but a long preparatory phase by the ADNI MRI Core was undertaken in 2004, to optimize the scan protocols for tracking of dementia, and to align the pulse sequences to the maximum possible extent across vendors. As such, the training data from ADNI was from diverse scanners across the U.S., and included multiple vendors, and although the ADNI protocol was later adopted by many large-scale imaging initiatives, there was still somewhat less heterogeneity in the protocols than would be seen in general. Future work will examine the use of <italic>post-hoc</italic> methods for MRI harmonization (<xref ref-type="bibr" rid="ref50">Liu, 2021</xref>; <xref ref-type="bibr" rid="ref86">Zuo et al., 2021</xref>; <xref ref-type="bibr" rid="ref44">Komandur, 2023</xref>), to test whether this improves performance on data from new scanners and other scanning protocols.</p>
<p>The current biological categorization of Alzheimer&#x2019;s disease commonly relies on other data sources such as amyloid- or tau-sensitive PET scans or cerebrospinal fluid (CSF) biomarkers, all of which are more invasive than structural brain MRI. While a T1w MRI-based model may benefit from the incorporation of other data sources, it offers a promising tool for benchmarking. T1w MRIs are more widely available and cost-effective than amyloid PET. Therefore, classifying amyloid positivity from T1w MRIs may help to identify participants, particularly those with MCI, for further, more intensive testing using other modalities. Prior works (<xref ref-type="bibr" rid="ref26">Grill et al., 2019</xref>) show that the selection of biomarker criteria should be guided by the objective of enrolling individuals who are most likely to use and benefit from the intervention being studied in a specific context. As a result, our work shows the potential of ML/DL methods in MCI participants for detection of amyloid positivity before going for further more intensive testing using other modalities such as PET scans.</p>
<sec id="sec11">
<label>5.1</label>
<title>Limitations and future work</title>
<p>This study has limitations - notably the restricted testing on the ADNI dataset. Performance may improve with an increase in the size and diversity of the training data, by including multimodal brain MRI (<xref ref-type="bibr" rid="ref14">Chattopadhyay and Singh, 2023a</xref>, <xref ref-type="bibr" rid="ref13">2023b</xref>) and by adding data from supplementary cohorts. Future work will include individuals of more diverse ancestries (<xref ref-type="bibr" rid="ref35">John et al., 2023</xref>; <xref ref-type="bibr" rid="ref12">Chattopadhyay and Joshy, 2024</xref>) and with various comorbidities such as vascular disease, frontotemporal dementia, and other degenerative diseases. Moreover, the sensitivity of the approach to different MRI scanning protocols and PET tracers should be examined. In the context of multisite data, harmonization methods - such as using centiloids for PET and generative adversarial networks (GANs) for MRIs - may be needed for domain adaptation. These steps may help in evaluating amyloid prediction accuracy across varied scenarios and populations. There are efforts to develop cheaper ways to measure amyloid from blood (<xref ref-type="bibr" rid="ref1">AD Blood Tests Are Here. Now, Let&#x2019;s Grapple With How to Use Them | ALZFORUM, 2024</xref>), but so far tau has been easier to measure accurately (pTau217). As these methods are developed, we hope to incorporate them into multimodal setups.</p>
</sec>
</sec>
<sec id="sec12">
<title>Author&#x2019;s note</title>
<p>Data used in preparing this article were obtained from the Alzheimer&#x2019;s Disease Neuroimaging Initiative (ADNI) database (<ext-link xlink:href="https://adni.loni.usc.edu/" ext-link-type="uri">adni.loni.usc.edu/</ext-link>). As such, many investigators within the ADNI contributed to the design and implementation of ADNI and/or provided data but did not participate in analysis or writing of this report. A complete listing of ADNI investigators can be found at: <ext-link xlink:href="http://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Acknowledgement_List.pdf" ext-link-type="uri">http://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Acknowledgement_List.pdf</ext-link>.</p>
</sec>
<sec sec-type="data-availability" id="sec13">
<title>Data availability statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found at: <ext-link xlink:href="https://adni.loni.usc.edu" ext-link-type="uri">https://adni.loni.usc.edu</ext-link>; <ext-link xlink:href="https://www.ukbiobank.ac.uk" ext-link-type="uri">https://www.ukbiobank.ac.uk</ext-link>.</p>
</sec>
<sec sec-type="ethics-statement" id="sec14">
<title>Ethics statement</title>
<p>Ethical approval was not required for the study involving humans in accordance with the local legislation and institutional requirements. Written informed consent to participate in this study was not required from the participants or the participants&#x2019; legal guardians/next of kin in accordance with the national legislation and the institutional requirements.</p>
</sec>
<sec sec-type="author-contributions" id="sec15">
<title>Author contributions</title>
<p>TC: Conceptualization, Formal analysis, Investigation, Methodology, Project administration, Software, Writing &#x2013; original draft. SO: Formal analysis, Software, Validation, Visualization, Writing &#x2013; review &#x0026; editing. KB: Formal analysis, Software, Validation, Visualization, Writing &#x2013; review &#x0026; editing. NJ: Formal analysis, Software, Validation, Visualization, Writing &#x2013; review &#x0026; editing. DK: Formal analysis, Software, Validation, Visualization, Writing &#x2013; review &#x0026; editing. JN: Formal analysis, Software, Validation, Visualization, Writing &#x2013; review &#x0026; editing. ST: Data curation, Writing &#x2013; review &#x0026; editing. GS: Writing &#x2013; review &#x0026; editing, Supervision. JA: Writing &#x2013; review &#x0026; editing, Supervision. PT: Conceptualization, Funding acquisition, Methodology, Project administration, Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
</body>
<back>
<sec sec-type="funding-information" id="sec16">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. This work was supported by NIH grants R01AG058854, U01AG068057 and RF1AG057892 (PI:PT). Data collection and sharing for ADNI was funded by National Institutes of Health Grant U01 AG024904 and the DOD (Department of Defense award number W81XWH-12-2-0012). ADNI is funded by the National Institute on Aging, the National Institute of Biomedical Imaging and Bioengineering, and through generous contributions from the following: AbbVie, Alzheimer&#x2019;s Association; Alzheimer&#x2019;s Drug Discovery Foundation; Araclon Biotech; BioClinica, Inc.; Biogen; Bristol-Myers Squibb Company; CereSpir, Inc.; Cogstate; Eisai Inc.; Elan Pharmaceuticals, Inc.; Eli Lilly and Company; EuroImmun; F. Hoffmann-La Roche Ltd. and its affiliated company Genentech, Inc.; Fujirebio; GE Healthcare; IXICO Ltd.; Janssen Alzheimer Immunotherapy Research &#x0026; Development, LLC.; Johnson &#x0026; Johnson Pharmaceutical Research &#x0026; Development LLC.; Lumosity; Lundbeck; Merck &#x0026; Co., Inc.; Meso Scale Diagnostics, LLC.; NeuroRx Research; Neurotrack Technologies; Novartis Pharmaceuticals Corporation; Pfizer Inc.; Piramal Imaging; Servier; Takeda Pharmaceutical Company; and Transition Therapeutics. The Canadian Institutes of Health Research is providing funds to support ADNI clinical sites in Canada. Private sector contributions are facilitated by the Foundation for the National Institutes of Health (<ext-link xlink:href="http://www.fnih.org" ext-link-type="uri">www.fnih.org</ext-link>). The grantee organization is the Northern California Institute for Research and Education, and the study is coordinated by the Alzheimer&#x2019;s Therapeutic Research Institute at the University of Southern California. ADNI data are disseminated by the Laboratory for Neuro Imaging at the University of Southern California.</p>
</sec>
<ack>
<p>We thank the ADNI investigators, and their public and private funders, for creating and publicly disseminating the ADNI dataset. This study builds on preliminary findings in a conference paper entitled, <italic>Can Structural MRIs be used to detect Amyloid Positivity using Deep Learning,</italic> which may be found in the conference proceedings from the 19th International Symposium on Medical Information Processing and Analysis (SIPAIM; <xref ref-type="bibr" rid="ref14">Chattopadhyay and Singh, 2023a</xref>).</p>
</ack>
<sec sec-type="COI-statement" id="sec17">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="sec18">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec19">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/fnins.2024.1387196/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/fnins.2024.1387196/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.PDF" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn id="fn0001"><p><sup>1</sup><ext-link xlink:href="http://enigma.ini.usc.edu/protocols/imaging-protocols/" ext-link-type="uri">http://enigma.ini.usc.edu/protocols/imaging-protocols/</ext-link></p></fn>
<fn id="fn0002"><p><sup>2</sup><ext-link xlink:href="https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FAST" ext-link-type="uri">https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FAST</ext-link></p></fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="ref1">
<citation citation-type="other"><person-group person-group-type="author">
<collab id="coll2">AD Blood Tests Are Here. Now, Let&#x2019;s Grapple With How to Use Them</collab>
</person-group>. (<year>2024</year>). ALZFORUM. Available at: <ext-link xlink:href="http://www.alzforum.org" ext-link-type="uri">www.alzforum.org</ext-link>.</citation>
</ref>
<ref id="ref2">
<citation citation-type="other"><person-group person-group-type="author">
<collab id="coll3">ADNI</collab>
</person-group>. PET Analysis. Available at: <ext-link xlink:href="https://adni.loni.usc.edu/methods/pet-analysis-method/pet-analysis/" ext-link-type="uri">https://adni.loni.usc.edu/methods/pet-analysis-method/pet-analysis/</ext-link></citation>
</ref>
<ref id="ref3">
<citation citation-type="other"><person-group person-group-type="author">
<collab id="coll4">ADNI</collab>
</person-group>. Data types. Available at: <ext-link xlink:href="https://adni.loni.usc.edu/data-samples/data-types/" ext-link-type="uri">https://adni.loni.usc.edu/data-samples/data-types/</ext-link></citation>
</ref>
<ref id="ref4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alzubaidi</surname> <given-names>L.</given-names></name> <name><surname>Al-Amidie</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Novel transfer learning approach for medical imaging with limited labeled data</article-title>. <source>Cancer</source> <volume>13</volume>:<fpage>1590</fpage>. doi: <pub-id pub-id-type="doi">10.3390/cancers13071590</pub-id>, PMID: <pub-id pub-id-type="pmid">33808207</pub-id></citation>
</ref>
<ref id="ref5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bae</surname> <given-names>J. B.</given-names></name> <name><surname>Lee</surname> <given-names>S.</given-names></name> <name><surname>Oh</surname> <given-names>H.</given-names></name> <name><surname>Sung</surname> <given-names>J.</given-names></name> <name><surname>Lee</surname> <given-names>D.</given-names></name> <name><surname>Han</surname> <given-names>J. W.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>A case-control clinical trial on a deep learning-based classification system for diagnosis of amyloid-positive Alzheimer&#x2019;s disease</article-title>. <source>Psychiatry Investig.</source> <volume>20</volume>, <fpage>1195</fpage>&#x2013;<lpage>1203</lpage>. doi: <pub-id pub-id-type="doi">10.30773/pi.2023.0052</pub-id>, PMID: <pub-id pub-id-type="pmid">38163659</pub-id></citation>
</ref>
<ref id="ref6">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Bi</surname> <given-names>Y.</given-names></name>
</person-group>, &#x201C;MultiCrossViT: multimodal vision transformer for schizophrenia prediction using structural MRI and functional network connectivity data,&#x201D; in arXiv, (<year>2022</year>). <comment>Available at: </comment><ext-link xlink:href="http://arxiv.org/abs/2211.06726" ext-link-type="uri">http://arxiv.org/abs/2211.06726</ext-link>.</citation>
</ref>
<ref id="ref7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Blennow</surname> <given-names>K.</given-names></name> <name><surname>Shaw</surname> <given-names>L. M.</given-names></name> <name><surname>Stomrud</surname> <given-names>E.</given-names></name> <name><surname>Mattsson</surname> <given-names>N.</given-names></name> <name><surname>Toledo</surname> <given-names>J. B.</given-names></name> <name><surname>Buck</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Predicting clinical decline and conversion to Alzheimer&#x2019;s disease or dementia using novel Elecsys Abeta(1-42), pTau and tTau CSF immunoassays</article-title>. <source>Sci. Rep.</source> <volume>9</volume>:<fpage>19024</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-019-54204-z</pub-id>, PMID: <pub-id pub-id-type="pmid">31836810</pub-id></citation>
</ref>
<ref id="ref8">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Braak</surname> <given-names>H.</given-names></name>
</person-group> (<year>2000</year>). <article-title>Vulnerability of select neuronal types to Alzheimer's disease</article-title>. <source>Ann. N. Y. Acad. Sci.</source> <volume>924</volume>, <fpage>53</fpage>&#x2013;<lpage>61</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1749-6632.2000.tb05560.x</pub-id></citation>
</ref>
<ref id="ref9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Braak</surname> <given-names>H.</given-names></name> <name><surname>Alafuzoff</surname> <given-names>I.</given-names></name> <name><surname>Arzberger</surname> <given-names>T.</given-names></name> <name><surname>Kretzschmar</surname> <given-names>H.</given-names></name> <name><surname>Del Tredici</surname> <given-names>K.</given-names></name></person-group> (<year>2006</year>). <article-title>Staging of Alzheimer disease-associated neurofibrillary pathology using paraffin sections and immunocytochemistry</article-title>. <source>Acta Neuropathol.</source> <volume>112</volume>, <fpage>389</fpage>&#x2013;<lpage>404</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00401-006-0127-z</pub-id>, PMID: <pub-id pub-id-type="pmid">16906426</pub-id></citation>
</ref>
<ref id="ref10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Braak</surname> <given-names>H.</given-names></name> <name><surname>Braak</surname> <given-names>E.</given-names></name></person-group> (<year>1991</year>). <article-title>Neuropathological stageing of Alzheimer-related changes</article-title>. <source>Acta Neuropathol.</source> <volume>82</volume>, <fpage>239</fpage>&#x2013;<lpage>259</lpage>. doi: <pub-id pub-id-type="doi">10.1007/BF00308809</pub-id></citation>
</ref>
<ref id="ref11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Braak</surname> <given-names>H.</given-names></name> <name><surname>Braak</surname> <given-names>E.</given-names></name></person-group> (<year>1997</year>). <article-title>Frequency of stages of Alzheimer-related lesions in different age categories</article-title>. <source>Neurobiol. Aging</source> <volume>18</volume>, <fpage>351</fpage>&#x2013;<lpage>357</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0197-4580(97)00056-0</pub-id>, PMID: <pub-id pub-id-type="pmid">9330961</pub-id></citation>
</ref>
<ref id="ref12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chattopadhyay</surname> <given-names>T.</given-names></name> <name><surname>Joshy</surname> <given-names>N. A.</given-names></name></person-group> (<year>2024</year>). <article-title>Brain age analysis and dementia classification using convolutional neural networks trained on diffusion MRI: tests in Indian and north American cohorts</article-title>. <source>bioRxiv</source>. doi: <pub-id pub-id-type="doi">10.1101/2024.02.04.578829v1</pub-id></citation>
</ref>
<ref id="ref13">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Chattopadhyay</surname> <given-names>T.</given-names></name> <name><surname>Singh</surname> <given-names>A.</given-names></name></person-group> (<year>2023b</year>). <source>Comparison of anatomical and diffusion MRI for detecting Parkinson&#x2032; s disease using deep convolutional neural network</source>: <publisher-name>IEEE EMBC</publisher-name>. <fpage>1</fpage>&#x2013;<lpage>6</lpage>.</citation>
</ref>
<ref id="ref14">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Chattopadhyay</surname> <given-names>T.</given-names></name> <name><surname>Singh</surname> <given-names>A.</given-names></name></person-group>, (<year>2023a</year>). &#x201C;Predicting dementia severity by merging anatomical and diffusion MRI with deep 3D convolutional neural networks.&#x201D; In the 18th <italic>international symposium on medical information processing and analysis</italic> (Vol. 12567, pp. 90&#x2013;99). SPIE.</citation>
</ref>
<ref id="ref15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cho</surname> <given-names>S. H.</given-names></name> <name><surname>Kim</surname> <given-names>S.</given-names></name> <name><surname>Choi</surname> <given-names>S. M.</given-names></name> <name><surname>Kim</surname> <given-names>B. C.</given-names></name><collab id="coll5">for the Alzheimer's Disease Neuroimaging Initiative</collab></person-group> (<year>2024</year>). <article-title>ATN classification and clinical progression of the amyloid-negative Group in Alzheimer&#x2019;s disease neuroimaging initiative participants</article-title>. <source>Chonnam Med. J.</source> <volume>60</volume>, <fpage>51</fpage>&#x2013;<lpage>58</lpage>. doi: <pub-id pub-id-type="doi">10.4068/cmj.2024.60.1.51</pub-id>, PMID: <pub-id pub-id-type="pmid">38304128</pub-id></citation>
</ref>
<ref id="ref16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Clark</surname> <given-names>C. M.</given-names></name> <name><surname>Schneider</surname> <given-names>J. A.</given-names></name> <name><surname>Bedell</surname> <given-names>B. J.</given-names></name> <name><surname>Beach</surname> <given-names>T. G.</given-names></name> <name><surname>Bilker</surname> <given-names>W. B.</given-names></name> <name><surname>Mintun</surname> <given-names>M. A.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Use of florbetapir-PET for imaging beta-amyloid pathology</article-title>. <source>JAMA</source> <volume>305</volume>, <fpage>275</fpage>&#x2013;<lpage>283</lpage>. doi: <pub-id pub-id-type="doi">10.1001/jama.2010.2008</pub-id>, PMID: <pub-id pub-id-type="pmid">21245183</pub-id></citation>
</ref>
<ref id="ref17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Desikan</surname> <given-names>R. S.</given-names></name> <name><surname>S&#x00E9;gonne</surname> <given-names>F.</given-names></name> <name><surname>Fischl</surname> <given-names>B.</given-names></name> <name><surname>Quinn</surname> <given-names>B. T.</given-names></name> <name><surname>Dickerson</surname> <given-names>B. C.</given-names></name> <name><surname>Blacker</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2006</year>). <article-title>An automated labeling system for subdividing the human cerebral cortex on MRI scans into gyral based regions of interest</article-title>. <source>NeuroImage</source> <volume>31</volume>, <fpage>968</fpage>&#x2013;<lpage>980</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2006.01.021</pub-id>, PMID: <pub-id pub-id-type="pmid">16530430</pub-id></citation>
</ref>
<ref id="ref18">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Dhinagar</surname> <given-names>N. J.</given-names></name> <name><surname>Thomopoulos</surname> <given-names>S. I.</given-names></name></person-group> (<year>2023</year>) Evaluation of transfer learning methods for detecting Alzheimer&#x2019;s disease with brain MRI. In the 18th <italic>international symposium on medical information processing and analysis</italic> (Vol. 12567, pp. 504&#x2013;513). SPIE. IEEE</citation>
</ref>
<ref id="ref19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dhinagar</surname> <given-names>N. J.</given-names></name> <name><surname>Thomopoulos</surname> <given-names>S. I.</given-names></name> <name><surname>Laltoo</surname> <given-names>E.</given-names></name> <name><surname>Thompson</surname> <given-names>P. M.</given-names></name></person-group> (<year>2023</year>). <article-title>Efficiently training vision transformers on structural MRI scans for Alzheimer&#x2019;s disease detection</article-title>. <source>EMBC</source>. (pp. <fpage>1</fpage>&#x2013;<lpage>6</lpage>). <publisher-name>IEEE</publisher-name>.</citation>
</ref>
<ref id="ref20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dufumier</surname> <given-names>B.</given-names></name> <name><surname>Gori</surname> <given-names>P.</given-names></name> <name><surname>Victor</surname> <given-names>J.</given-names></name> <name><surname>Grigis</surname> <given-names>A.</given-names></name> <name><surname>Wessa</surname> <given-names>M.</given-names></name> <name><surname>Brambilla</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Contrastive learning with continuous proxy Meta-data for 3D MRI classification</article-title>. <source>MICCAI</source>. <publisher-name>Springer International Publishing</publisher-name>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-87196-3_6</pub-id></citation>
</ref>
<ref id="ref21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ezzati</surname> <given-names>A.</given-names></name> <name><surname>Harvey</surname> <given-names>D. J.</given-names></name> <name><surname>Habeck</surname> <given-names>C.</given-names></name></person-group> (<year>2020</year>). <article-title>Predicting amyloid-&#x03B2; levels in amnestic mild cognitive impairment using machine learning techniques</article-title>. <source>J. Alzheimer's Dis.: JAD</source> <volume>73</volume>, <fpage>1211</fpage>&#x2013;<lpage>1219</lpage>. doi: <pub-id pub-id-type="doi">10.3233/JAD-191038</pub-id>, PMID: <pub-id pub-id-type="pmid">31884486</pub-id></citation>
</ref>
<ref id="ref22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Feng</surname> <given-names>X.</given-names></name> <name><surname>Provenzano</surname> <given-names>F. A.</given-names></name> <name><surname>Small</surname> <given-names>S. A.</given-names></name></person-group> (<year>2022</year>). <article-title>A deep learning MRI approach outperforms other biomarkers of prodromal Alzheimer&#x2019;s disease</article-title>. <source>Alzheimers Res. Ther.</source> <volume>14</volume>:<fpage>45</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s13195-022-00985-x</pub-id>, PMID: <pub-id pub-id-type="pmid">35351193</pub-id></citation>
</ref>
<ref id="ref23">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Fischl</surname> <given-names>B.</given-names></name>
</person-group> (<year>2012</year>). <article-title>FreeSurfer</article-title>. <source>NeuroImage</source> <volume>62</volume>, <fpage>774</fpage>&#x2013;<lpage>781</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.01.021</pub-id>, PMID: <pub-id pub-id-type="pmid">22248573</pub-id></citation>
</ref>
<ref id="ref24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gelosa</surname> <given-names>G.</given-names></name> <name><surname>Brooks</surname> <given-names>D. J.</given-names></name></person-group> (<year>2012</year>). <article-title>The prognostic value of amyloid imaging</article-title>. <source>Eur. J. Nucl. Med. Mol. Imaging</source> <volume>39</volume>, <fpage>1207</fpage>&#x2013;<lpage>1219</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00259-012-2108-x</pub-id></citation>
</ref>
<ref id="ref25">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Goodfellow</surname> <given-names>I.</given-names></name> <name><surname>Pouget-Abadie</surname> <given-names>J.</given-names></name> <name><surname>Mirza</surname> <given-names>M.</given-names></name> <name><surname>Xu</surname> <given-names>B.</given-names></name> <name><surname>Warde-Farley</surname> <given-names>D.</given-names></name> <name><surname>Ozair</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Generative adversarial nets</article-title>. <source>Advances in neural information processing systems</source>, <fpage>27</fpage>.</citation>
</ref>
<ref id="ref26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Grill</surname> <given-names>J. D.</given-names></name> <name><surname>Nu&#x00F1;o</surname> <given-names>M. M.</given-names></name> <name><surname>Gillen</surname> <given-names>D. L.</given-names></name></person-group> (<year>2019</year>). <article-title>Alzheimer&#x2019;s Disease Neuroimaging Initiative. Which MCI patients should be included in prodromal Alzheimer disease clinical trials?</article-title> <source>Alzheimer Dis. Assoc. Disord.</source> <volume>33</volume>, <fpage>104</fpage>&#x2013;<lpage>112</lpage>. doi: <pub-id pub-id-type="doi">10.1097/WAD.0000000000000303</pub-id>, PMID: <pub-id pub-id-type="pmid">30958413</pub-id></citation>
</ref>
<ref id="ref27">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Gupta</surname> <given-names>U.</given-names></name> <name><surname>Lam</surname> <given-names>P. K.</given-names></name> <name><surname>Ver Steeg</surname> <given-names>G.</given-names></name> <name><surname>Thompson</surname> <given-names>P. M.</given-names></name></person-group> (<year>2021</year>). Improved brain age estimation with slice-based set networks. In <italic>2021 IEEE 18th international symposium on biomedical imaging (ISBI)</italic> (pp. 840&#x2013;844).</citation>
</ref>
<ref id="ref28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hansson</surname> <given-names>O.</given-names></name> <name><surname>Seibyl</surname> <given-names>J.</given-names></name> <name><surname>Stomrud</surname> <given-names>E.</given-names></name> <name><surname>Zetterberg</surname> <given-names>H.</given-names></name> <name><surname>Trojanowski</surname> <given-names>J. Q.</given-names></name> <name><surname>Bittner</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>CSF biomarkers of Alzheimer&#x2019;s disease concord with amyloid-&#x03B2; PET and predict clinical progression: a study of fully automated immunoassays in BioFINDER and ADNI cohorts</article-title>. <source>Alzheimers Dement.</source> <volume>14</volume>, <fpage>1470</fpage>&#x2013;<lpage>1481</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jalz.2018.01.010</pub-id>, PMID: <pub-id pub-id-type="pmid">29499171</pub-id></citation>
</ref>
<ref id="ref29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>K.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Yu</surname> <given-names>H.</given-names></name> <name><surname>Hu</surname> <given-names>Y.</given-names></name></person-group> (<year>2019</year>). <article-title>CTBP1 confers protection for hippocampal and cortical neurons in rat models of Alzheimer's disease</article-title>. <source>Neuroimmunomodulation</source> <volume>26</volume>, <fpage>139</fpage>&#x2013;<lpage>152</lpage>. doi: <pub-id pub-id-type="doi">10.1159/000500942</pub-id>, PMID: <pub-id pub-id-type="pmid">31340205</pub-id></citation>
</ref>
<ref id="ref30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>G.</given-names></name> <name><surname>Liu</surname> <given-names>Z.</given-names></name> <name><surname>van der Maaten</surname> <given-names>L.</given-names></name> <name><surname>Weinberger</surname> <given-names>K. Q.</given-names></name></person-group> (<year>2017</year>). <article-title>Densely connected convolutional networks</article-title>. In <source>Proceedings of the IEEE conference on computer vision and pattern recognition</source>. <fpage>4700</fpage>&#x2013;<lpage>4708</lpage>.</citation>
</ref>
<ref id="ref31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jack</surname> <given-names>C. R.</given-names> <suffix>Jr.</suffix></name> <name><surname>Bennett</surname> <given-names>D. A.</given-names></name> <name><surname>Blennow</surname> <given-names>K.</given-names></name> <name><surname>Carrillo</surname> <given-names>M. C.</given-names></name> <name><surname>Dunn</surname> <given-names>B.</given-names></name> <name><surname>Haeberlein</surname> <given-names>S. B.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>NIA-AA research framework: toward a biological definition of Alzheimer's disease</article-title>. <source>Alzheimers Dement.</source> <volume>14</volume>, <fpage>535</fpage>&#x2013;<lpage>562</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jalz.2018.02.018</pub-id>, PMID: <pub-id pub-id-type="pmid">29653606</pub-id></citation>
</ref>
<ref id="ref32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jack</surname> <given-names>C. R.</given-names> <suffix>Jr.</suffix></name> <name><surname>Knopman</surname> <given-names>D. S.</given-names></name> <name><surname>Jagust</surname> <given-names>W. J.</given-names></name> <name><surname>Petersen</surname> <given-names>R. C.</given-names></name> <name><surname>Weiner</surname> <given-names>M. W.</given-names></name> <name><surname>Aisen</surname> <given-names>P. S.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Tracking pathophysiological processes in Alzheimer's disease: an updated hypothetical model of dynamic biomarkers</article-title>. <source>Lancet. Neurol.</source> <volume>12</volume>, <fpage>207</fpage>&#x2013;<lpage>216</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S1474-4422(12)70291-0</pub-id>, PMID: <pub-id pub-id-type="pmid">23332364</pub-id></citation>
</ref>
<ref id="ref33">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Jang</surname> <given-names>J.</given-names></name> <name><surname>Hwang</surname> <given-names>D.</given-names></name></person-group> (<year>2022</year>). <source>M3T: Three-dimensional medical image classifier using multi-plane and multi-slice transformer</source>, <fpage>20718</fpage>&#x2013;<lpage>20729</lpage>.</citation>
</ref>
<ref id="ref34">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Jin</surname> <given-names>Y.</given-names></name> <name><surname>DuBois</surname> <given-names>J.</given-names></name> <name><surname>Zhao</surname> <given-names>C.</given-names></name> <name><surname>Zhan</surname> <given-names>L.</given-names></name></person-group>, (<year>2023</year>). &#x201C;Brain MRI to PET synthesis and amyloid estimation in Alzheimer's disease via 3D multimodal contrastive GAN.&#x201D; In <italic>International workshop on machine learning in medical imaging</italic> (pp. 94&#x2013;103). Cham: Springer Nature.</citation>
</ref>
<ref id="ref35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>John</surname> <given-names>J. P.</given-names></name> <name><surname>Joshi</surname> <given-names>H.</given-names></name> <name><surname>Sinha</surname> <given-names>P.</given-names></name> <name><surname>Harbishettar</surname> <given-names>V.</given-names></name> <name><surname>Tripathi</surname> <given-names>R.</given-names></name> <name><surname>Cherian</surname> <given-names>A. V.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>India ENIGMA initiative for Global Aging &#x0026; Mental Health&#x2013;a globally coordinated study of brain aging and Alzheimer&#x2019;s disease</article-title>. <source>Alzheimers Dement.</source> <volume>19</volume>:<fpage>e076394</fpage>. doi: <pub-id pub-id-type="doi">10.1002/alz.076394</pub-id></citation>
</ref>
<ref id="ref36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Johnson</surname> <given-names>G. V.</given-names></name> <name><surname>Hartigan</surname> <given-names>J. A.</given-names></name></person-group> (<year>1999</year>). <article-title>Tau protein in normal and Alzheimer's disease brain: an update</article-title>. <source>J. Alzheimers Dis.</source> <volume>1</volume>, <fpage>329</fpage>&#x2013;<lpage>351</lpage>. doi: <pub-id pub-id-type="doi">10.3233/JAD-1999-14-512</pub-id>, PMID: <pub-id pub-id-type="pmid">12214129</pub-id></citation>
</ref>
<ref id="ref37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>H. E.</given-names></name> <name><surname>Cosa-Linan</surname> <given-names>A.</given-names></name> <name><surname>Santhanam</surname> <given-names>N.</given-names></name> <name><surname>Jannesari</surname> <given-names>M.</given-names></name> <name><surname>Maros</surname> <given-names>M. E.</given-names></name> <name><surname>Ganslandt</surname> <given-names>T.</given-names></name></person-group> (<year>2022</year>). <article-title>Transfer learning for medical image classification: a literature review</article-title>. <source>BMC Med. Imaging</source> <volume>22</volume>:<fpage>69</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12880-022-00793-7</pub-id>, PMID: <pub-id pub-id-type="pmid">35418051</pub-id></citation>
</ref>
<ref id="ref38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>J. P.</given-names></name> <name><surname>Kim</surname> <given-names>J.</given-names></name> <name><surname>Jang</surname> <given-names>H.</given-names></name> <name><surname>Kim</surname> <given-names>J.</given-names></name> <name><surname>Kang</surname> <given-names>S. H.</given-names></name> <name><surname>Kim</surname> <given-names>J. S.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Predicting amyloid positivity in patients with mild cognitive impairment using a radiomics approach</article-title>. <source>Sci. Rep.</source> <volume>11</volume>:<fpage>6954</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-021-86114-4</pub-id>, PMID: <pub-id pub-id-type="pmid">33772041</pub-id></citation>
</ref>
<ref id="ref39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>S.</given-names></name> <name><surname>Lee</surname> <given-names>P.</given-names></name> <name><surname>Oh</surname> <given-names>K. T.</given-names></name> <name><surname>Byun</surname> <given-names>M. S.</given-names></name> <name><surname>Yi</surname> <given-names>D.</given-names></name> <name><surname>Lee</surname> <given-names>J. H.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Deep learning-based amyloid PET positivity classification model in the Alzheimer&#x2019;s disease continuum by using 2-[<sup>18</sup>F] FDG PET</article-title>. <source>EJNMMI Res.</source> <volume>11</volume>:<fpage>56</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s13550-021-00798-3</pub-id></citation>
</ref>
<ref id="ref40">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Kingma</surname> <given-names>D.</given-names></name> <name><surname>Ba</surname> <given-names>J.</given-names></name></person-group> (<year>2015</year>). <source>Adam: A method for stochastic optimization</source>: <publisher-name>ICLR</publisher-name>.</citation>
</ref>
<ref id="ref41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Klunk</surname> <given-names>W. E.</given-names></name> <name><surname>Engler</surname> <given-names>H.</given-names></name> <name><surname>Nordberg</surname> <given-names>A.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Blomqvist</surname> <given-names>G.</given-names></name> <name><surname>Holt</surname> <given-names>D. P.</given-names></name> <etal/></person-group>. (<year>2004</year>). <article-title>Imaging brain amyloid in Alzheimer's disease with Pittsburgh compound-B</article-title>. <source>Ann. Neurol.</source> <volume>55</volume>, <fpage>306</fpage>&#x2013;<lpage>319</lpage>. doi: <pub-id pub-id-type="doi">10.1002/ana.20009</pub-id></citation>
</ref>
<ref id="ref42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kochunov</surname> <given-names>P.</given-names></name> <name><surname>Fan</surname> <given-names>F.</given-names></name> <name><surname>Ryan</surname> <given-names>M. C.</given-names></name> <name><surname>Hatch</surname> <given-names>K. S.</given-names></name> <name><surname>Tan</surname> <given-names>S.</given-names></name> <name><surname>Jahanshad</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Translating ENIGMA schizophrenia findings using the regional vulnerability index: association with cognition, symptoms, and disease trajectory</article-title>. <source>Hum. Brain Mapp.</source> <volume>43</volume>, <fpage>566</fpage>&#x2013;<lpage>575</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.25045</pub-id>, PMID: <pub-id pub-id-type="pmid">32463560</pub-id></citation>
</ref>
<ref id="ref43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koivunen</surname> <given-names>J.</given-names></name> <name><surname>Karrasch</surname> <given-names>M.</given-names></name> <name><surname>Scheinin</surname> <given-names>N. M.</given-names></name> <name><surname>Aalto</surname> <given-names>S.</given-names></name> <name><surname>Vahlberg</surname> <given-names>T.</given-names></name> <name><surname>N&#x00E5;gren</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Cognitive decline and amyloid accumulation in patients with mild cognitive impairment</article-title>. <source>Dement. Geriatr. Cogn. Disord.</source> <volume>34</volume>, <fpage>31</fpage>&#x2013;<lpage>37</lpage>. doi: <pub-id pub-id-type="doi">10.1159/000341580</pub-id></citation>
</ref>
<ref id="ref44">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Komandur</surname> <given-names>D.</given-names></name>
</person-group>, (<year>2023</year>). Unsupervised harmonization of brain MRI using 3D CycleGANs and its effect on brain age prediction. <italic>19th International symposium on medical information processing and analysis</italic> (SIPAIM) (pp. 1&#x2013;5). IEEE.</citation>
</ref>
<ref id="ref45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lam</surname> <given-names>P.</given-names></name> <name><surname>Zhu</surname> <given-names>A. H.</given-names></name></person-group> (<year>2020</year>). <article-title>3-D grid-attention networks for interpretable age and Alzheimer&#x2019;s disease prediction from structural MRI</article-title>. <source>arXiv</source> preprint arXiv:2011.09115.</citation>
</ref>
<ref id="ref46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Landau</surname> <given-names>S. M.</given-names></name> <name><surname>Breault</surname> <given-names>C.</given-names></name> <name><surname>Joshi</surname> <given-names>A. D.</given-names></name> <name><surname>Pontecorvo</surname> <given-names>M.</given-names></name> <name><surname>Mathis</surname> <given-names>C. A.</given-names></name> <name><surname>Jagust</surname> <given-names>W. J.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Amyloid-&#x03B2; imaging with Pittsburgh compound B and florbetapir: comparing radiotracers and quantification methods</article-title>. <source>J. Nucl. Med.</source> <volume>54</volume>, <fpage>70</fpage>&#x2013;<lpage>77</lpage>. doi: <pub-id pub-id-type="doi">10.2967/jnumed.112.109009</pub-id>, PMID: <pub-id pub-id-type="pmid">23166389</pub-id></citation>
</ref>
<ref id="ref47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Landau</surname> <given-names>S. M.</given-names></name> <name><surname>Thomas</surname> <given-names>B. A.</given-names></name></person-group> (<year>2014</year>). <article-title>Amyloid PET imaging in Alzheimer&#x2019;s disease: a comparison of three radiotracers</article-title>. <source>Eur. J. Nucl. Med. Mol. Imaging</source> <volume>41</volume>, <fpage>1398</fpage>&#x2013;<lpage>1407</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00259-014-2753-3</pub-id>, PMID: <pub-id pub-id-type="pmid">24647577</pub-id></citation>
</ref>
<ref id="ref48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lemoine</surname> <given-names>L.</given-names></name> <name><surname>Leuzy</surname> <given-names>A.</given-names></name> <name><surname>Chiotis</surname> <given-names>K.</given-names></name> <name><surname>Rodriguez-Vieitez</surname> <given-names>E.</given-names></name> <name><surname>Nordberg</surname> <given-names>A.</given-names></name></person-group> (<year>2018</year>). <article-title>Tau positron emission tomography imaging in tauopathies: the added hurdle of off-target binding</article-title>. <source>Alzheimers Dement (Amst).</source> <volume>10</volume>, <fpage>232</fpage>&#x2013;<lpage>236</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.dadm.2018.01.007</pub-id></citation>
</ref>
<ref id="ref49">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Li</surname> <given-names>J.</given-names></name>
</person-group> (<year>2022</year>). <article-title>Transforming medical imaging with transformers? A comparative review of key properties, current progresses, and future perspectives</article-title>. <source>arXiv</source> <volume>2206</volume>:<fpage>01136</fpage>.</citation>
</ref>
<ref id="ref50">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>M.</given-names></name>
</person-group>, (<year>2021</year>). Style transfer using generative adversarial networks for multi-site mri harmonization. In <italic>Medical Image Computing and Computer Assisted Intervention&#x2013;MICCAI 2021: 24th International Conference</italic>, Strasbourg, France, September 27&#x2013;October 1, 2021, Proceedings, Part III 24 (pp. 313&#x2013;322). Springer International Publishing.</citation>
</ref>
<ref id="ref51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>B.</given-names></name> <name><surname>Li</surname> <given-names>H.-X.</given-names></name> <name><surname>Chang</surname> <given-names>Z.-K.</given-names></name> <name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Chen</surname> <given-names>N. X.</given-names></name> <name><surname>Zhu</surname> <given-names>Z. C.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>A practical Alzheimer disease classifier via brain imaging-based deep learning on 85,721 samples</article-title>. <source>J. Big Data</source> <volume>9</volume>:<fpage>101</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s40537-022-00650-y</pub-id></citation>
</ref>
<ref id="ref52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Masters</surname> <given-names>C. L.</given-names></name> <name><surname>Selkoe</surname> <given-names>D. J.</given-names></name></person-group> (<year>2012</year>). <article-title>Biochemistry of amyloid &#x03B2;-protein and amyloid deposits in Alzheimer disease</article-title>. <source>Cold Spring Harb. Perspect. Med.</source> <volume>2</volume>:<fpage>a006262</fpage>. doi: <pub-id pub-id-type="doi">10.1101/cshperspect.a006262</pub-id>, PMID: <pub-id pub-id-type="pmid">22675658</pub-id></citation>
</ref>
<ref id="ref53">
<citation citation-type="other"><person-group person-group-type="author">
<name><surname>Matsoukas</surname> <given-names>C.</given-names></name>
</person-group>, &#x201C;Is it time to replace CNNs with transformers for medical images?&#x201D; (<year>2021</year>). <comment>Available at: </comment><ext-link xlink:href="http://arxiv.org/abs/2108.09038" ext-link-type="uri">http://arxiv.org/abs/2108.09038</ext-link></citation>
</ref>
<ref id="ref54">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Morid</surname> <given-names>M. A.</given-names></name> <name><surname>Borjali</surname> <given-names>A.</given-names></name> <name><surname>Fiol</surname> <given-names>G. D.</given-names></name></person-group> (<year>2021</year>). <article-title>A scoping review of transfer learning research on medical image analysis using ImageNet</article-title>. <source>Comput. Biol. Med.</source> <volume>128</volume>:<fpage>104115</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.104115</pub-id>, PMID: <pub-id pub-id-type="pmid">33227578</pub-id></citation>
</ref>
<ref id="ref55">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nelson</surname> <given-names>P. T.</given-names></name> <name><surname>Jicha</surname> <given-names>G. A.</given-names></name> <name><surname>Schmitt</surname> <given-names>F. A.</given-names></name> <name><surname>Liu</surname> <given-names>H.</given-names></name> <name><surname>Davis</surname> <given-names>D. G.</given-names></name> <name><surname>Mendiondo</surname> <given-names>M. S.</given-names></name> <etal/></person-group>. (<year>2007</year>). <article-title>Clinicopathologic correlations in a large Alzheimer disease center autopsy cohort: neuritic plaques and neurofibrillary tangles &#x201C;do count&#x201D; when staging disease severity</article-title>. <source>J. Neuropathol. Exp. Neurol.</source> <volume>66</volume>, <fpage>1136</fpage>&#x2013;<lpage>1146</lpage>. doi: <pub-id pub-id-type="doi">10.1097/nen.0b013e31815c5efb</pub-id></citation>
</ref>
<ref id="ref56">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nir</surname> <given-names>T. M.</given-names></name> <name><surname>Villal&#x00F3;n-Reina</surname> <given-names>J. E.</given-names></name> <name><surname>Salminen</surname> <given-names>L. E.</given-names></name></person-group> (<year>2023</year>). <article-title>Cortical microstructural associations with CSF amyloid and pTau</article-title>. <source>Mol. Psychiatry</source>  <fpage>1</fpage>&#x2013;<lpage>12</lpage>.</citation>
</ref>
<ref id="ref57">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Okello</surname> <given-names>A.</given-names></name> <name><surname>Koivunen</surname> <given-names>J.</given-names></name> <name><surname>Edison</surname> <given-names>P.</given-names></name> <name><surname>Archer</surname> <given-names>H. A.</given-names></name> <name><surname>Turkheimer</surname> <given-names>F. E.</given-names></name> <name><surname>N&#x00E5;gren</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>Conversion of amyloid positive and negative MCI to AD over 3 years: an 11C-PIB PET study</article-title>. <source>Neurology</source> <volume>73</volume>, <fpage>754</fpage>&#x2013;<lpage>760</lpage>. doi: <pub-id pub-id-type="doi">10.1212/WNL.0b013e3181b23564</pub-id>, PMID: <pub-id pub-id-type="pmid">19587325</pub-id></citation>
</ref>
<ref id="ref58">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>M.</given-names></name> <name><surname>Lian</surname> <given-names>C.</given-names></name> <name><surname>Zhou</surname> <given-names>T.</given-names></name> <name><surname>Xia</surname> <given-names>Y.</given-names></name></person-group>, &#x201C;Synthesizing missing PET from MRI with cycle-consistent generative adversarial networks for Alzheimer&#x2019;s disease diagnosis,&#x201D; <italic>21st International Conference, Granada, Spain, Proceedings</italic>, Part 11072. (<year>2018</year>).</citation>
</ref>
<ref id="ref59">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Petersen</surname> <given-names>R. C.</given-names></name> <name><surname>Smith</surname> <given-names>G. E.</given-names></name> <name><surname>Waring</surname> <given-names>S. C.</given-names></name> <name><surname>Ivnik</surname> <given-names>R. J.</given-names></name> <name><surname>Tangalos</surname> <given-names>E. G.</given-names></name> <name><surname>Kokmen</surname> <given-names>E.</given-names></name></person-group> (<year>1999</year>). <article-title>Mild cognitive impairment: clinical characterization and outcome</article-title>. <source>Arch. Neurol.</source> <volume>56</volume>, <fpage>303</fpage>&#x2013;<lpage>308</lpage>. doi: <pub-id pub-id-type="doi">10.1001/archneur.56.3.303</pub-id></citation>
</ref>
<ref id="ref60">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Petrone</surname> <given-names>P. M.</given-names></name> <name><surname>Casamitjana</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>Prediction of amyloid pathology in cognitively unimpaired individuals using voxel-wise analysis of longitudinal structural brain MRI</article-title>. <source>Alzheimers Res. Ther.</source> <volume>11</volume>:<fpage>72</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s13195-019-0526-8</pub-id>, PMID: <pub-id pub-id-type="pmid">31421683</pub-id></citation>
</ref>
<ref id="ref61">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Qu</surname> <given-names>C.</given-names></name> <name><surname>Zou</surname> <given-names>Y.</given-names></name> <name><surname>Dai</surname> <given-names>Q.</given-names></name> <name><surname>Ma</surname> <given-names>Y.</given-names></name> <name><surname>He</surname> <given-names>J.</given-names></name> <name><surname>Liu</surname> <given-names>Q.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Advancing diagnostic performance and clinical applicability of deep learning-driven generative adversarial networks for Alzheimer's disease</article-title>. <source>Psychoradiology</source> <volume>1</volume>, <fpage>225</fpage>&#x2013;<lpage>248</lpage>. doi: <pub-id pub-id-type="doi">10.1093/psyrad/kkab017</pub-id>, PMID: <pub-id pub-id-type="pmid">38666217</pub-id></citation>
</ref>
<ref id="ref62">
<citation citation-type="other"><person-group person-group-type="author">
<collab id="coll6">Revised Again: Alzheimer&#x2019;s Diagnostic Criteria Get Another Makeover</collab>
</person-group>. (<year>2023</year>) ALZFORUM. Available at: <ext-link xlink:href="http://www.alzforum.org" ext-link-type="uri">www.alzforum.org</ext-link>.</citation>
</ref>
<ref id="ref63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rowe</surname> <given-names>C. C.</given-names></name> <name><surname>Ellis</surname> <given-names>K. A.</given-names></name> <name><surname>Rimajova</surname> <given-names>M.</given-names></name> <name><surname>Bourgeat</surname> <given-names>P.</given-names></name> <name><surname>Pike</surname> <given-names>K. E.</given-names></name> <name><surname>Jones</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>Amyloid imaging results from the Australian imaging, biomarkers and lifestyle (AIBL) study of aging</article-title>. <source>Neurobiol. Aging</source> <volume>31</volume>, <fpage>1275</fpage>&#x2013;<lpage>1283</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2010.04.007</pub-id></citation>
</ref>
<ref id="ref64">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shan</surname> <given-names>G.</given-names></name> <name><surname>Bernick</surname> <given-names>C.</given-names></name> <name><surname>Caldwell</surname> <given-names>J. Z. K.</given-names></name> <name><surname>Ritter</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>Machine learning methods to predict amyloid positivity using domain scores from cognitive tests</article-title>. <source>Sci. Rep.</source> <volume>11</volume>:<fpage>4822</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-021-83911-9</pub-id>, PMID: <pub-id pub-id-type="pmid">33649452</pub-id></citation>
</ref>
<ref id="ref65">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Singla</surname> <given-names>A.</given-names></name> <name><surname>Zhao</surname> <given-names>Q.</given-names></name> <name><surname>do</surname> <given-names>D. K.</given-names></name> <name><surname>Zhou</surname> <given-names>Y.</given-names></name> <name><surname>Pohl</surname> <given-names>K. M.</given-names></name> <name><surname>Adeli</surname> <given-names>E.</given-names></name></person-group> (<year>2022</year>). <article-title>Multiple Instance Neuroimage Transformer</article-title>. <source>Predictive Intelligence in Medicine: 5th International Workshop, MICCAI PRIME</source> <volume>13564</volume>, <fpage>36</fpage>&#x2013;<lpage>48</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-031-16919-9_4</pub-id></citation>
</ref>
<ref id="ref66">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Son</surname> <given-names>H. J.</given-names></name> <name><surname>Oh</surname> <given-names>J. S.</given-names></name> <name><surname>Oh</surname> <given-names>M.</given-names></name> <name><surname>Kim</surname> <given-names>S. J.</given-names></name> <name><surname>Lee</surname> <given-names>J. H.</given-names></name> <name><surname>Roh</surname> <given-names>J. H.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>The clinical feasibility of deep learning-based classification of amyloid PET images in visually equivocal cases</article-title>. <source>Eur. J. Nucl. Med. Mol. Imaging</source> <volume>47</volume>, <fpage>332</fpage>&#x2013;<lpage>341</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00259-019-04595-y</pub-id>, PMID: <pub-id pub-id-type="pmid">31811343</pub-id></citation>
</ref>
<ref id="ref67">
<citation citation-type="other"><person-group person-group-type="author">
<collab id="coll7">SPM12 software - Statistical Parametric Mapping</collab>
</person-group>. (<year>2014</year>). Functional Imaging Laboratory. <comment>Available at:</comment> <ext-link xlink:href="https://www.fil.ion.ucl.ac.uk/spm/software/spm12/" ext-link-type="uri">https://www.fil.ion.ucl.ac.uk/spm/software/spm12/</ext-link></citation>
</ref>
<ref id="ref68">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sudlow</surname> <given-names>C.</given-names></name> <name><surname>Gallacher</surname> <given-names>J.</given-names></name> <name><surname>Allen</surname> <given-names>N.</given-names></name> <name><surname>Beral</surname> <given-names>V.</given-names></name> <name><surname>Burton</surname> <given-names>P.</given-names></name> <name><surname>Danesh</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>UK biobank: an open access resource for identifying the causes of a wide range of complex diseases of middle and old age</article-title>. <source>PLoS Med.</source> <volume>12</volume>:<fpage>e1001779</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pmed.1001779</pub-id>, PMID: <pub-id pub-id-type="pmid">25826379</pub-id></citation>
</ref>
<ref id="ref69">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tarasoff-Conway</surname> <given-names>J. M.</given-names></name> <name><surname>Carare</surname> <given-names>R. O.</given-names></name> <name><surname>Osorio</surname> <given-names>R. S.</given-names></name> <name><surname>Glodzik</surname> <given-names>L.</given-names></name> <name><surname>Butler</surname> <given-names>T.</given-names></name> <name><surname>Fieremans</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Clearance systems in the brain implications for Alzheimer disease</article-title>. <source>Nat. Rev. Neurol.</source> <volume>11</volume>, <fpage>457</fpage>&#x2013;<lpage>470</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nrneurol.2015.119</pub-id>, PMID: <pub-id pub-id-type="pmid">26195256</pub-id></citation>
</ref>
<ref id="ref70">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Thompson</surname> <given-names>P. M.</given-names></name>
</person-group> (<year>2007</year>). <article-title>Tracking Alzheimer's disease</article-title>. <source>Ann. N. Y. Acad. Sci.</source> <volume>1097</volume>, <fpage>183</fpage>&#x2013;<lpage>214</lpage>. doi: <pub-id pub-id-type="doi">10.1196/annals.1379.017</pub-id>, PMID: <pub-id pub-id-type="pmid">17413023</pub-id></citation>
</ref>
<ref id="ref71">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Thompson</surname> <given-names>P. M.</given-names></name> <name><surname>Hayashi</surname> <given-names>K. M.</given-names></name> <name><surname>Sowell</surname> <given-names>E. R.</given-names></name> <name><surname>Gogtay</surname> <given-names>N.</given-names></name> <name><surname>Giedd</surname> <given-names>J. N.</given-names></name> <name><surname>Rapoport</surname> <given-names>J. L.</given-names></name> <etal/></person-group>. (<year>2004</year>). <article-title>Mapping cortical change in Alzheimer's disease, brain development, and schizophrenia</article-title>. <source>NeuroImage</source> <volume>23</volume>, <fpage>S2</fpage>&#x2013;<lpage>S18</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2004.07.071</pub-id>, PMID: <pub-id pub-id-type="pmid">15501091</pub-id></citation>
</ref>
<ref id="ref72">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>van Dyck</surname> <given-names>C. H.</given-names></name> <name><surname>Swanson</surname> <given-names>C. J.</given-names></name> <name><surname>Aisen</surname> <given-names>P.</given-names></name> <name><surname>Bateman</surname> <given-names>R. J.</given-names></name> <name><surname>Chen</surname> <given-names>C.</given-names></name> <name><surname>Gee</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Lecanemab in early Alzheimer's disease</article-title>. <source>N. Engl. J. Med.</source> <volume>388</volume>, <fpage>9</fpage>&#x2013;<lpage>21</lpage>. doi: <pub-id pub-id-type="doi">10.1056/NEJMoa2212948</pub-id></citation>
</ref>
<ref id="ref73">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>van Erp</surname> <given-names>T. G.</given-names></name> <name><surname>Hibar</surname> <given-names>D. P.</given-names></name></person-group> (<year>2016</year>). <article-title>Subcortical brain volume abnormalities in 2028 individuals with schizophrenia and 2540 healthy controls via the ENIGMA consortium</article-title>. <source>Mol. Psychiatry</source> <volume>21</volume>, <fpage>547</fpage>&#x2013;<lpage>553</lpage>. doi: <pub-id pub-id-type="doi">10.1038/mp.2015.63</pub-id>, PMID: <pub-id pub-id-type="pmid">26033243</pub-id></citation>
</ref>
<ref id="ref74">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>van Erp</surname> <given-names>T. G. M.</given-names></name> <name><surname>Walton</surname> <given-names>E.</given-names></name> <name><surname>Hibar</surname> <given-names>D. P.</given-names></name> <name><surname>Schmaal</surname> <given-names>L.</given-names></name> <name><surname>Jiang</surname> <given-names>W.</given-names></name> <name><surname>Glahn</surname> <given-names>D. C.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Cortical brain abnormalities in 4474 individuals with schizophrenia and 5098 control subjects via the enhancing neuro imaging genetics through meta analysis (ENIGMA) consortium</article-title>. <source>Biol. Psychiatry</source> <volume>84</volume>, <fpage>644</fpage>&#x2013;<lpage>654</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.biopsych.2018.04.023</pub-id>, PMID: <pub-id pub-id-type="pmid">29960671</pub-id></citation>
</ref>
<ref id="ref75">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Veitch</surname> <given-names>D.</given-names></name> <name><surname>Weiner</surname> <given-names>M. W.</given-names></name> <name><surname>Aisen</surname> <given-names>P. S.</given-names></name> <name><surname>Beckett</surname> <given-names>L. A.</given-names></name> <name><surname>Cairns</surname> <given-names>N. J.</given-names></name> <name><surname>Green</surname> <given-names>R. C.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Understanding disease progression and improving Alzheimer's disease clinical trials: recent highlights from the Alzheimer's disease neuroimaging initiative</article-title>. <source>Alzheimers Dement.</source> <volume>15</volume>, <fpage>106</fpage>&#x2013;<lpage>152</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jalz.2018.08.005</pub-id>, PMID: <pub-id pub-id-type="pmid">30321505</pub-id></citation>
</ref>
<ref id="ref76">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Villemagne</surname> <given-names>V. L.</given-names></name> <name><surname>Burnham</surname> <given-names>S.</given-names></name> <name><surname>Bourgeat</surname> <given-names>P.</given-names></name> <name><surname>Brown</surname> <given-names>B.</given-names></name> <name><surname>Ellis</surname> <given-names>K. A.</given-names></name> <name><surname>Salvado</surname> <given-names>O.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Amyloid &#x03B2; deposition, neurodegeneration, and cognitive decline in sporadic Alzheimer&#x2019;s disease: a prospective cohort study</article-title>. <source>Lancet Neurol.</source> <volume>12</volume>, <fpage>357</fpage>&#x2013;<lpage>367</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S1474-4422(13)70044-9</pub-id></citation>
</ref>
<ref id="ref77">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Villemagne</surname> <given-names>V. L.</given-names></name> <name><surname>Pike</surname> <given-names>K. E.</given-names></name> <name><surname>Ch&#x00E9;telat</surname> <given-names>G.</given-names></name> <name><surname>Ellis</surname> <given-names>K. A.</given-names></name> <name><surname>Mulligan</surname> <given-names>R. S.</given-names></name> <name><surname>Bourgeat</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Longitudinal assessment of A&#x03B2; and cognition in aging and Alzheimer disease</article-title>. <source>Ann. Neurol.</source> <volume>69</volume>, <fpage>181</fpage>&#x2013;<lpage>192</lpage>. doi: <pub-id pub-id-type="doi">10.1002/ana.22248</pub-id>, PMID: <pub-id pub-id-type="pmid">21280088</pub-id></citation>
</ref>
<ref id="ref78">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Lei</surname> <given-names>Y.</given-names></name> <name><surname>Fu</surname> <given-names>Y.</given-names></name> <name><surname>Wynne</surname> <given-names>J. F.</given-names></name> <name><surname>Curran</surname> <given-names>W. J.</given-names></name> <name><surname>Liu</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>A review on medical imaging synthesis using deep learning and its clinical applications</article-title>. <source>J. Appl. Clin. Med. Phys.</source> <volume>22</volume>, <fpage>11</fpage>&#x2013;<lpage>36</lpage>. doi: <pub-id pub-id-type="doi">10.1002/acm2.13121</pub-id>, PMID: <pub-id pub-id-type="pmid">33305538</pub-id></citation>
</ref>
<ref id="ref79">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Willemink</surname> <given-names>M. J.</given-names></name> <name><surname>Roth</surname> <given-names>H. R.</given-names></name> <name><surname>Sandfort</surname> <given-names>V.</given-names></name></person-group> (<year>2022</year>). <article-title>Toward foundational deep learning models for medical imaging in the new era of transformer networks</article-title>. <source>Radiol. Artif. Intell.</source> <volume>4</volume>:<fpage>e210284</fpage>. doi: <pub-id pub-id-type="doi">10.1148/ryai.210284</pub-id></citation>
</ref>
<ref id="ref80">
<citation citation-type="other"><person-group person-group-type="author">
<collab id="coll8">World Health Organization</collab>
</person-group>. &#x201C;Dementia,&#x201D; (<year>2022</year>). <comment>Available at: </comment><ext-link xlink:href="https://www.who.int/news-room/fact-sheets/detail/dementia" ext-link-type="uri">https://www.who.int/news-room/fact-sheets/detail/dementia</ext-link>.</citation>
</ref>
<ref id="ref81">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yasuno</surname> <given-names>F.</given-names></name> <name><surname>Kazui</surname> <given-names>H.</given-names></name> <name><surname>Morita</surname> <given-names>N.</given-names></name> <name><surname>Kajimoto</surname> <given-names>K.</given-names></name> <name><surname>Ihara</surname> <given-names>M.</given-names></name> <name><surname>Taguchi</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Use of T1-weighted/T2-weighted magnetic resonance ratio to elucidate changes due to amyloid &#x03B2; accumulation in cognitively normal subjects</article-title>. <source>NeuroImage: Clinical</source> <volume>13</volume>, <fpage>209</fpage>&#x2013;<lpage>214</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.nicl.2016.11.029</pub-id>, PMID: <pub-id pub-id-type="pmid">28003959</pub-id></citation>
</ref>
<ref id="ref82">
<citation citation-type="journal"><person-group person-group-type="author">
<name><surname>Youden</surname> <given-names>W. J.</given-names></name>
</person-group> (<year>1950</year>). <article-title>Index for rating diagnostic tests</article-title>. <source>Cancer</source> <volume>3</volume>, <fpage>32</fpage>&#x2013;<lpage>35</lpage>. doi: <pub-id pub-id-type="doi">10.1002/1097-0142(1950)3:1&#x003C;32::AID-CNCR2820030106&#x003E;3.0.CO;2-3</pub-id></citation>
</ref>
<ref id="ref83">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Young</surname> <given-names>C. B.</given-names></name> <name><surname>Landau</surname> <given-names>S. M.</given-names></name> <name><surname>Harrison</surname> <given-names>T. M.</given-names></name> <name><surname>Poston</surname> <given-names>K. L.</given-names></name> <name><surname>Mormino</surname> <given-names>E. C.</given-names></name></person-group> (<year>2021</year>). <article-title>Influence of common reference regions on regional tau patterns in cross-sectional and longitudinal [18F]-AV-1451 PET data</article-title>. <source>NeuroImage</source> <volume>243</volume>:<fpage>118553</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2021.118553</pub-id></citation>
</ref>
<ref id="ref84">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>T.</given-names></name> <name><surname>Ye</surname> <given-names>X. Y.</given-names></name> <name><surname>Lu</surname> <given-names>H. L.</given-names></name> <name><surname>Zheng</surname> <given-names>X.</given-names></name> <name><surname>Qiu</surname> <given-names>S.</given-names></name> <name><surname>Liu</surname> <given-names>Y. C.</given-names></name></person-group> (<year>2022</year>). <article-title>Dense convolutional network and its application in medical image analysis</article-title>. <source>Biomed. Res. Int.</source> <volume>2022</volume>, <fpage>1</fpage>&#x2013;<lpage>22</lpage>. doi: <pub-id pub-id-type="doi">10.1155/2022/2384830</pub-id></citation>
</ref>
<ref id="ref85">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhuang</surname> <given-names>F.</given-names></name> <name><surname>Qi</surname> <given-names>Z.</given-names></name> <name><surname>Duan</surname> <given-names>K.</given-names></name> <name><surname>Xi</surname> <given-names>D.</given-names></name> <name><surname>Zhu</surname> <given-names>Y.</given-names></name> <name><surname>Zhu</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>A comprehensive survey on transfer learning</article-title>. <source>Proc. IEEE</source> <volume>109</volume>, <fpage>43</fpage>&#x2013;<lpage>76</lpage>. doi: <pub-id pub-id-type="doi">10.1109/JPROC.2020.3004555</pub-id></citation>
</ref>
<ref id="ref86">
<citation citation-type="other"><person-group person-group-type="author"><name><surname>Zuo</surname> <given-names>L.</given-names></name> <name><surname>Dewey</surname> <given-names>B. E.</given-names></name> <name><surname>Carass</surname> <given-names>A.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>He</surname> <given-names>Y.</given-names></name> <name><surname>Calabresi</surname> <given-names>P. A.</given-names></name></person-group>, (<year>2021</year>). Information-based disentangled representation learning for unsupervised MR harmonization. In the <italic>International Conference on Information Processing in Medical Imaging</italic> (pp. 346&#x2013;359). Cham: Springer International Publishing.</citation>
</ref>
</ref-list>
</back>
</article>