<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2022.1097019</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Weakly supervised learning analysis of A&#x03B2; plaque distribution in the whole rat brain</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Chen</surname> <given-names>Zhiyi</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2009432/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Zheng</surname> <given-names>Weijie</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Pang</surname> <given-names>Keliang</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x2020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/402462/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Xia</surname> <given-names>Debin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Guo</surname> <given-names>Lingxiao</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Chen</surname> <given-names>Xuejin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1546838/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Wu</surname> <given-names>Feng</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Wang</surname> <given-names>Hao</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2086186/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>National Engineering Laboratory for Brain-Inspired Intelligence Technology and Application, School of Information Science and Technology, University of Science and Technology of China</institution>, <addr-line>Hefei</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Institute of Artificial Intelligence, Hefei Comprehensive National Science Center</institution>, <addr-line>Hefei</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>AHU-IAI AI Joint Laboratory, Anhui University</institution>, <addr-line>Hefei</addr-line>, <country>China</country></aff>
<aff id="aff4"><sup>4</sup><institution>School of Pharmaceutical Sciences, IDG/McGovern Institute for Brain Research, Tsinghua University-Peking University Joint Center for Life Sciences, Tsinghua University</institution>, <addr-line>Beijing</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Guanglei Zhang, Beihang University, China</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Ben Long, Hainan University, China; Denise Ramirez, University of Texas Southwestern Medical Center, United States</p></fn>
<corresp id="c001">&#x002A;Correspondence: Keliang Pang, <email>keliang.pang@163.com</email></corresp>
<corresp id="c002">Hao Wang, <email>haowang@ustc.edu.cn</email></corresp>
<fn fn-type="present-address" id="fn002"><p><sup>&#x2020;</sup>Present address: Keliang Pang, Department of Geriatrics, Shanghai Ruijin Hospital, Shanghai Jiaotong University School of Medicine, Shanghai, China</p></fn>
<fn fn-type="other" id="fn004"><p>This article was submitted to Brain Imaging Methods, a section of the journal Frontiers in Neuroscience</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>19</day>
<month>01</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>16</volume>
<elocation-id>1097019</elocation-id>
<history>
<date date-type="received">
<day>13</day>
<month>11</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>30</day>
<month>12</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2023 Chen, Zheng, Pang, Xia, Guo, Chen, Wu and Wang.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Chen, Zheng, Pang, Xia, Guo, Chen, Wu and Wang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>Alzheimer&#x2019;s disease (AD) is a great challenge for the world and can hardly be cured, partly because of the lack of animal models that fully mimic pathological progress. Recently, a rat model exhibiting the most pathological symptoms of AD has been reported. However, high-resolution imaging and accurate quantification of beta-amyloid (A&#x03B2;) plaques in the whole rat brain have not been fulfilled due to substantial technical challenges. In this paper, a high-efficiency data analysis pipeline is proposed to quantify A&#x03B2; plaques in whole rat brain through several terabytes of image data acquired by a high-speed volumetric imaging approach we have developed previously. A novel segmentation framework applying a high-performance weakly supervised learning method which can dramatically reduce the human labeling consumption is described in this study. The effectiveness of our segmentation framework is validated with different metrics. The segmented A&#x03B2; plaques were mapped to a standard rat brain atlas for quantitative analysis of the A&#x03B2; distribution in each brain area. This pipeline may also be applied to the segmentation and accurate quantification of other non-specific morphology objects.</p>
</abstract>
<kwd-group>
<kwd>A&#x03B2; plaque</kwd>
<kwd>rat brain</kwd>
<kwd>deep learning</kwd>
<kwd>light sheet microscopy</kwd>
<kwd>weakly supervised learning segmentation</kwd>
<kwd>quantitative analysis</kwd>
</kwd-group>
<contract-num rid="cn001">WK2100000022</contract-num>
<contract-num rid="cn002">32100896</contract-num>
<contract-sponsor id="cn001">Fundamental Research Funds for the Central Universities<named-content content-type="fundref-id">10.13039/501100012226</named-content></contract-sponsor>
<contract-sponsor id="cn002">National Natural Science Foundation of China<named-content content-type="fundref-id">10.13039/501100001809</named-content></contract-sponsor>
<counts>
<fig-count count="5"/>
<table-count count="0"/>
<equation-count count="6"/>
<ref-count count="30"/>
<page-count count="10"/>
<word-count count="7310"/>
</counts>
</article-meta>
</front>
<body>
<sec id="S1" sec-type="intro">
<title>1. Introduction</title>
<p>Alzheimer&#x2019;s disease (AD) is a progressive degenerative disease of the central nervous system that causes cognitive decline and extensive neuronal death (<xref ref-type="bibr" rid="B1">Barage and Sonawane, 2015</xref>). It is one of the most common dementias among elderly individuals. Modern society has to tackle the heavy burden of aging as a result of AD. Extensive explorations in developing drugs and therapy for AD have failed. One of the major reasons is the lack of animal models that could fully mimic the pathological features of AD (<xref ref-type="bibr" rid="B22">Scearce-Levie et al., 2020</xref>). The most prevalently used animal models in AD studies are transgenic mice overexpressing amyloid precursor protein (<italic>APP</italic>), including PDAPP and Tg2576 (<xref ref-type="bibr" rid="B6">Games et al., 1995</xref>; <xref ref-type="bibr" rid="B8">Hsiao et al., 1996</xref>). However, the non-physiological and ectopic expression of <italic>APP</italic> in transgenic mice has never been demonstrated in AD patients. Other transgenic mice with both <italic>APP</italic> and <italic>PSEN1</italic> mutations, including <italic>APPswe/PS1M46L</italic>, <italic>APPswe/PSEN1dE9</italic>, and 5&#x00D7; <italic>FAD</italic> mice, have been widely used in AD studies. However, these animal models showed little neuronal loss, no tau pathology, or even provoked beta-amyloid (A&#x03B2;) pathology in ectopic brain areas that were not present in human AD patients. Compared to mice, the physiology and behavior of rats are more similar to those of human beings. Recently, a new AD rat model was developed by knock-in of App with CRISPR/Cas9, exhibiting most of the pathological features of AD (<xref ref-type="bibr" rid="B17">Pang et al., 2022</xref>). This model rat shows both A&#x03B2; plaques and tau pathology in the brain, which is the first-ever rodent model demonstrating these two main deficits.</p>
<p>Among many pathological features of AD, the deposition of A&#x03B2; is one of the main phenotypes. The density and distribution of A&#x03B2; plaques are crucial indicators of disease development. Quantitative analysis of A&#x03B2; plaques is critical for studying the spatial-temporal origin and evolution of the disease (<xref ref-type="bibr" rid="B14">Long et al., 2019</xref>). Modern neuroimaging techniques such as computed tomography (CT), positron emission tomography (PET) imaging (<xref ref-type="bibr" rid="B12">Koychev et al., 2020</xref>), or magnetic resonance imaging (MRI) (<xref ref-type="bibr" rid="B23">Sheikh-Bahaei et al., 2017</xref>), are widely used in quantifying A&#x03B2; accumulation in the brain. These non-invasive imaging methods have been used for clinical diagnosis. However, due to the low resolution and specificity, the diagnostic results are not accurate. <italic>Ex vivo</italic> studies with model animals could use immunostaining and microscopic imaging of continuous brain slices. However, the quantitative analysis of A&#x03B2; plaques on whole-brain image datasets remains a great challenge, owing to the high consumption of money and time during the acquisition and analysis of the whole-brain A&#x03B2; imaging dataset. With the development of high-speed volumetric microscopic imaging techniques (<xref ref-type="bibr" rid="B7">Gong et al., 2016</xref>; <xref ref-type="bibr" rid="B24">Wang et al., 2019</xref>), rapid microscopic 3D imaging of the whole mouse brain can be achieved in days or hours. A previous study proposed a framework for A&#x03B2; staining, imaging, and quantification of the whole mouse brain (<xref ref-type="bibr" rid="B14">Long et al., 2019</xref>), but the morphological characterization of A&#x03B2; in the whole rat brain has not yet been investigated. Meanwhile, the imaging speed was relatively slow, which makes it quite time-consuming for whole rat brain imaging.</p>
<p>Using the high-speed volumetric imaging method we have developed (<xref ref-type="bibr" rid="B24">Wang et al., 2019</xref>), imaging of the whole rat brain at micrometer resolution can be completed within 4 h. This approach will generate approximately 8 TB of raw data at a 1 &#x03BC;m &#x00D7; 1 &#x03BC;m &#x00D7; 3.5 &#x03BC;m voxel size for an intact adult rat brain. A highly efficient pipeline for automatic segmentation and quantitative analysis of whole-brain A&#x03B2; plaques is needed.</p>
<p>Segmentation is a crucial step in assessing the accurate brain-wide distribution of A&#x03B2; plaques. In traditional segmentation methods, features and parameters need to be set manually, which is not suitable for automatic and accurate segmentation of morphologically diverse A&#x03B2; plaques in whole-brain 3D microscopic images (<xref ref-type="bibr" rid="B2">Berg et al., 2019</xref>; <xref ref-type="bibr" rid="B4">Chen et al., 2020</xref>). With the rapid development of deep learning in recent years, segmentation methods based on fully supervised learning have been improved significantly, which requires large human labeling costs (<xref ref-type="bibr" rid="B20">Ronneberger et al., 2015</xref>; <xref ref-type="bibr" rid="B3">Chen et al., 2018</xref>). To reduce the cost of manual annotation, weakly supervised learning with weak annotations is more appropriate for biological microscopic images (<xref ref-type="bibr" rid="B9">Jia et al., 2017</xref>; <xref ref-type="bibr" rid="B29">Zhao et al., 2018</xref>). Here, we propose a segmentation method based on weakly supervised learning, only requiring object-level annotations, in which the cost of labeling is 1/15 of the pixel-level annotations. We adopt the high-resolution network (HRNet) as the feature extractor (<xref ref-type="bibr" rid="B25">Wang et al., 2020</xref>) and deploy it into a multi-stage object detection framework Faster-RCNN (<xref ref-type="bibr" rid="B19">Ren et al., 2015</xref>). Subsequently, extra visual cues using peak response mapping (<xref ref-type="bibr" rid="B30">Zhou et al., 2018</xref>) are provided for segmentation with the 2D-OTSU algorithm (<xref ref-type="bibr" rid="B28">Zhang and Hu, 2008</xref>) in post-processing. In addition, a pre-processing method is provided according to the characteristics of the image dataset, which can reduce the attenuation of the signal intensity caused by the thickness of tissue slices and improve the signal-to-noise ratio (SNR).</p>
<p>To quantitatively analyze the distribution of plaques in different brain areas, we registered the 3D whole-brain dataset to the Waxholm Space Sprague Dawley (WHS-SD) rat brain atlas (<xref ref-type="bibr" rid="B18">Papp et al., 2014</xref>). Utilizing the deformation field produced during registration to the A&#x03B2; binary mask of the whole brain, all brain areas of the imaged dataset were aligned to the standard brain map for subsequent quantification of A&#x03B2; plaques.</p>
</sec>
<sec id="S2" sec-type="materials|methods">
<title>2. Materials and methods</title>
<sec id="S2.SS1">
<title>2.1. Sample preparation</title>
<p>The rat brain was prepared as reported previously (<xref ref-type="bibr" rid="B17">Pang et al., 2022</xref>). Briefly, adult animals were sacrificed by transcardial perfusion of 40 mL 1&#x00D7; phosphate buffered saline (PBS) and 40 mL 4% paraformaldehyde (PFA) successively. Subsequently, sample preparation followed the Volumetric Imaging with Synchronized on-the-fly-scan and Readout (VISoR) imaging procedure (<xref ref-type="bibr" rid="B24">Wang et al., 2019</xref>). The sample was transferred into ice-cold 4% hydrogel monomer solution (HMS) for post-fixation at 4&#x00B0;C for 48 h. After post-fixation, the 4% HMS was replaced with a mixture of 15 mL 4% HMS and 15 mL 20% bovine serum albumin (BSA). The solution was degassed in a vacuum pump for 20 min with ice surrounding the centrifuge tubes. The sample was polymerized at 37&#x00B0;C for 4 h and rinsed with PBS three times to remove residual reagents. Next, the sample was sectioned into 300 &#x03BC;m thick slices. All slices were cleared with 4% sodium dodecyl sulfate (SDS) solution at 37&#x00B0;C for 24 h with gentle shaking. The slices were washed with PBS three times, 1 h for each round. The slices were immunostained with anti-A&#x03B2; primary antibody (Biolegend No. 803002, 1:500 dilution with PBS) for 24 h at room temperature and washed with PBS three times, 1 h for each round. The secondary antibody (JacksonImmuno Research No. 715-545-150, 1:200 dilution with PBS) was applied for 6 h at room temperature with gentle shaking and washed out with PBS three times, 1 h per round. Finally, all slices were mounted on a customized slide (100 mm &#x00D7; 100 mm) for imaging (<xref ref-type="fig" rid="F1">Figure 1A</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption><p>Diagram of this study. <bold>(A)</bold> The pipeline of the whole process, including sample preparation, data acquisition, plaque segmentation, and quantification analysis. <bold>(B)</bold> Single slice imaged with micron resolution. <bold>(C)</bold> The 3D-reconstructed whole rat brain. <bold>(D)</bold> The raw image of a section in panel <bold>(B)</bold>, green boxed.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-16-1097019-g001.tif"/>
</fig>
</sec>
<sec id="S2.SS2">
<title>2.2. Data acquisition</title>
<p>A modified VISoR microscope described in a previous study was used for all data collection (<xref ref-type="bibr" rid="B27">Xu et al., 2021</xref>). Two-channel imaging was carried out successively with 488 and 552 nm excitation. The emission light was collected through an Olympus 10 &#x00D7; 0.3 NA water immersion objective and filtered with bandpass filters (520/40, 600/50, from Semrock). Images were collected with a sCMOS camera (Flash 4.0 v3, Hamamatsu) (<xref ref-type="fig" rid="F1">Figure 1A</xref>). All data were collected with a pixel resolution of 1 &#x03BC;m &#x00D7; 1 &#x03BC;m &#x00D7; 3.5 &#x03BC;m. The collected images were reconstructed into a dual channel whole rat brain for further segmentation and quantification (<xref ref-type="fig" rid="F1">Figures 1B&#x2013;D</xref>). The 3D reconstruction of the whole brain imaging dataset was the same as our previous study (<xref ref-type="bibr" rid="B24">Wang et al., 2019</xref>). Briefly, it mainly contains four steps. (i) Flattening the upper and lower surfaces of each brain section. (ii) Detecting the edges of adjacent sections and extracting the correspondences between two opposing surfaces. (iii) Morphing the correspondences of each section for limiting the morphological errors. (iv) Warping each section with the extracted and morphed correspondences.</p>
</sec>
<sec id="S2.SS3">
<title>2.3. Data preprocessing</title>
<p>A preprocessing pipeline of the dataset is presented to calibrate the brightness of serial brain sections as well as to enhance the signal-to-noise ratio. This step is crucial for the overall success of network training and testing. Two preprocessing strategies were applied successively:</p>
<p><bold>Brightness calibration of the brain slices.</bold> The brightness of the imaging channel varies at different depths of the slices. Two reasons contribute most: (a) the difference in antibody concentration at different depths and (b) excitation light absorption by tissue through the optical propagation path. The brightness over <italic>z</italic>-axis was measured and automatically calibrated (<xref ref-type="supplementary-material" rid="DS1">Supplementary Figures 1A, B</xref>). Specifically, the corrected brightness of image stacks can be formulated by:</p>
<disp-formula id="S2.E1">
<label>(1)</label>
<mml:math id="M1">
<mml:mrow>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:msup>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>z</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:msup>
<mml:mi/>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
</mml:msup>
</mml:mpadded>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>z</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>e</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>z</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>/</mml:mo>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>e</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p><italic>I</italic>(&#x22C5;) represents the intensity of each pixel. <italic>I</italic><sub><italic>mean</italic></sub>(&#x22C5;) indicates the mean intensity of each image slice. <italic>z</italic> denotes the slice index of the image stacks. The non-brain-slice pixels are excluded while calculating the mean intensity.</p>
<p><bold>Signal-to-noise ratio (SNR) enhancement.</bold> As the A&#x03B2; plaques were labeled by immunofluorescent staining, the intensity of the fluorescence signal was relatively weak for image segmentation. The low SNR images will lead to poor performance in network training for image segmentation. To further enhance the segmentation performance, 3D Gaussian blur (&#x03C3;<sub><italic>x</italic>,<italic>y</italic>,<italic>z</italic></sub> = 2 for saving more details) was used to improve the SNR of all brain slices.</p>
<p>Due to the large size of 3D image stacks and the limitation of computer memory, we partitioned the image stacks into image blocks with size of 256 &#x00D7; 256 &#x00D7; 75. Image blocks having less than 1% brain tissue pixels were excluded for network training and testing.</p>
</sec>
<sec id="S2.SS4">
<title>2.4. HRNet structure</title>
<p>Segmentation is a position-sensitive computer vision task. Due to the high density and small size of A&#x03B2; plaques in the whole rat brain, over-down-sampling of training data in the neural network will lead to the missing and position offset of small objects. The parallel structure of HRNet can combine high-resolution features with high-level semantic information. The high-resolution representations learned from the HRNet are not only spatially precise but also semantically strong.</p>
<p>The HRNet is connected in parallel (<xref ref-type="fig" rid="F2">Figure 2A</xref>), which consists of parallel branches with high-to-low resolution. The resolution of the rth branch is 1/(2<italic><sup>r&#x2013;1</sup></italic>) of the resolution of the first stream, while the channel number is 2<italic><sup>r&#x2013;1</sup></italic> of the first stream.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption><p>Schematic illustration of the entire analysis pipeline. <bold>(A)</bold> Weakly supervised segmentation with the high-resolution network. The pipeline is modified from the Faster-RCNN framework, and the peak response mapping method is embedded in our framework for extracting additional visual cues for subsequent segmentation. Note that only weak labels (bounding boxes) are required in our pipeline, which are shown as red boxes in the cropped image block. <bold>(B)</bold> Post-processing for obtaining whole-brain A&#x03B2; segmentation masks. <bold>(C)</bold> Obtaining the deformation field from the atlas registration process and then utilizing it to the segmentation masks from panel <bold>(B)</bold> for quantitative analysis of whole-brain plaques.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-16-1097019-g002.tif"/>
</fig>
<p>The basic module used in each subnetwork contains two 3 &#x00D7; 3 convolutional kernels where each kernel is followed by batch normalization (BN) and rectified linear unit (ReLU). Skip connection is used to connect the input and output of the module (<xref ref-type="supplementary-material" rid="DS1">Supplementary Figure 2A</xref>). The connection between different stages consists of transition modules and fusion modules (<xref ref-type="supplementary-material" rid="DS1">Supplementary Figure 2C</xref>), which are applied to exchange information between multi-resolution layers. Two output modes are provided (<xref ref-type="supplementary-material" rid="DS1">Supplementary Figure 2B</xref>): (i) HR-keep mode only outputs the high-resolution representation computed from the high-resolution convolution stream, (ii) HR-fuse mode combines the representations from all the high-to-low resolution parallel streams.</p>
</sec>
<sec id="S2.SS5">
<title>2.5. Weakly supervised learning with object-level labels</title>
<p>The training of neural networks is usually fully supervised and requires a large number of manual annotations, especially for image segmentation which requires pixel-level annotation. For 3D microscopic biomedical images, annotation requires extensive professional knowledge guidance, which undoubtedly leads to high expenses of money and time. In this paper, object-level weak annotations are used for pixel-level image segmentation tasks (<xref ref-type="fig" rid="F2">Figure 2A</xref>). Specifically, we train the object detection framework with bounding box annotations and then segmentation is carried out by post-processing methods (<xref ref-type="fig" rid="F2">Figure 2B</xref> and <xref ref-type="supplementary-material" rid="VS1">Supplementary Video 1</xref>).</p>
<p>Faster-RCNN (<xref ref-type="bibr" rid="B19">Ren et al., 2015</xref>) is a widely used object detection framework that usually consists of conv-body, region-proposal network (RPN), RoI-pooling, and classifier. In this work, a modified 3D form Faster-RCNN is used for detecting A&#x03B2; plaques in the volumetric image dataset. High-to-low resolution features of the 3D images are extracted by HRNet. After that, the features are input into the RPN and classifier to obtain the predicted bounding-box map. In Faster-RCNN, RPN depends on the default anchor settings, and we set the anchor size to fit the size range of A&#x03B2; plaques.</p>
<p>Subsequently, peak response mapping (PRM) (<xref ref-type="bibr" rid="B30">Zhou et al., 2018</xref>) is used to obtain additional visual cues to improve the segmentation performance (<xref ref-type="fig" rid="F2">Figure 2A</xref>). The main idea of PRM is to generate a peak-response map by stimulating peaks in the class-aware map. Then, the most informative regions of each plaque are identified and mapped by the back-propagated peaks. Score maps generated from RPN in the Faster-RCNN framework can be regarded as a class of peak correspondence maps related to location (<xref ref-type="bibr" rid="B5">Dong et al., 2019</xref>). Therefore, we assume that peaks in the score map represent strong visual cues for the objects. The peak is back-propagated while the score map is sent to the classifier for further classification. The peak back-propagation can be interpreted as a random walk process. Each location in the bottom layer&#x2019;s top-down relevance is formulated as its probability of being visited by the walker.</p>
<p>Consider a convolution layer with a filter size <italic>s</italic>&#x00D7;<italic>h</italic>&#x00D7;<italic>w</italic>. <italic>I</italic><sub><italic>ijk</italic></sub> and <italic>O</italic><sub><italic>pqt</italic></sub> are the spatial locations of the input and output feature maps, respectively. The visiting probability <italic>P</italic>(<italic>I</italic><sub><italic>ijk</italic></sub>) can be formulated by:</p>
<disp-formula id="S2.E2">
<label>(2)</label>
<mml:math id="M2">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo rspace="5.8pt">)</mml:mo>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:munderover>
<mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mi>p</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>-</mml:mo>
<mml:mfrac>
<mml:mi>s</mml:mi>
<mml:mn>2</mml:mn>
</mml:mfrac>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>+</mml:mo>
<mml:mfrac>
<mml:mi>s</mml:mi>
<mml:mn>2</mml:mn>
</mml:mfrac>
</mml:mrow>
</mml:munderover>
<mml:munderover>
<mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mi>q</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>-</mml:mo>
<mml:mfrac>
<mml:mi>h</mml:mi>
<mml:mn>2</mml:mn>
</mml:mfrac>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>+</mml:mo>
<mml:mfrac>
<mml:mi>h</mml:mi>
<mml:mn>2</mml:mn>
</mml:mfrac>
</mml:mrow>
</mml:munderover>
<mml:munderover>
<mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mi>t</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>-</mml:mo>
<mml:mfrac>
<mml:mi>w</mml:mi>
<mml:mn>2</mml:mn>
</mml:mfrac>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>+</mml:mo>
<mml:mfrac>
<mml:mi>w</mml:mi>
<mml:mn>2</mml:mn>
</mml:mfrac>
</mml:mrow>
</mml:munderover>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>|</mml:mo>
<mml:msub>
<mml:mi>O</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>q</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo rspace="5.8pt">)</mml:mo>
</mml:mrow>
<mml:mo rspace="5.8pt">&#x00D7;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:msub>
<mml:mi>O</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>q</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where the transition probability is formulated as:</p>
<disp-formula id="S2.E3">
<label>(3)</label>
<mml:math id="M3">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>|</mml:mo>
<mml:msub>
<mml:mi>O</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>q</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo rspace="5.8pt">)</mml:mo>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:msub>
<mml:mi>Z</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>q</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mpadded>
<mml:mo rspace="5.8pt">&#x00D7;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>I</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>L</mml:mi>
<mml:mi>U</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>-</mml:mo>
<mml:mi>p</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>-</mml:mo>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>-</mml:mo>
<mml:mi>t</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:msub>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p><inline-formula><mml:math id="INEQ8"><mml:msub><mml:mover accent="true"><mml:mi>I</mml:mi><mml:mo stretchy="false">^</mml:mo></mml:mover><mml:mrow><mml:mi>i</mml:mi><mml:mo>&#x2062;</mml:mo><mml:mi>j</mml:mi><mml:mo>&#x2062;</mml:mo><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the bottom-up activation value during the forward process. <italic>ReLU</italic>(<italic>W</italic><sub>(<italic>i</italic>&#x2212;<italic>p</italic>)(<italic>j</italic>&#x2212;<italic>q</italic>)(<italic>k</italic>&#x2212;<italic>t</italic>)</sub>) means excluding negative weights as they are not helpful in improving the output response. <italic>Z</italic><sub><italic>pqt</italic></sub> is a factor to ensure &#x2211;<sub><italic>p</italic>,<italic>q</italic>,<italic>t</italic></sub> <italic>I</italic><sub><italic>ijk</italic></sub>|<italic>O</italic><sub><italic>pqt</italic></sub> = 1. Note that PRM only needs to be activated during testing rather than training.</p>
<p>Finally, we adopt an advanced 2D-OTSU (<xref ref-type="bibr" rid="B28">Zhang and Hu, 2008</xref>) algorithm for segmentation (<xref ref-type="fig" rid="F2">Figure 2B</xref>). Unlike the original algorithm, which uses handcrafted features as the second-dimension input, we use the peak response map extracted from the neural network, which provides additional information beyond grayscale intensity.</p>
</sec>
<sec id="S2.SS6">
<title>2.6. Rat brain atlas registration and quantitative analysis</title>
<p>To quantitatively analyze the distribution of plaques in different brain regions, the 3D reconstructed rat brain needs to be registered to a rat brain atlas reference (<xref ref-type="fig" rid="F2">Figure 2C</xref>). The WHS-SD rat brain atlas (<xref ref-type="bibr" rid="B18">Papp et al., 2014</xref>) is one of the most commonly used digital rat brain atlases. In this paper, we utilized atlas version 2 from public resources<sup><xref ref-type="fn" rid="footnote1">1</xref></sup>, which has 80 brain areas. We modified the images to remove the skull structures in the T2&#x002A; template images, leaving only the brain structures.</p>
<p>The reconstructed 3D rat brain image dataset was registered to the WHS-SD brain atlas. Specifically, we used the WHS-SD rat brain template, annotation file, and atlas file provided by the public resources (see text footnote 1). The images were then converted to SimpleITK (<xref ref-type="bibr" rid="B15">Lowekamp et al., 2013</xref>) format to achieve fast alignment with the Elastix (<xref ref-type="bibr" rid="B11">Klein et al., 2009</xref>) toolbox. We adopted the non-rigid B-spline as the transform model. Mutual information (MI) and a rigidity penalty were used as metrics for the similarity measure. The deformation field was optimized globally by using the stochastic gradient descent (SGD) algorithm. The order of B-spline interpolation was set to 3. Subsequently, a neuroanatomy expert was engaged to fine-tune the borderlines of all brain areas.</p>
</sec>
<sec id="S2.SS7">
<title>2.7. Evaluation metrics</title>
<p>To comprehensively evaluate the accuracy of our weakly supervised segmentation method, we use three metrics for quantitative analysis, including the Dice score (DSC), Sensitivity (SST), and Hausdorff distance (HD). These metrics are defined as follows:</p>
<disp-formula id="S2.E4">
<label>(4)</label>
<mml:math id="M4">
<mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>S</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:mi>C</mml:mi>
</mml:mpadded>
</mml:mrow>
<mml:mo rspace="8.6pt">=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="S2.E5">
<label>(5)</label>
<mml:math id="M5">
<mml:mrow>
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>S</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:mi>T</mml:mi>
</mml:mpadded>
</mml:mrow>
<mml:mo rspace="8.6pt">=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where TP denotes the true positive of the predicted pixels. FP denotes the false positive. FN denotes the false negative.</p>
<disp-formula id="S2.Ex1">
<label>(6)</label>
<mml:math id="M6">
<mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>H</mml:mi>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>X</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>Y</mml:mi>
<mml:mo rspace="5.8pt">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo rspace="8.6pt">=</mml:mo>
<mml:mi/>
</mml:mrow>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>{</mml:mo>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>u</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mi>X</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mi>y</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mi>Y</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>u</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mrow>
<mml:mi>y</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mi>Y</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mi>X</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo rspace="7.5pt">}</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <italic>X</italic>,<italic>Y</italic> denote the ground-truth pixel set and segmentation pixel set in the image segmentation task. <italic>sup</italic> represents the supremum. <italic>inf</italic> represents the infimum. <italic>d</italic>(&#x22C5;) is the distance metric.</p>
<p>Dice score is sensitive to interior pixels. SST represents the omission rate. HD is sensitive to boundary pixels. Note that 95% Hausdorff distance (HD95) is used here to remove the effect of minimal outliers.</p>
</sec>
</sec>
<sec id="S3">
<title>3. Results</title>
<sec id="S3.SS1">
<title>3.1. Weakly supervised segmentation results</title>
<p>We randomly selected 30 cropped blocks from the cerebral cortex as training data, and each block size was 256 &#x00D7; 256 &#x00D7; 75. Two experts were engaged to annotate images with bounding boxes. In addition, 28 blocks were randomly selected from different brain areas in three categories: cerebral cortex (Cortex), hippocampus (Hippo), and other brain areas (Other) as test data. All test image blocks were labeled with pixel-level annotations to evaluate the performance of our weakly supervised segmentation method. The preprocessing method was applied to each image block. All experiments were trained and tested with the PyTorch framework on a workstation with one NVIDIA Tesla V100S and 768 GB RAM. We carefully set the anchor sizes in the range of [6, 28] and the stride of the RPN to 4. We used the SGD optimizer with a learning rate of 0.01 and a weight decay of 0.0001. The training process was terminated within 3,000 iterations.</p>
<p>We compared the proposed framework with widely used segmentation methods, including (a) traditional methods based on artificial features, such as Ilastik (<xref ref-type="bibr" rid="B2">Berg et al., 2019</xref>) and Segmenter-PMP34 (<xref ref-type="bibr" rid="B4">Chen et al., 2020</xref>); (b) fully supervised learning methods, such as U-Net (<xref ref-type="bibr" rid="B20">Ronneberger et al., 2015</xref>) and HRNet; and (c) weakly supervised learning methods with different weak labels, such as U-Net3D_rect and U-Net3D_grabcut (<xref ref-type="bibr" rid="B10">Khoreva et al., 2017</xref>). Different segmentation methods showed discernible results (<xref ref-type="fig" rid="F3">Figures 3A&#x2013;I</xref>). The evaluation metrics of different methods were calculated in distinct brain areas (<xref ref-type="fig" rid="F3">Figures 3J&#x2013;L</xref> and <xref ref-type="supplementary-material" rid="DS1">Supplementary Table 1</xref>).</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption><p><bold>(A&#x2013;I)</bold> Visualization of comparing the results of different segmentation methods on an image block of test set. Green: raw image, blue: ground truth, red: segmentation result. <bold>(A)</bold> Raw image. <bold>(B)</bold> Ground truth. <bold>(C,D)</bold> Classical segmentation methods with the handcrafted label, <bold>(C)</bold> ilastik, <bold>(D)</bold> segmenter-PMP34. <bold>(E,F)</bold> Fully supervised methods, <bold>(E)</bold> U-Net3d, <bold>(F)</bold> HRNet3D. <bold>(G,H)</bold> Weakly supervised methods labeled with rectangles and Grabcuts, respectively. <bold>(I)</bold> Our method. <bold>(J&#x2013;L)</bold> Metrics of segmentation performance. <bold>(J)</bold> Dice score, <bold>(K)</bold> Sensitivity, <bold>(L)</bold> HD95. Note that HD95 indicates the distance between the segmentation mask and the ground truth, lower score indicates better performance.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-16-1097019-g003.tif"/>
</fig>
<p>Ilastik is a machine learning method based on the random forest algorithm. The user explicitly marks the features manually and applies batch processing to segment other images. However, this grayscale feature-based method showed the worst robustness through the whole brain, resulting in the poorest performance in the experiments. In addition, the computational cost of Ilastik is the highest due to its CPU-based implementation. Segmenter_PMP34 is a conventional segmentation pipeline that consists of min-max intensity normalization, 2D Gaussian smoothing, a 2D spot filter, a watershed algorithm, and a size filter, which requires manual parameter adjustment, while all methods in this experiment were fully automatic. For fairness, the parameters were adjusted on a cerebral cortex slice to reach the best performance and then applied to other brain slices. The metrics for the Hippo and other regions were significantly lower than those of our method. Interestingly, in the cerebral cortex region, the HD95 of Segmenter_PMP34 was better than that of our method, but it severely sacrificed SST, indicating that many object pixels had not been detected.</p>
<p>To train the fully supervised network, two experts relabeled the training dataset with pixel-level labels. The U-Net architecture consists of an encoding and a decoding part. The encoding part repeatedly applies two 3 &#x00D7; 3 convolutional layers, each followed by a batch normalization layer and a ReLU. At each down-sampling step, the number of features is doubled. The decoding part recovers the original size by up-sampling the feature map. Every step of up-sampling consists of an up-sample layer that halves the number of features, and two 3 &#x00D7; 3 convolutional layers, each followed by a batch normalization layer and a ReLU. The final layer is a softmax layer. The HRNet architecture is the same as that used in our method, and the HR-fuse mode is utilized for the best performance. We used the SGD optimizer with a learning rate of 0.01, a weight decay of 0.0001, and a momentum of 0.9. However, fully supervised segmentation requires a large number of pixel-level labeled image datasets. U-Net and HRNet produced poor segmentation results, partly because the training set was relatively small. All metrics were lower than those of our method, especially SST.</p>
<p>We generated pixel-level labels through weakly labeled bounding boxes and then trained the semantic segmentation network iteratively (<xref ref-type="bibr" rid="B10">Khoreva et al., 2017</xref>). U-Net_3D_rect treats the pixels inside the 3D bounding box as pseudo labels. U-Net_3D_grabcut utilizes the GrabCut (<xref ref-type="bibr" rid="B21">Rother et al., 2004</xref>) algorithm inside the 3D bounding box to obtain the initial pseudo labels. We used these two kinds of pseudo labels to train the U-Net, respectively. The network architecture and parameter settings were the same as those of the fully supervised network. The SST of these two methods was slightly better than that of our method, but Dice and HD95 were worse than ours, as these methods treated too many background pixels as object pixels.</p>
<p>Furthermore, we clustered the plaques into three different categories according to their volumes, (a) small (&#x003C;300 voxels), (b) medium (300&#x2013;1,500 voxels), and (c) big (&#x003E;1,500 voxels). The voxel size used here is 4 &#x03BC;m &#x00D7; 4 &#x03BC;m &#x00D7; 4 &#x03BC;m. We evaluated the performance of our method to analyze plaques of different sizes (<xref ref-type="supplementary-material" rid="DS1">Supplementary Figure 3</xref> and <xref ref-type="supplementary-material" rid="DS1">Supplementary Tables 3</xref>, <xref ref-type="supplementary-material" rid="DS1">4</xref>).</p>
<p>Our method only needed the bounding boxes as the weak labels. Almost all metrics achieved state-of-the-art performance. In addition, the ablation study was completed to verify the effectiveness of each module in our framework (<xref ref-type="supplementary-material" rid="DS1">Supplementary Table 2</xref>), which proved that HRNet (including fuse output mode), PRM, and 2D-OTSU all contributed to improving the segmentation performance.</p>
</sec>
<sec id="S3.SS2">
<title>3.2. Quantification of A&#x03B2; plaques in the whole rat brain</title>
<p>To quantitatively analyze the distribution of A&#x03B2; plaques in the whole rat brain, segmented image blocks were montaged into brain slices and then reconstructed into the whole brain (<xref ref-type="supplementary-material" rid="VS2">Supplementary Videos 2</xref>, <xref ref-type="supplementary-material" rid="VS3">3</xref>). Then we utilized a registration process to align the whole brain to the WHS-SD rat brain atlas (<xref ref-type="fig" rid="F2">Figure 2C</xref>). The implementation of the registration method was based on SimpleITK (<xref ref-type="bibr" rid="B15">Lowekamp et al., 2013</xref>) and Elastix toolbox (<xref ref-type="bibr" rid="B11">Klein et al., 2009</xref>), performed on a workstation with 384 GB of RAM. The initial deformation field was subsequently obtained and fine-tuned by an expert. After registration, we applied the deformation field to the A&#x03B2; whole-brain segmentation binary masks. 3D rendering was performed for visualization of A&#x03B2; plaque distribution in the whole rat brain of several brain areas, including the cortex, hippocampus, and thalamus (<xref ref-type="fig" rid="F4">Figures 4A&#x2013;D</xref>). A&#x03B2; plaque maximum intensity projection in the coronal plane from the olfactory bulb to the caudal end revealed plaque density diversity in different brain areas or nuclei (<xref ref-type="fig" rid="F4">Figure 4E</xref>). Finally, the A&#x03B2; plaque distribution was accurately calculated based on segmentation and registration by 61 brain regions that excluded several brain areas from the original atlas which did not contribute to our quantitative analysis, such as nerves, decussations, and commissures (<xref ref-type="fig" rid="F5">Figure 5</xref>).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption><p>Whole rat brain rendering of segmented A&#x03B2; plaques. <bold>(A)</bold> The A&#x03B2; plaque distribution in the whole brain of a 3-month-old rat. Different colors represent the A&#x03B2; plaque distribution in different brain regions. Red: cortex, green: hippocampus, blue: thalamus. <bold>(B&#x2013;D)</bold> Enlarged view of the A&#x03B2; plaque distribution in the cortex, hippocampus, and thalamus. <bold>(E)</bold> A&#x03B2; plaque distribution in 300 &#x03BC;m thick brain slices at different coronal planes.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-16-1097019-g004.tif"/>
</fig>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption><p>Quantitative analysis of the A&#x03B2; plaque distribution in different brain areas. <bold>(A&#x2013;D)</bold> The total volume, total counts, volume ratio, and the count density of A&#x03B2; plaques in different brain areas. <bold>(E)</bold> The volume variation of A&#x03B2; plaques in distinct brain areas.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-16-1097019-g005.tif"/>
</fig>
<p>Most plaques were found in the neocortex, followed by the thalamus, brainstem, striatum, and other brain regions (<xref ref-type="fig" rid="F5">Figure 5A</xref>). Due to the lack of a high-resolution rat brain atlas, we could not analyze our segmentation results by more fine-grained hierarchical brain areas or subregions. The total volume of segmented plaques in each region was similar to the tendency of total plaque counts (<xref ref-type="fig" rid="F5">Figure 5B</xref>). We also measured the volume ratio of total plaque volume to the brain region volume by each brain region, and found that the volume ratio is higher in the frontal association cortex than in others (<xref ref-type="fig" rid="F5">Figure 5C</xref>). We further calculated the plaque count density and the average plaque volume in each brain region (<xref ref-type="fig" rid="F5">Figures 5D, E</xref>). Very few vascular-associated amyloid depositions were found in the early stage AD rat brain imaging data. Since we do not intend to distinguish these vascular-associated amyloid depositions from non-vascular amyloid depositions by our deep learning-based analysis pipeline, the segmentation result involves all kinds of amyloids.</p>
</sec>
</sec>
<sec id="S4" sec-type="discussion">
<title>4. Discussion</title>
<p>In this study, we presented a pipeline for systematic quantitative analysis of whole rat brain A&#x03B2; plaque distribution. First, we applied the high-throughput volumetric imaging method VISoR to the newly developed AD rat model brain and acquired micron-resolution 3D images of A&#x03B2; plaques of the whole brain. Then, we developed a weakly supervised framework for the segmentation of A&#x03B2; plaques. Finally, we registered the segmentation results to the WHS-SD rat brain atlas for quantitative analysis of A&#x03B2; plaques in each brain area.</p>
<p>The high-throughput volumetric imaging method VISoR used in this study demonstrated fast tissue immunostaining and high-speed microscopic imaging of rat brains. Traditionally, whole rat brain imaging with the confocal microscope of serial cryo-sections may take months, not to mention the high risk of losing 10 &#x03BC;m thick sections during experiments or reconstructing thousands of serial images into a complete brain. The VISoR method can image a 300 &#x03BC;m thick section by a single scan, which dramatically increases the thickness of the section from dozens of micrometers to hundreds of micrometers. Therefore, the number of sections required for each brain is significantly decreased, e.g., 85 slices for the brain of a 3-month-old rat. Since the thickness of each brain slice is 300 &#x03BC;m, it takes only 24 h for tissue clearing and a further 39 h for immunostaining. This is the most efficient whole rat brain tissue clearing and immunostaining approach to the best of our knowledge. Tissue clearing of thick sections is the key for fast immunostaining and high-throughput whole brain imaging. Immunostaining is capable of labeling the diffuse plaques, while direct staining with methoxy-X04 dye, the most commonly used staining method for A&#x03B2; plaques in previous studies, could only label the dense-core plaques (<xref ref-type="bibr" rid="B26">Whitesell et al., 2019</xref>). In addition, the thick slice provides higher stiffness and mechanical integrity, which makes the 3D dataset of serial slices much easier to reconstruct into an intact brain. One major limitation of this method is that the 300 &#x03BC;m thickness increased the difficulty for antibodies to penetrate uniformly through the whole slice in comparison to immunostaining on thin sections, e.g., the most commonly used 10 &#x03BC;m cryo-sectioned slices. This could be further improved with more experiments on optimizing the conditions of tissue clearing and immunostaining. 
Another limitation is that the mechanical sectioning method inevitably results in signal loss; we verified this by calculating the percentage of small plaques over total plaques between adjacent sections. The percentage of small plaques at section interfaces is lower than that within sections, which indicates missed detection of small plaques (<xref ref-type="supplementary-material" rid="DS1">Supplementary Figure 4</xref>). Our thick-section method therefore misses fewer small plaques than thin sectioning would. Since all slices were immunostained under the same conditions, we observed a consistent signal intensity distribution through depth on all slices, which means we could calibrate the signal before any quantitative analysis. In addition, this high-throughput imaging method has already been successfully used in whole mouse brain and whole monkey brain imaging (<xref ref-type="bibr" rid="B24">Wang et al., 2019</xref>; <xref ref-type="bibr" rid="B27">Xu et al., 2021</xref>).</p>
<p>The high-dynamic-range volumetric microscopic fluorescence image is difficult to segment with traditional intensity-based algorithms. Deep neural networks could achieve better segmentation performance by learning texture features and semantic information of images. Meanwhile, the computational efficiency of deep learning-based methods implemented on GPUs prevailed over that of most traditional methods implemented on CPUs. To achieve high-precision segmentation of A&#x03B2; plaques for quantitative analysis in the whole rat brain, this study proposes a weakly supervised segmentation framework based on HRNet and Faster-RCNN. To our knowledge, although HRNet and Faster-RCNN are widely used in diverse applications of artificial intelligence, few researchers have used them in weakly labeled biomedical image segmentation tasks, especially in whole-brain 3D microscopic images of different biomarkers.</p>
<p>High-resolution network is the backbone for feature extraction in this framework, which connects the high-to-low resolution branches in parallel rather than in series, allowing high-resolution features to be maintained. This parallel structure can avoid the distortion of features during the up-sampling process in a serial structure such as U-Net. Therefore, the features are spatially precise. In addition, the repeated multi-resolution fusion modules make the high-to-low resolution features semantically strong. Furthermore, due to the high requirement of annotated image dataset of fully supervised learning method, we alternatively adopt the weakly supervised learning method, which only requires little human-labeling cost for network training and whole-brain A&#x03B2; plaque segmentation. Since Faster-RCNN is a multi-stage object detection network, our weakly supervised segmentation network would increase the computational complexity in comparison with other semantic segmentation networks which had been trained for classifying each pixel of the image. Peak response mapping introduces additional information into the network, with each peak representing a strong visual cue of the object. The peak response maps can also be used as the second-dimension input of the 2D-OTSU algorithm to improve the segmentation accuracy. However, the segmentation accuracy on small plaques (smaller than 6 pixels in diameter) is lower than medium and big plaques, as small object detection is a longstanding challenge in deep learning field (<xref ref-type="supplementary-material" rid="DS1">Supplementary Figure 3</xref> and <xref ref-type="supplementary-material" rid="DS1">Supplementary Tables 3</xref>, <xref ref-type="supplementary-material" rid="DS1">4</xref>).</p>
<p>Previous studies have reported several A&#x03B2; plaque analysis methods for mouse brain, but no such study has analyzed the whole rat brain. <xref ref-type="bibr" rid="B13">Liebmann et al. (2016)</xref> proposed a pipeline for 3D study of AD pathologies in mouse brain hemispheres and human brain sections. They have demonstrated that tissue clearing and immunostaining of large samples enables high-throughput quantitation of complicated 3D-pathological features. They mainly focused on the individual plaque properties rather than the brain-wide plaque distribution. <xref ref-type="bibr" rid="B26">Whitesell et al. (2019)</xref> constructed a pipeline for mapping the spatial patterns of A&#x03B2; plaques in the whole mouse brain. Spatial patterns of A&#x03B2; deposits in the whole mouse brains were compared extensively among three transgenic animal lines at different ages. However, the dye methoxy-X04 could only label the dense-core plaques, which might underestimate the plaque density. Nguyen et al. proposed a supervised learning method for quantifying A&#x03B2; plaques in the whole mouse brain with the random forest algorithm (<xref ref-type="bibr" rid="B16">Nguyen et al., 2019</xref>). This ilastik-based supervised learning method, trained with few training data, is not suitable for large-scale 3D whole-brain images with high dynamic range. Our deep learning-based method, which benefits from the rich semantic features extracted by the deep neural network, is capable of analyzing the high-dynamic-range 3D whole-brain images. Meanwhile, the fact that only a small labeled training dataset is needed for network training makes deep learning-based analysis of the A&#x03B2; plaques in the whole brain more accessible.</p>
<p>Through this study, we have developed a systematic toolset for high-resolution imaging of the whole rat brain after labeling by pathological biomarkers. This toolset achieves high accuracy and requires lower consumption for the segmentation of immuno-labeled objects in 3D microscopic images and quantitative analysis of such objects by brain areas. We demonstrated the method with the first ever A&#x03B2; distribution atlas of the whole rat brain. Since this weakly supervised method greatly reduces the cost of manual labeling and the segmentation performance is almost state-of-the-art, this method could be commonly used in the segmentation and quantification of objects labeled with different biomarkers, such as cell bodies and protein aggregates.</p>
</sec>
<sec id="S5" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in this study are included in this article/<xref ref-type="supplementary-material" rid="DS1">Supplementary material</xref>, further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec id="S6" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The animal study was reviewed and approved by the Institutional Animal Care and Use Committee of Tsinghua University.</p>
</sec>
<sec id="S7" sec-type="author-contributions">
<title>Author contributions</title>
<p>ZC and HW designed the study, analysis method, and pipeline, and wrote the manuscript with inputs from WZ, KP, DX, LG, XC, and FW. ZC, WZ, XC, and FW architected the segmentation method. ZC performed the quantitative analysis of the data. KP proposed the study and provided the brain sample. ZC and DX performed the data labeling. ZC, HW, and LG prepared the sample and acquired the data. HW conceived and supervised the study. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<sec id="S8" sec-type="funding-information">
<title>Funding</title>
<p>This work was supported by the Fundamental Research Funds for the Central Universities (WK2100000022 to HW), the National Natural Science Foundation of China (32100896 to HW), and the University Synergy Innovation Program of Anhui Province (GXXT-2019-025 to FW).</p>
</sec>
<ack>
<p>We would like to thank C. Y. Liu for all kinds of administrative assistance in data collection and lab administration.</p>
</ack>
<sec id="S9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="S10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="S11" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fnins.2022.1097019/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fnins.2022.1097019/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.PDF" id="DS1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="Video_1.MP4" id="VS1" mimetype="video/mp4" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="Video_2.MP4" id="VS2" mimetype="video/mp4" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="Video_3.MP4" id="VS3" mimetype="video/mp4" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn id="footnote1">
<label>1</label>
<p><ext-link ext-link-type="uri" xlink:href="https://www.nitrc.org/projects/whs-sd-atlas">https://www.nitrc.org/projects/whs-sd-atlas</ext-link></p></fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barage</surname> <given-names>S. H.</given-names></name> <name><surname>Sonawane</surname> <given-names>K. D.</given-names></name></person-group> (<year>2015</year>). <article-title>Amyloid cascade hypothesis: Pathogenesis and therapeutic strategies in Alzheimer&#x2019;s disease.</article-title> <source><italic>Neuropeptides</italic></source> <volume>52</volume> <fpage>1</fpage>&#x2013;<lpage>18</lpage>.</citation></ref>
<ref id="B2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Berg</surname> <given-names>S.</given-names></name> <name><surname>Kutra</surname> <given-names>D.</given-names></name> <name><surname>Kroeger</surname> <given-names>T.</given-names></name> <name><surname>Straehle</surname> <given-names>C. N.</given-names></name> <name><surname>Kausler</surname> <given-names>B. X.</given-names></name> <name><surname>Haubold</surname> <given-names>C.</given-names></name><etal/></person-group> (<year>2019</year>). <article-title>Ilastik: Interactive machine learning for (bio)image analysis.</article-title> <source><italic>Nat. Methods</italic></source> <volume>16</volume> <fpage>1226</fpage>&#x2013;<lpage>1232</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-019-0582-9</pub-id> <pub-id pub-id-type="pmid">31570887</pub-id></citation></ref>
<ref id="B3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>H.</given-names></name> <name><surname>Dou</surname> <given-names>Q.</given-names></name> <name><surname>Yu</surname> <given-names>L.</given-names></name> <name><surname>Qin</surname> <given-names>J.</given-names></name> <name><surname>Heng</surname> <given-names>P.-A.</given-names></name></person-group> (<year>2018</year>). <article-title>VoxResNet: Deep voxelwise residual networks for brain segmentation from 3D MR images.</article-title> <source><italic>Neuroimage</italic></source> <volume>170</volume> <fpage>446</fpage>&#x2013;<lpage>455</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2017.04.041</pub-id> <pub-id pub-id-type="pmid">28445774</pub-id></citation></ref>
<ref id="B4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>J.</given-names></name> <name><surname>Ding</surname> <given-names>L.</given-names></name> <name><surname>Viana</surname> <given-names>M. P.</given-names></name> <name><surname>Lee</surname> <given-names>H.</given-names></name> <name><surname>Sluezwski</surname> <given-names>M. F.</given-names></name> <name><surname>Morris</surname> <given-names>B.</given-names></name><etal/></person-group> (<year>2020</year>). <article-title>The Allen cell and structure segmenter: A new open source toolkit for segmenting 3D intracellular structures in fluorescence microscopy images.</article-title> <source><italic>BioRxiv</italic></source> [<comment>Preprint</comment>]. <pub-id pub-id-type="doi">10.1101/491035</pub-id></citation></ref>
<ref id="B5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dong</surname> <given-names>M.</given-names></name> <name><surname>Liu</surname> <given-names>D.</given-names></name> <name><surname>Xiong</surname> <given-names>Z.</given-names></name> <name><surname>Chen</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Zha</surname> <given-names>Z.-J.</given-names></name><etal/></person-group> (<year>2019</year>). &#x201C;<article-title>Instance segmentation from volumetric biomedical images without voxel-wise labeling</article-title>,&#x201D; in <source><italic>Proceeding of the international conference on medical image computing and computer-assisted intervention</italic></source>, (<publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>83</fpage>&#x2013;<lpage>91</lpage>.</citation></ref>
<ref id="B6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Games</surname> <given-names>D.</given-names></name> <name><surname>Adams</surname> <given-names>D.</given-names></name> <name><surname>Alessandrini</surname> <given-names>R.</given-names></name> <name><surname>Barbour</surname> <given-names>R.</given-names></name> <name><surname>Berthelette</surname> <given-names>P.</given-names></name> <name><surname>Blackwell</surname> <given-names>C.</given-names></name><etal/></person-group> (<year>1995</year>). <article-title>Alzheimer-type neuropathology in transgenic mice overexpressing V717F &#x03B2;-amyloid precursor protein.</article-title> <source><italic>Nature</italic></source> <volume>373</volume> <fpage>523</fpage>&#x2013;<lpage>527</lpage>. <pub-id pub-id-type="doi">10.1038/373523a0</pub-id> <pub-id pub-id-type="pmid">7845465</pub-id></citation></ref>
<ref id="B7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gong</surname> <given-names>H.</given-names></name> <name><surname>Xu</surname> <given-names>D.</given-names></name> <name><surname>Yuan</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Guo</surname> <given-names>C.</given-names></name> <name><surname>Peng</surname> <given-names>J.</given-names></name><etal/></person-group> (<year>2016</year>). <article-title>High-throughput dual-colour precision imaging for brain-wide connectome with cytoarchitectonic landmarks at the cellular level.</article-title> <source><italic>Nat. Commun.</italic></source> <volume>7</volume> <fpage>1</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1038/ncomms12142</pub-id> <pub-id pub-id-type="pmid">27374071</pub-id></citation></ref>
<ref id="B8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hsiao</surname> <given-names>K.</given-names></name> <name><surname>Chapman</surname> <given-names>P.</given-names></name> <name><surname>Nilsen</surname> <given-names>S.</given-names></name> <name><surname>Eckman</surname> <given-names>C.</given-names></name> <name><surname>Harigaya</surname> <given-names>Y.</given-names></name> <name><surname>Younkin</surname> <given-names>S.</given-names></name><etal/></person-group> (<year>1996</year>). <article-title>Correlative memory deficits, A&#x03B2; elevation, and amyloid plaques in transgenic mice.</article-title> <source><italic>Science</italic></source> <volume>274</volume> <fpage>99</fpage>&#x2013;<lpage>103</lpage>. <pub-id pub-id-type="doi">10.1126/science.274.5284.99</pub-id> <pub-id pub-id-type="pmid">8810256</pub-id></citation></ref>
<ref id="B9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jia</surname> <given-names>Z.</given-names></name> <name><surname>Huang</surname> <given-names>X.</given-names></name> <name><surname>Chang</surname> <given-names>E. I.-C.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name></person-group> (<year>2017</year>). <article-title>Constrained deep weak supervision for histopathology image segmentation.</article-title> <source><italic>IEEE Trans. Med. Imag.</italic></source> <volume>36</volume> <fpage>2376</fpage>&#x2013;<lpage>2388</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2017.2724070</pub-id> <pub-id pub-id-type="pmid">28692971</pub-id></citation></ref>
<ref id="B10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khoreva</surname> <given-names>A.</given-names></name> <name><surname>Benenson</surname> <given-names>R.</given-names></name> <name><surname>Hosang</surname> <given-names>J.</given-names></name> <name><surname>Hein</surname> <given-names>M.</given-names></name> <name><surname>Schiele</surname> <given-names>B.</given-names></name></person-group> (<year>2017</year>). &#x201C;<article-title>Simple does it: Weakly supervised instance and semantic segmentation</article-title>,&#x201D; in <source><italic>Proceedings of the IEEE conference on computer vision and pattern recognition</italic></source>, (<publisher-loc>Venice</publisher-loc>), <fpage>876</fpage>&#x2013;<lpage>885</lpage>.</citation></ref>
<ref id="B11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Klein</surname> <given-names>S.</given-names></name> <name><surname>Staring</surname> <given-names>M.</given-names></name> <name><surname>Murphy</surname> <given-names>K.</given-names></name> <name><surname>Viergever</surname> <given-names>M. A.</given-names></name> <name><surname>Pluim</surname> <given-names>J. P.</given-names></name></person-group> (<year>2009</year>). <article-title>Elastix: A toolbox for intensity-based medical image registration.</article-title> <source><italic>IEEE Trans. Med. Imag.</italic></source> <volume>29</volume> <fpage>196</fpage>&#x2013;<lpage>205</lpage>.</citation></ref>
<ref id="B12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koychev</surname> <given-names>I.</given-names></name> <name><surname>Hofer</surname> <given-names>M.</given-names></name> <name><surname>Friedman</surname> <given-names>N.</given-names></name></person-group> (<year>2020</year>). <article-title>Correlation of Alzheimer disease neuropathologic staging with amyloid and tau scintigraphic imaging biomarkers.</article-title> <source><italic>J. Nuclear Med.</italic></source> <volume>61</volume> <fpage>1413</fpage>&#x2013;<lpage>1418</lpage>. <pub-id pub-id-type="doi">10.2967/jnumed.119.230458</pub-id> <pub-id pub-id-type="pmid">32764121</pub-id></citation></ref>
<ref id="B13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liebmann</surname> <given-names>T.</given-names></name> <name><surname>Renier</surname> <given-names>N.</given-names></name> <name><surname>Bettayeb</surname> <given-names>K.</given-names></name> <name><surname>Greengard</surname> <given-names>P.</given-names></name> <name><surname>Tessier-Lavigne</surname> <given-names>M.</given-names></name> <name><surname>Flajolet</surname> <given-names>M.</given-names></name></person-group> (<year>2016</year>). <article-title>Three-dimensional study of Alzheimer&#x2019;s disease hallmarks using the iDISCO clearing method.</article-title> <source><italic>Cell Rep.</italic></source> <volume>16</volume> <fpage>1138</fpage>&#x2013;<lpage>1152</lpage>. <pub-id pub-id-type="doi">10.1016/j.celrep.2016.06.060</pub-id> <pub-id pub-id-type="pmid">27425620</pub-id></citation></ref>
<ref id="B14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Long</surname> <given-names>B.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Chen</surname> <given-names>S.</given-names></name> <name><surname>Li</surname> <given-names>W.</given-names></name> <name><surname>Zhong</surname> <given-names>Q.</given-names></name><etal/></person-group> (<year>2019</year>). <article-title>Three-dimensional quantitative analysis of amyloid plaques in the whole brain with high voxel resolution.</article-title> <source><italic>Sci. Sin. Vitae</italic></source> <volume>49</volume> <fpage>140</fpage>&#x2013;<lpage>150</lpage>.</citation></ref>
<ref id="B15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lowekamp</surname> <given-names>B. C.</given-names></name> <name><surname>Chen</surname> <given-names>D. T.</given-names></name> <name><surname>Ib&#x00E1;&#x00F1;ez</surname> <given-names>L.</given-names></name> <name><surname>Blezek</surname> <given-names>D.</given-names></name></person-group> (<year>2013</year>). <article-title>The design of SimpleITK.</article-title> <source><italic>Front. Neuroinform.</italic></source> <volume>7</volume>:<issue>45</issue>. <pub-id pub-id-type="doi">10.3389/fninf.2013.00045</pub-id> <pub-id pub-id-type="pmid">24416015</pub-id></citation></ref>
<ref id="B16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nguyen</surname> <given-names>D.</given-names></name> <name><surname>Uhlmann</surname> <given-names>V.</given-names></name> <name><surname>Planchette</surname> <given-names>A. L.</given-names></name> <name><surname>Marchand</surname> <given-names>P. J.</given-names></name> <name><surname>Van De Ville</surname> <given-names>D.</given-names></name> <name><surname>Lasser</surname> <given-names>T.</given-names></name><etal/></person-group> (<year>2019</year>). <article-title>Supervised learning to quantify amyloidosis in whole brains of an Alzheimer&#x2019;s disease mouse model acquired with optical projection tomography.</article-title> <source><italic>Biomed. Opt. Exp.</italic></source> <volume>10</volume> <fpage>3041</fpage>&#x2013;<lpage>3060</lpage>. <pub-id pub-id-type="doi">10.1364/BOE.10.003041</pub-id> <pub-id pub-id-type="pmid">31259073</pub-id></citation></ref>
<ref id="B17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pang</surname> <given-names>K.</given-names></name> <name><surname>Jiang</surname> <given-names>R.</given-names></name> <name><surname>Zhang</surname> <given-names>W.</given-names></name> <name><surname>Yang</surname> <given-names>Z.</given-names></name> <name><surname>Li</surname> <given-names>L.-L.</given-names></name> <name><surname>Shimozawa</surname> <given-names>M.</given-names></name><etal/></person-group> (<year>2022</year>). <article-title>An app knock-in rat model for Alzheimer&#x2019;s disease exhibiting A&#x03B2; and tau pathologies, neuronal death and cognitive impairments.</article-title> <source><italic>Cell Res.</italic></source> <volume>32</volume> <fpage>157</fpage>&#x2013;<lpage>175</lpage>. <pub-id pub-id-type="doi">10.1038/s41422-021-00582-x</pub-id> <pub-id pub-id-type="pmid">34789895</pub-id></citation></ref>
<ref id="B18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Papp</surname> <given-names>E. A.</given-names></name> <name><surname>Leergaard</surname> <given-names>T. B.</given-names></name> <name><surname>Calabrese</surname> <given-names>E.</given-names></name> <name><surname>Johnson</surname> <given-names>G. A.</given-names></name> <name><surname>Bjaalie</surname> <given-names>J. G.</given-names></name></person-group> (<year>2014</year>). <article-title>Waxholm space atlas of the Sprague Dawley rat brain.</article-title> <source><italic>Neuroimage</italic></source> <volume>97</volume> <fpage>374</fpage>&#x2013;<lpage>386</lpage>.</citation></ref>
<ref id="B19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ren</surname> <given-names>S.</given-names></name> <name><surname>He</surname> <given-names>K.</given-names></name> <name><surname>Girshick</surname> <given-names>R.</given-names></name> <name><surname>Sun</surname> <given-names>J.</given-names></name></person-group> (<year>2015</year>). <article-title>Faster R-CNN: Towards real-time object detection with region proposal networks.</article-title> <source><italic>Adv. Neural Inform. Proc. Syst.</italic></source> <volume>28</volume> <fpage>91</fpage>&#x2013;<lpage>99</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2016.2577031</pub-id> <pub-id pub-id-type="pmid">27295650</pub-id></citation></ref>
<ref id="B20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ronneberger</surname> <given-names>O.</given-names></name> <name><surname>Fischer</surname> <given-names>P.</given-names></name> <name><surname>Brox</surname> <given-names>T.</given-names></name></person-group> (<year>2015</year>). &#x201C;<article-title>U-net: Convolutional networks for biomedical image segmentation</article-title>,&#x201D; in <source><italic>Proceeding of the international conference on medical image computing and computer-assisted intervention</italic></source>, (<publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>234</fpage>&#x2013;<lpage>241</lpage>.</citation></ref>
<ref id="B21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rother</surname> <given-names>C.</given-names></name> <name><surname>Kolmogorov</surname> <given-names>V.</given-names></name> <name><surname>Blake</surname> <given-names>A.</given-names></name></person-group> (<year>2004</year>). <article-title>&#x201C;GrabCut&#x201D;: Interactive foreground extraction using iterated graph cuts.</article-title> <source><italic>ACM Trans. Graph. (TOG)</italic></source> <volume>23</volume> <fpage>309</fpage>&#x2013;<lpage>314</lpage>.</citation></ref>
<ref id="B22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scearce-Levie</surname> <given-names>K.</given-names></name> <name><surname>Sanchez</surname> <given-names>P. E.</given-names></name> <name><surname>Lewcock</surname> <given-names>J. W.</given-names></name></person-group> (<year>2020</year>). <article-title>Leveraging preclinical models for the development of Alzheimer disease therapeutics.</article-title> <source><italic>Nat. Rev. Drug Discov.</italic></source> <volume>19</volume> <fpage>447</fpage>&#x2013;<lpage>462</lpage>.</citation></ref>
<ref id="B23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sheikh-Bahaei</surname> <given-names>N.</given-names></name> <name><surname>Sajjadi</surname> <given-names>S. A.</given-names></name> <name><surname>Manavaki</surname> <given-names>R.</given-names></name> <name><surname>Gillard</surname> <given-names>J. H.</given-names></name></person-group> (<year>2017</year>). <article-title>Imaging biomarkers in Alzheimer&#x2019;s disease: A practical guide for clinicians.</article-title> <source><italic>J. Alzheimer&#x2019;s Dis. Rep.</italic></source> <volume>1</volume> <fpage>71</fpage>&#x2013;<lpage>88</lpage>.</citation></ref>
<ref id="B24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>H.</given-names></name> <name><surname>Zhu</surname> <given-names>Q.</given-names></name> <name><surname>Ding</surname> <given-names>L.</given-names></name> <name><surname>Shen</surname> <given-names>Y.</given-names></name> <name><surname>Yang</surname> <given-names>C.-Y.</given-names></name> <name><surname>Xu</surname> <given-names>F.</given-names></name><etal/></person-group> (<year>2019</year>). <article-title>Scalable volumetric imaging for ultrahigh-speed brain mapping at synaptic resolution.</article-title> <source><italic>Natl. Sci. Rev.</italic></source> <volume>6</volume> <fpage>982</fpage>&#x2013;<lpage>992</lpage>. <pub-id pub-id-type="doi">10.1093/nsr/nwz053</pub-id> <pub-id pub-id-type="pmid">34691959</pub-id></citation></ref>
<ref id="B25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Sun</surname> <given-names>K.</given-names></name> <name><surname>Cheng</surname> <given-names>T.</given-names></name> <name><surname>Jiang</surname> <given-names>B.</given-names></name> <name><surname>Deng</surname> <given-names>C.</given-names></name> <name><surname>Zhao</surname> <given-names>Y.</given-names></name><etal/></person-group> (<year>2020</year>). <article-title>Deep high-resolution representation learning for visual recognition.</article-title> <source><italic>IEEE Trans. Pattern Anal. Mach. Intell.</italic></source> <volume>43</volume> <fpage>3349</fpage>&#x2013;<lpage>3364</lpage>.</citation></ref>
<ref id="B26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Whitesell</surname> <given-names>J. D.</given-names></name> <name><surname>Buckley</surname> <given-names>A. R.</given-names></name> <name><surname>Knox</surname> <given-names>J. E.</given-names></name> <name><surname>Kuan</surname> <given-names>L.</given-names></name> <name><surname>Graddis</surname> <given-names>N.</given-names></name> <name><surname>Pelos</surname> <given-names>A.</given-names></name><etal/></person-group> (<year>2019</year>). <article-title>Whole brain imaging reveals distinct spatial patterns of amyloid beta deposition in three mouse models of Alzheimer&#x2019;s disease.</article-title> <source><italic>J. Comp. Neurol.</italic></source> <volume>527</volume> <fpage>2122</fpage>&#x2013;<lpage>2145</lpage>. <pub-id pub-id-type="doi">10.1002/cne.24555</pub-id> <pub-id pub-id-type="pmid">30311654</pub-id></citation></ref>
<ref id="B27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>F.</given-names></name> <name><surname>Shen</surname> <given-names>Y.</given-names></name> <name><surname>Ding</surname> <given-names>L. F.</given-names></name> <name><surname>Yang</surname> <given-names>C. Y.</given-names></name> <name><surname>Tan</surname> <given-names>H.</given-names></name> <name><surname>Wang</surname> <given-names>H.</given-names></name><etal/></person-group> (<year>2021</year>). <article-title>High-throughput mapping of a whole rhesus monkey brain at micrometer resolution.</article-title> <source><italic>Nat. Biotechnol.</italic></source> <volume>39</volume> <fpage>1521</fpage>&#x2013;<lpage>1528</lpage>. <pub-id pub-id-type="doi">10.1038/s41587-021-00986-5</pub-id> <pub-id pub-id-type="pmid">34312500</pub-id></citation></ref>
<ref id="B28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Hu</surname> <given-names>J.</given-names></name></person-group> (<year>2008</year>). &#x201C;<article-title>Image segmentation based on 2D Otsu method with histogram analysis</article-title>,&#x201D; in <source><italic>Proceeding of the 2008 international conference on computer science and software engineering</italic></source>, (<publisher-loc>Piscataway, NJ</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>105</fpage>&#x2013;<lpage>108</lpage>. <pub-id pub-id-type="doi">10.1109/CSSE.2008.206</pub-id> <pub-id pub-id-type="pmid">35940130</pub-id></citation></ref>
<ref id="B29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>Z.</given-names></name> <name><surname>Yang</surname> <given-names>L.</given-names></name> <name><surname>Zheng</surname> <given-names>H.</given-names></name> <name><surname>Guldner</surname> <given-names>I. H.</given-names></name> <name><surname>Zhang</surname> <given-names>S.</given-names></name> <name><surname>Chen</surname> <given-names>D. Z.</given-names></name></person-group> (<year>2018</year>). &#x201C;<article-title>Deep learning based instance segmentation in 3D biomedical images using weak annotation</article-title>,&#x201D; in <source><italic>Proceeding of the international conference on medical image computing and computer-assisted intervention</italic></source>, (<publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>352</fpage>&#x2013;<lpage>360</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-00937-3_41</pub-id> <pub-id pub-id-type="pmid">34040275</pub-id></citation></ref>
<ref id="B30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>Y.</given-names></name> <name><surname>Zhu</surname> <given-names>Y.</given-names></name> <name><surname>Ye</surname> <given-names>Q.</given-names></name> <name><surname>Qiu</surname> <given-names>Q.</given-names></name> <name><surname>Jiao</surname> <given-names>J.</given-names></name></person-group> (<year>2018</year>). &#x201C;<article-title>Weakly supervised instance segmentation using class peak response</article-title>,&#x201D; in <source><italic>Proceedings of the IEEE conference on computer vision and pattern recognition</italic></source>, (<publisher-loc>San Juan, PR</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>3791</fpage>&#x2013;<lpage>3800</lpage>.</citation></ref>
</ref-list>
</back>
</article>
