<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neuroanat.</journal-id>
<journal-title>Frontiers in Neuroanatomy</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neuroanat.</abbrev-journal-title>
<issn pub-type="epub">1662-5129</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnana.2014.00126</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Methods Article</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>A workflow for the automatic segmentation of organelles in electron microscopy image stacks</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Perez</surname> <given-names>Alex J.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://community.frontiersin.org/people/u/125325"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Seyedhosseini</surname> <given-names>Mojtaba</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Deerinck</surname> <given-names>Thomas J.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Bushong</surname> <given-names>Eric A.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://community.frontiersin.org/people/u/26399"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Panda</surname> <given-names>Satchidananda</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
</contrib>
<contrib contrib-type="author">
<name><surname>Tasdizen</surname> <given-names>Tolga</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://community.frontiersin.org/people/u/28741"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Ellisman</surname> <given-names>Mark H.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://community.frontiersin.org/people/u/3072"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Center for Research in Biological Systems, National Center for Microscopy and Imaging Research, University of California</institution> <country>San Diego, La Jolla, CA, USA</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Bioengineering, University of California</institution> <country>San Diego, La Jolla, CA, USA</country></aff>
<aff id="aff3"><sup>3</sup><institution>Scientific Computing and Imaging Institute, University of Utah</institution> <country>Salt Lake City, UT, USA</country></aff>
<aff id="aff4"><sup>4</sup><institution>Regulatory Biology Laboratory, Salk Institute for Biological Studies</institution> <country>La Jolla, CA, USA</country></aff>
<aff id="aff5"><sup>5</sup><institution>Department of Neurosciences, University of California</institution> <country>San Diego, La Jolla, CA, USA</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Julian Budd, University of Sussex, UK</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Kevin Briggman, National Institutes of Health, USA; Anna Kreshuk, University of Heidelberg, Germany; Hanspeter Pfister, Harvard University, USA</p></fn>
<fn fn-type="corresp" id="fn001"><p>&#x0002A;Correspondence: Alex J. Perez and Mark H. Ellisman, National Center for Microscopy and Imaging Research, Center for Research in Biological Systems, University of California, San Diego, Biomedical Sciences Building, Room 1000, 9500 Gilman Drive, Dept. Code 0608, La Jolla, CA 92093, USA e-mail: <email>aperez&#x00040;ncmir.ucsd.edu</email>; <email>mellisman&#x00040;ucsd.edu</email></p></fn>
<fn fn-type="other" id="fn002"><p>This article was submitted to the journal Frontiers in Neuroanatomy.</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>07</day>
<month>11</month>
<year>2014</year>
</pub-date>
<pub-date pub-type="collection">
<year>2014</year>
</pub-date>
<volume>8</volume>
<elocation-id>126</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>07</month>
<year>2014</year>
</date>
<date date-type="accepted">
<day>19</day>
<month>10</month>
<year>2014</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2014 Perez, Seyedhosseini, Deerinck, Bushong, Panda, Tasdizen and Ellisman.</copyright-statement>
<copyright-year>2014</copyright-year>
<license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) or licensor are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract><p>Electron microscopy (EM) facilitates analysis of the form, distribution, and functional status of key organelle systems in various pathological processes, including those associated with neurodegenerative disease. Such EM data often provide important new insights into the underlying disease mechanisms. The development of more accurate and efficient methods to quantify changes in subcellular microanatomy has already proven key to understanding the pathogenesis of Parkinson&#x00027;s and Alzheimer&#x00027;s diseases, as well as glaucoma. While our ability to acquire large volumes of 3D EM data is progressing rapidly, more advanced analysis tools are needed to assist in measuring precise three-dimensional morphologies of organelles within data sets that can include hundreds to thousands of whole cells. Although new imaging instrument throughputs can exceed teravoxels of data per day, image segmentation and analysis remain significant bottlenecks to achieving quantitative descriptions of whole cell structural organellomes. Here, we present a novel method for the automatic segmentation of organelles in 3D EM image stacks. Segmentations are generated using only 2D image information, making the method suitable for anisotropic imaging techniques such as serial block-face scanning electron microscopy (SBEM). Additionally, no assumptions about 3D organelle morphology are made, ensuring the method can be easily expanded to any number of structurally and functionally diverse organelles. Following the presentation of our algorithm, we validate its performance by assessing the segmentation accuracy of different organelle targets in an example SBEM dataset and demonstrate that it can be efficiently parallelized on supercomputing resources, resulting in a dramatic reduction in runtime.</p></abstract>
<kwd-group>
<kwd>serial block-face scanning electron microscopy</kwd>
<kwd>3D electron microscopy</kwd>
<kwd>electron microscopy</kwd>
<kwd>automatic segmentation</kwd>
<kwd>image processing</kwd>
<kwd>organelle morphology</kwd>
<kwd>neuroinformatics</kwd>
</kwd-group>
<counts>
<fig-count count="10"/>
<table-count count="4"/>
<equation-count count="2"/>
<ref-count count="51"/>
<page-count count="13"/>
<word-count count="9074"/>
</counts>
</article-meta>
</front>
<body>
<sec sec-type="introduction" id="s1">
<title>Introduction</title>
<p>Advances in instrumentation for 3D EM are fueling a renaissance in the study of quantitative neuroanatomy (Peddie and Collinson, <xref ref-type="bibr" rid="B38">2014</xref>). Data obtained from techniques such as SBEM (Denk and Horstmann, <xref ref-type="bibr" rid="B11">2004</xref>) provide unprecedented volumetric snapshots of the <italic>in situ</italic> biological organization of the mammalian brain across a multitude of scales (Figure <xref ref-type="fig" rid="F1">1A</xref>). When combined with breakthroughs in specimen preparation (Deerinck et al., <xref ref-type="bibr" rid="B10">2010</xref>), such datasets reveal not only a complete view of the membrane topography of cells and organelles, but also the location of cytoskeletal elements, synaptic vesicles, and certain macromolecular complexes.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p><bold>The manual segmentation of organelles from SBEM image stacks represents a significant bottleneck to quantitative analyses</bold>. <bold>(A)</bold> A typical SBEM dataset consists of individual image slices collected in increments of &#x003B4; nm, with the values of &#x003B4; reported in the literature typically falling in the range of 20&#x02013;100 nm (Peddie and Collinson, <xref ref-type="bibr" rid="B38">2014</xref>). To cover a neuroanatomical region of any significance, the size of such datasets quickly enters the realm of teravoxels and analyses utilizing manual segmentation become intractable. <bold>(B)</bold> A scatter plot of the amount of time required for a highly trained neuroanatomist to segment all instances of a specific organelle in SBEM tiles of size 2000 &#x000D7; 2000 pixels demonstrates this impediment. Average values are represented by horizontal bars (mitochondria &#x0003D; 5.01 min, lysosomes &#x0003D; 3.43 min, nuclei &#x0003D; 0.93 min, nucleoli &#x0003D; 1.24 min). Since mitochondria are ubiquitously present throughout most tissues, extrapolation of their average segmentation time per tile to the size of a full dataset can reliably predict the actual segmentation time required for such a volume. For a dataset the size of the one used in this report (stack volume &#x0007E;450,000 &#x003BC;m<sup>3</sup>, tile size &#x0007E;60 &#x003BC;m<sup>2</sup>), the manual segmentation of all mitochondria would require roughly 2.3 years, placing it well outside the realm of feasibility. This effect is further exacerbated when experiments requiring segmentations from SBEM stacks over multiple samples or experimental conditions are desired.</p></caption>
<graphic xlink:href="fnana-08-00126-g0001.tif"/>
</fig>
<p>Harnessing the power of these emerging 3D techniques to study the structure of whole cell organellomes is of critical importance to the field of neuroscience. Abnormal organelle morphologies and distributions within cells of the nervous system are characteristic phenotypes of a growing number of neurodegenerative diseases. Aberrant mitochondrial fragmentation is believed to be an early and key event in neurodegeneration (Knott et al., <xref ref-type="bibr" rid="B26">2008</xref>; Campello and Scorrano, <xref ref-type="bibr" rid="B7">2010</xref>), and changes in mitochondrial structure have been observed in Alzheimer&#x00027;s disease (AD) neurons from human biopsies (Hirai et al., <xref ref-type="bibr" rid="B18">2001</xref>; Zhu et al., <xref ref-type="bibr" rid="B50">2013</xref>). Additionally, altered nuclear or nucleolar morphologies have been observed in a host of pathologies, including AD (Mann et al., <xref ref-type="bibr" rid="B31">1985</xref>; Riudavets et al., <xref ref-type="bibr" rid="B39">2007</xref>), torsion dystonia (Kim et al., <xref ref-type="bibr" rid="B22">2010</xref>), and Lewy body dementia (Gagyi et al., <xref ref-type="bibr" rid="B13">2012</xref>).</p>
<p>Our ability to quantify and understand the details of these subcellular components within the context of large-scale 3D EM datasets is dependent upon advances in the accuracy, throughput, and robustness of automatic segmentation routines. Although a number of studies have extracted organelle morphologies from SBEM datasets via manual segmentation (Zhuravleva et al., <xref ref-type="bibr" rid="B51">2012</xref>; Herms et al., <xref ref-type="bibr" rid="B17">2013</xref>; Holcomb et al., <xref ref-type="bibr" rid="B19">2013</xref>; Wilke et al., <xref ref-type="bibr" rid="B49">2013</xref>; Boh&#x000F3;rquez et al., <xref ref-type="bibr" rid="B4">2014</xref>), their applications are limited to only small subsets of the full stack due to the notoriously high labor cost associated with manual segmentation (Figure <xref ref-type="fig" rid="F1">1B</xref>). Automatic segmentations generated based on thresholds or manipulations of the image histogram (Jaume et al., <xref ref-type="bibr" rid="B20">2012</xref>; Vihinen et al., <xref ref-type="bibr" rid="B47">2013</xref>) may require extensive manual editing of their results to achieve the accurate quantification of single organelle morphologies.</p>
<p>The development of computationally advanced methods for the automatic segmentation of organelles in 3D EM stacks has led to increasingly accurate results (Vitaladevuni et al., <xref ref-type="bibr" rid="B48">2008</xref>; Narashima et al., <xref ref-type="bibr" rid="B35">2009</xref>; Smith et al., <xref ref-type="bibr" rid="B43">2009</xref>; Kumar et al., <xref ref-type="bibr" rid="B28">2010</xref>; Seyedhosseini et al., <xref ref-type="bibr" rid="B40">2013a</xref>). Recently, Giuly and co-workers proposed a method to segment mitochondria utilizing patch classification followed by isocontour pair classification and level sets (Giuly et al., <xref ref-type="bibr" rid="B15">2012</xref>). Lucchi et al. (<xref ref-type="bibr" rid="B29">2010</xref>, <xref ref-type="bibr" rid="B30">2012</xref>) developed an approach that trains a classifier to detect supervoxels that are most likely to belong to the boundary of the desired organelle. An approach to automatically segment cell nuclei using the software package ilastik to train a Random forest voxel classifier followed by morphological post-processing and object classification was proposed by Sommer et al. (<xref ref-type="bibr" rid="B44">2011</xref>), Tek et al. (<xref ref-type="bibr" rid="B45">2014</xref>). Though they yield impressive results, many current approaches utilize assumptions about the 3D morphology of the organelle target. This is problematic not only because it makes their expansion to the segmentation of other organelles non-trivial, but also because the typical SBEM dataset contains a heterogeneous mixture of organelle morphologies across multiple cell types. Therefore, there is a clear need for a robust method to accurately segment various organelles in SBEM stacks without any <italic>a priori</italic> assumptions about organelle morphology.</p>
<p>In this work, we present a method for the robust and accurate automatic segmentation of morphologically and functionally diverse organelles in EM image stacks. Organelle-specific pixel classifiers are trained using the cascaded hierarchical model (CHM), a state-of-the-art, supervised, multi-resolution framework for image segmentation that utilizes only 2D image information (Seyedhosseini et al., <xref ref-type="bibr" rid="B41">2013b</xref>). A series of tunable 2D filters are then applied to generate accurate segmentations from the outputs of pixel classification. In the final processing step, 3D connected components are meshed together in a manner that minimizes the deleterious effects of local and global imaging artifacts. Finally, we demonstrate that our method can be easily and efficiently scaled-up to handle the segmentation of all organelles in teravoxel-sized 3D EM datasets.</p>
</sec>
<sec>
<title>Material and methods</title>
<p>The description and validation of our method are arranged into three sections. In the first section, the workflow is described in detail. In the second, the robustness and accuracy of our method are validated by applying it to four different organelle targets (mitochondria, lysosomes, nuclei, and nucleoli) from a test SBEM dataset. In the third section, we describe experiments that demonstrate how our method can be easily scaled-up to accommodate the segmentation of teravoxel-sized datasets.</p>
<sec>
<title>The proposed method</title>
<sec>
<title>Image alignment and histogram specification</title>
<p>All individual images of the input SBEM stack are converted to the MRC format and appended to an 8-bit MRC stack using the IMOD programs <italic>dm2mrc</italic> and <italic>newstack</italic>, respectively (Kremer et al., <xref ref-type="bibr" rid="B27">1996</xref>). Sequential images within the stack are then translationally aligned to one another in the XY-plane using the cross-correlational alignment algorithm of the IMOD program <italic>tiltxcorr</italic>. To ensure consistency throughout the stack, the histograms of all images are matched to that of the first image in the stack using a MATLAB (The MathWorks, Inc., Natick, MA, U.S.A.) implementation of the exact histogram specification algorithm (Coltuc et al., <xref ref-type="bibr" rid="B9">2006</xref>).</p>
</sec>
<sec>
<title>Generation of training images and labels</title>
<p>Once an organelle target has been selected by the experimenter, the next step is to generate a set of organelle-specific training images and labels to subsequently train a CHM pixel classifier. A set of N seed points, P, are selected throughout the processed SBEM stack in locations that possess at least one instance of the desired organelle, such that:</p>
<disp-formula id="E1"><mml:math id="M1"><mml:mrow><mml:msub><mml:mtext>P</mml:mtext><mml:mtext>i</mml:mtext></mml:msub><mml:mo>=</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:msub><mml:mtext>x</mml:mtext><mml:mtext>i</mml:mtext></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mtext>y</mml:mtext><mml:mtext>i</mml:mtext></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mtext>z</mml:mtext><mml:mtext>i</mml:mtext></mml:msub><mml:mo stretchy='false'>)</mml:mo><mml:mo>&#x02200;</mml:mo><mml:mtext>i</mml:mtext><mml:mo>&#x02208;</mml:mo><mml:mo>&#x0007B;</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>N</mml:mtext><mml:mo>&#x0007D;</mml:mo></mml:mrow></mml:math></disp-formula>
<p>These points should be chosen in a manner that yields a wide distribution throughout the stack. After the selection of seed points, every instance of the chosen organelle is manually segmented in a Q &#x000D7; R pixel tile centered at each P<sub>i</sub>. Following manual segmentation, all tiles are extracted from the full SBEM stack using the IMOD program <italic>boxstartend</italic>. The extracted tiles will serve as training images, T<sub>i</sub>. Binary training labels, B<sub>i</sub>, are generated from each T<sub>i</sub> by applying the corresponding manual segmentation as a mask using the IMOD program <italic>imodmop</italic>. Thus, the final outputs from training data generation are (1) a stack of 8-bit, grayscale training images, T<sub>i</sub>, and (2) a stack of corresponding binary organelle masks, B<sub>i</sub>. Both stacks are of size Q &#x000D7; R &#x000D7; N. A flow chart illustrating this process is shown in Figure <xref ref-type="fig" rid="F2">2</xref>.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p><bold>A flow chart of the steps involved in training data generation</bold>. The generation of a set of training data for mitochondrial automatic segmentation is shown here. First, a set of seed points, P<sub>i</sub>, are selected such that a wide distribution throughout the volume is achieved (bottom left). Tiles of size Q &#x000D7; R centered at each seed point are extracted to serve as training images, T<sub>i</sub>. All instances of the desired organelle target are manually segmented by a trained neuroanatomist on each training image. These manual segmentations are then used as masks to binarize each T<sub>i</sub> such that pixels of value one correspond to pixels of T<sub>i</sub> that are positive for the desired organelle. This process is repeated N times to yield stacks of training images and their corresponding training labels, B<sub>i</sub>. These stacks are then used to train a CHM classifier, C<sub>S,L</sub>, with the desired number of stages, S, and levels, L.</p></caption>
<graphic xlink:href="fnana-08-00126-g0002.tif"/>
</fig>
</sec>
<sec>
<title>Training organelle pixel classifiers with the cascaded hierarchical model</title>
<p>The CHM consists of bottom-up and top-down steps cascaded in multiple stages (Seyedhosseini et al., <xref ref-type="bibr" rid="B41">2013b</xref>). The bottom-up step occurs in a user-specified number of hierarchical levels, L. At each level, the input stacks T<sub>i</sub> and B<sub>i</sub> are sequentially downsampled and a classifier is trained based on features extracted from the downsampled data as well as information from all lower levels of the hierarchy. After classifiers have been trained at all levels, the top-down path combines the coarse contextual information from higher levels into a single classifier that is applicable to images at native resolution. This whole process is then cascaded in a number of stages, S, where the output classifier from the previous stage serves as the input classifier for the subsequent stage. The final output is a pixel classifier, C<sub>S,L</sub>, that is applicable to images at the native pixel size of T<sub>i</sub> and B<sub>i</sub>. For optimal results, the number of stages chosen should be greater than one. The exact number of stages and levels chosen depends on a host of factors, including the size of T<sub>i</sub> and B<sub>i</sub> and the computational resources available to the experimenter.</p>
</sec>
<sec>
<title>Probability map generation</title>
<p>In the next step, a stack of test images, I<sub>j</sub>, are selected to apply the pixel classifier to. Depending on the goals of the experiment, these images may be full slices of the SBEM volume or extracted subvolumes. Prior to pixel classification, each I<sub>j</sub> is split into an <italic>m &#x000D7; n</italic> array of tiles such that the dimensions of each tile are roughly equivalent to the lateral dimensions of the training stacks, Q &#x000D7; R (step 3 of Algorithm <xref ref-type="table" rid="T4">1</xref>). Tiling is performed with an overlap of U pixels between adjacent tiles. The choice of U is dependent on the size of the training stacks as well as the organelle target; in general, ideal values of U should fall in the range of 2&#x02013;10% of Q and R. The previously generated CHM pixel classifier, C<sub>S,L</sub>, is then applied to each tile, yielding <italic>m &#x000D7; n</italic> probability map tiles (step 5 of Algorithm <xref ref-type="table" rid="T4">1</xref>). All processed tiles are then stitched together to yield a final probability map, M<sub>j</sub> (step 7 of Algorithm <xref ref-type="table" rid="T4">1</xref>). When stitching, the pixels in M<sub>j</sub> that correspond to regions of overlap between adjacent tiles are set to the maximum intensity pixel from all contributing tiles. Finally, M<sub>j</sub> is normalized such that each pixel ranges from [0, 1], with one representing the highest probability (step 8 of Algorithm <xref ref-type="table" rid="T4">1</xref>). This process is then repeated over each I<sub>j</sub> to yield the final stack of probability maps.</p>
<table-wrap position="float" id="T4">
<label>Algorithm 1</label>
<caption><p><bold>Organelle segmentation using tiled input images</bold>.</p></caption>
<table frame="hsides" rules="groups">
<tbody>
<tr>
<td align="left">1:</td>
<td align="left">Declare values of m, n, U, G, &#x003B1;, and &#x003BB;</td>
</tr>
<tr>
<td align="left">2:</td>
<td align="left"><bold>for</bold> every test image I<sub>j</sub> <bold>do</bold></td>
</tr>
<tr>
<td align="left">3:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;Generate k &#x0003D; m &#x000D7; n tiles of I<sub>j</sub> with overlap U</td>
</tr>
<tr>
<td align="left">4:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;<bold>for</bold> every k <bold>do</bold></td>
</tr>
<tr>
<td align="left">5:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;Apply the CHM classifier C<sub>S,L</sub> to the k-th tile</td>
</tr>
<tr>
<td align="left">6:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;<bold>end for</bold></td>
</tr>
<tr>
<td align="left">7:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;Stitch all k tiles together to yield the probability map, M<sub>j</sub></td>
</tr>
<tr>
<td align="left">8:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;Normalize M<sub>j</sub></td>
</tr>
<tr>
<td align="left">9:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;Classify M<sub>j</sub> using Otsu&#x00027;s multi-level method with G gray levels, yielding O<sub>j</sub></td>
</tr>
<tr>
<td align="left">10:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;Threshold O<sub>j</sub> at the G-th level, giving the initial position mask K<sub>j</sub></td>
</tr>
<tr>
<td align="left">11:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;Perform morphological shrinking on K<sub>j</sub></td>
</tr>
<tr>
<td align="left">12:</td>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;Segment M<sub>j</sub> by evolving active contours at initial positions specified by each unique 2D connected component of K<sub>j</sub>.</td>
</tr>
<tr>
<td/>
<td align="left">&#x000A0;&#x000A0;&#x000A0;&#x000A0;Iterate &#x003B1; times with a smoothing factor of &#x003BB;. The output is SEG<sub>j</sub>, the final segmentation of I<sub>j</sub>.</td>
</tr>
<tr>
<td align="left">13:</td>
<td align="left"><bold>end for</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec>
<title>Binarization of probability maps</title>
<p>Each probability map, M<sub>j</sub>, is binarized by evolving active contours (Chan and Vese, <xref ref-type="bibr" rid="B8">2001</xref>) at automatically determined initial positions. For an unsupervised determination of the initial positions, the probability map M<sub>j</sub> is first thresholded using Otsu&#x00027;s multi-level method (Otsu, <xref ref-type="bibr" rid="B37">1979</xref>) with G unique gray levels (step 9 of Algorithm <xref ref-type="table" rid="T4">1</xref>). The output from this operation is O<sub>j</sub>, a map in which each pixel of M<sub>j</sub> has been classified into one of G unique levels, with the zeroth level corresponding to the approximate background. This map is then binarized by thresholding O<sub>j</sub> at a pixel intensity of G, yielding a mask of initial positions, K<sub>j</sub> (step 10 of Algorithm <xref ref-type="table" rid="T4">1</xref>). This binary mask is then made smaller by applying two iterations of morphological shrinking (step 11 of Algorithm <xref ref-type="table" rid="T4">1</xref>) and used to initialize the evolution of active contours with a number of iterations and smoothing factor specified by &#x003B1; and &#x003BB;, respectively (step 12 of Algorithm <xref ref-type="table" rid="T4">1</xref>). Each 2D connected component of K<sub>j</sub> serves as a unique initial position for contour evolution. For best results, &#x003B1; should be at least 50. The choice of &#x003BB; depends largely on the organelle target and pixel size of the test images, but in general should fall in the range of 0&#x02013;8. Larger values of &#x003BB; can be used when the pixel size is small. If the pixel size is too large (i.e., above 10 nm/pixel), smoothing should be turned off by setting &#x003BB; to zero. The value of G significantly alters the results, and its choice is dependent on the goals of the experimenter. Low values of G tend to emphasize true positives at the risk of retaining false positives. 
As G is increased, false positives are more readily removed, but so are true positives. The final output from this process is SEG<sub>j</sub>, the organelle segmentation of the input grayscale image, I<sub>j</sub>. An illustration of this process is shown for two test images in Figure <xref ref-type="fig" rid="F3">3</xref>.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p><bold>The binarization of probability maps using active contours initialized by a multi-level Otsu threshold yields accurate segmentation results</bold>. Colorized maps, M, of a nucleus <bold>(A)</bold> and lysosomes <bold>(D)</bold> generated by applying Otsu&#x00027;s method with multiple levels to probability maps obtained by CHM pixel classification. Each color corresponds to a unique level of the threshold. Six gray levels (<italic>G</italic> &#x0003D; 6) were used for the nucleus and four (<italic>G</italic> &#x0003D; 4) were used for the lysosomes. Initial positions <bold>(B,E)</bold> were determined by selecting pixels corresponding to only the highest levels of each threshold followed by two iterations of morphological shrinking. Output segmentations <bold>(C,F)</bold> were obtained by evolving active contours about each of the initial positions in <bold>(B,E)</bold> with 100 iterations and a smoothing factor of 8 (&#x003B1; &#x0003D; 100, &#x003BB; &#x0003D; 8). In the case of the lysosome images, note that a myelinated axon that was originally detected by the classifier as a false positive (<bold>D</bold>, arrow) has been removed from the final segmentation by the application of our method (<bold>F</bold>, arrow).</p></caption>
<graphic xlink:href="fnana-08-00126-g0003.tif"/>
</fig>
</sec>
<sec>
<title>Meshing</title>
<p>Each output SEG<sub>j</sub> is converted to the MRC format and appended to an MRC stack. Contours are drawn around each 2D connected component using the IMOD program <italic>imodauto</italic>. The output contours are then three-dimensionally meshed together using the program <italic>imodmesh</italic>, and separate 3D connected components are sorted into different objects using the program <italic>imodsortsurf</italic>. Meshing is performed using the low resolution option to reduce the effect of translational artifacts between subsequent image slices.</p>
</sec>
</sec>
<sec>
<title>Experimental validation</title>
<sec>
<title>Tissue processing, image acquisition, and preprocessing</title>
<p>The suprachiasmatic nucleus (SCN) of one 3-month-old, male C57BL/6J mouse was harvested and prepared for SBEM using a standard protocol (Wilke et al., <xref ref-type="bibr" rid="B49">2013</xref>). The resin-embedded tissue was mounted on an aluminum specimen pin and prepared for SBEM imaging as previously described (Holcomb et al., <xref ref-type="bibr" rid="B19">2013</xref>). Imaging was performed by detection of backscattered electrons (BSE) using a Zeiss Merlin scanning electron microscope equipped with a 3View ultramicrotome (Gatan). The SBEM image stack was acquired in ultrahigh vacuum mode using an accelerating voltage of 1.9 kV, a pixel dwell time of 500 ns, and a spot size of 1.0. Sectioning was performed with a cutting thickness of 30 nm. BSE images were acquired at 800x magnification with a raster size of 32,000 pixels &#x000D7; 24,000 pixels, yielding a pixel size of 3.899 nm/pixel. A total of 1283 serial images were acquired, resulting in an image stack with tissue dimensions of roughly 124.8 &#x000D7; 93.6 &#x000D7; 38.5 &#x003BC;m (&#x0007E;450,000 &#x003BC;m<sup>3</sup>). The specimen was then removed from the chamber, and an image of a diffraction grating replica specimen (Ted Pella, Redding, CA, U.S.A.) was acquired for calibration of the lateral pixel size. Low magnification images of the block-face were acquired before and after sectioning. Image alignment was performed as described in Section Image Alignment and Histogram Specification. Following alignment, the stack was downsampled in the XY-plane by a factor of two, yielding a final stack with pixel dimensions of 16,000 &#x000D7; 12,000 &#x000D7; 1283 and pixel sizes of 7.799 nm/pixel and 30 nm/pixel in the lateral and axial dimensions, respectively. Since preliminary results did not demonstrate noticeable differences in the output of our method between the native resolution stack and the downsampled stack, downsampling was performed to reduce processing time. 
Exact histogram specification was performed as previously described. All image alignment and pre-processing steps were performed on a custom workstation (Advanced HPC, San Diego, CA, U.S.A.) with the following configuration: Xeon X5690 3.47 GHZ CPU, 48 GB RAM, 32 TB HDD, NVIDIA Quadro FX 3800, CentOS release 6.2.</p>
</sec>
<sec>
<title>Automatic segmentation</title>
<p>The four types of organelles targeted for automatic segmentation were mitochondria, lysosomes, nuclei, and nucleoli. These targets were chosen because they are morphologically and texturally diverse, and thus pose a significant test of the robustness of our method.</p>
<p>For each organelle target, 90 seed points were placed throughout the SBEM stack as described in Section Generation of Training Images and Labels. Training data and labels were created using the values shown in Table <xref ref-type="table" rid="T1">1</xref>. Of the 90 tiles generated for each organelle, 50 were randomly selected for use in training a CHM classifier; the other 40 were set aside to use as test data for validation. Organelle-specific CHM classifiers were trained using the values shown in Table <xref ref-type="table" rid="T1">1</xref>. The performances of all classifiers were evaluated by preparing receiver operating characteristic (ROC) curves (Fawcett, <xref ref-type="bibr" rid="B12">2006</xref>). Each classifier was then used to generate probability maps of the 40 test images corresponding to its organelle. Segmentation was performed as described in Section Binarization of Probability Maps using the values shown in Table <xref ref-type="table" rid="T1">1</xref>. All training, pixel classification, and segmentation steps were performed on the National Biomedical Computation Resource (NBCR) cluster, rocce.ucsd.edu (<ext-link ext-link-type="uri" xlink:href="http://rocce-mgr.ucsd.edu/">http://rocce-mgr.ucsd.edu/</ext-link>).</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p><bold>Parameter sets used for the validation of specific organelle targets</bold>.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left"><bold>Parameter</bold></th>
<th align="center"><bold>Variable</bold></th>
<th align="center"><bold>Mitochondria</bold></th>
<th align="center"><bold>Lysosomes</bold></th>
<th align="center"><bold>Nuclei</bold></th>
<th align="center"><bold>Nucleoli</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Number of training slices</td>
<td align="center">N</td>
<td align="center">50</td>
<td align="center">50</td>
<td align="center">50</td>
<td align="center">50</td>
</tr>
<tr>
<td align="left">Lateral dimensions of each training slice</td>
<td align="center">Q, R</td>
<td align="center">500, 500</td>
<td align="center">500, 500</td>
<td align="center">500, 500</td>
<td align="center">500, 500</td>
</tr>
<tr>
<td align="left">Number of CHM levels</td>
<td align="center">L</td>
<td align="center">2</td>
<td align="center">2</td>
<td align="center">2</td>
<td align="center">2</td>
</tr>
<tr>
<td align="left">Number of CHM stages</td>
<td align="center">S</td>
<td align="center">2</td>
<td align="center">2</td>
<td align="center">2</td>
<td align="center">2</td>
</tr>
<tr>
<td align="left">Size of tile array</td>
<td align="center">m, n</td>
<td align="center">2, 2</td>
<td align="center">2, 2</td>
<td align="center">2, 2</td>
<td align="center">2, 2</td>
</tr>
<tr>
<td align="left">Tiling overlap</td>
<td align="center">U</td>
<td align="center">50</td>
<td align="center">50</td>
<td align="center">20</td>
<td align="center">50</td>
</tr>
<tr>
<td align="left">Gray levels for multi-level Otsu thresholding</td>
<td align="center">G</td>
<td align="center">3</td>
<td align="center">2</td>
<td align="center">2</td>
<td align="center">2</td>
</tr>
<tr>
<td align="left">Active contour iterations</td>
<td align="center">&#x003B1;</td>
<td align="center">80</td>
<td align="center">200</td>
<td align="center">300</td>
<td align="center">90</td>
</tr>
<tr>
<td align="left">Smoothing factor</td>
<td align="center">&#x003BB;</td>
<td align="center">7</td>
<td align="center">4</td>
<td align="center">8</td>
<td align="center">10</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec>
<title>Validation of the active contour segmentation of CHM probability maps</title>
<p>Evaluation metrics were computed for each set of organelle-specific test images by comparing their segmentations with manually segmented ground truth. For each stack, the confusion matrix consisting of the number of true positive (TP), false positive (FP), true negative (TN), and false negative (FN) pixels was computed and used to calculate the true positive rate (TPR), false positive rate (FPR), precision, accuracy, and <italic>F</italic>-value, such that:</p>
<disp-formula id="E2"><mml:math id="M2"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:mtext>TPR</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mrow><mml:mtext>TP+FN</mml:mtext></mml:mrow></mml:mfrac></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>FPR</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mtext>FP</mml:mtext></mml:mrow><mml:mrow><mml:mtext>FP+TN</mml:mtext></mml:mrow></mml:mfrac></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>Precision</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mrow><mml:mtext>TP+FP</mml:mtext></mml:mrow></mml:mfrac></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>Accuracy</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mtext>TP+TN</mml:mtext></mml:mrow><mml:mrow><mml:mtext>TP+FN+FP+TN</mml:mtext></mml:mrow></mml:mfrac></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>F-value</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mtext>2&#x02009;</mml:mtext><mml:mo>&#x000D7;</mml:mo><mml:mtext>&#x02009;Precision&#x02009;</mml:mtext><mml:mo>&#x000D7;</mml:mo><mml:mtext>&#x02009;TPR</mml:mtext></mml:mrow><mml:mrow><mml:mtext>Precision+TPR</mml:mtext></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>This analysis was then repeated with segmentations generated from the same probability maps, but with a number of different unsupervised binarization algorithms: (1) Minimum error thresholding (Kittler and Illingworth, <xref ref-type="bibr" rid="B24">1986</xref>), (2) Maximum entropy thresholding (Kapur et al., <xref ref-type="bibr" rid="B21">1985</xref>), and (3) Otsu&#x00027;s single-level method (Otsu, <xref ref-type="bibr" rid="B37">1979</xref>). The performance of each algorithm, as quantified with the above metrics, was compared against that of our own method for each organelle target.</p>
<p>Since ground truth was available, the pixel intensity threshold that maximized the <italic>F</italic>-value of each probability map with respect to its corresponding ground truth was determined by computing the <italic>F</italic>-value at incrementally increasing thresholds from [0, &#x02026;, 1] and taking the maximum value.</p>
</sec>
</sec>
<sec>
<title>Scale-up to teravoxel-sized datasets</title>
<sec>
<title>Determination of optimal downsampling levels for different organelles</title>
<p>Since the segmentation of entire SBEM datasets is computationally intensive, we first decided to determine to what degree input images could be downsampled before segmentation results were adversely affected. Downsampled versions of each set of training images, training labels, and test images were prepared for all four organelle targets. Downsampling was performed by factors of two, three, four, and five, yielding pixel sizes of roughly 15.59, 23.39, 31.19, and 38.90 nm/pixel, respectively. CHM classifiers with two stages and two levels were trained for each set of downsampled, organelle-specific training images and labels. Probability maps were computed with <italic>m</italic> &#x0003D; 2, <italic>n</italic> &#x0003D; 2, and <italic>U</italic> &#x0003D; 20. Segmentations were generated using the active contour method with <italic>G</italic> &#x0003D; 2, &#x003B1; &#x0003D; 100, and &#x003BB; &#x0003D; 0. For each set of output segmentations, evaluation metrics were computed as described in Section Validation of the Active Contour Segmentation of CHM Probability Maps.</p>
</sec>
<sec>
<title>Segmentation of organelles from a full SBEM stack</title>
<p>The entire test dataset was laterally downsampled by a factor of eight, yielding a final stack with dimensions of 4000 &#x000D7; 3000 &#x000D7; 1283 pixels. The corresponding CHM classifiers generated in Section Determination of Optimal Downsampling Levels for Different Organelles were applied to produce stacks of probability maps at this pixel size for nuclei, nucleoli, and mitochondria. Processing was performed using an 8 &#x000D7; 6 tile array with an overlap of 20 pixels between adjacent tiles. Tiling, pixel classification, stitching, and binarization were performed using one CPU for each input image. One hundred total CPUs were used, such that 100 images were processed in parallel to expedite processing. All steps were performed on the National Biomedical Computation Resource (NBCR) cluster, rocce.ucsd.edu. Following probability map generation, all images were appended to organelle-specific MRC stacks, and contours and surface renderings were generated as described in Section Meshing.</p>
</sec>
</sec>
<sec>
<title>Comparison to a previously published algorithm</title>
<p>The results of our approach to nuclear automatic segmentation were validated by comparison with the results obtained by the algorithm of Tek et al. (<xref ref-type="bibr" rid="B45">2014</xref>). The full dataset was first downsampled to isotropic voxel dimensions (30 &#x000D7; 30 &#x000D7; 30 nm), resulting in a stack of size 4029 &#x000D7; 3120 &#x000D7; 1283 voxels. Training data and images consisted of a 500 &#x000D7; 500 &#x000D7; 50 subvolume of the downsampled stack containing two adjacent nuclei. Ground truth data were generated by manual segmentation of all neuronal, glial, and endothelial cell nuclei across fifty consecutive slices from the center of the dataset. A CHM pixel classifier with two stages and two levels was trained and applied to all images in the stack. Similarly, an ilastik voxel classifier was trained using all possible features with the same training images serving as input (Sommer et al., <xref ref-type="bibr" rid="B44">2011</xref>). This classifier was subsequently applied to all images in the downsampled stack. CHM probability maps were binarized using the proposed method. The ilastik probability maps were binarized by thresholding at the level <italic>p</italic> &#x0003D; 0.5, followed by the application of the object detection algorithm of Tek and colleagues with V<sub>th1</sub> and V<sub>th2</sub> set to 25 and 10,000, respectively (Tek et al., <xref ref-type="bibr" rid="B45">2014</xref>).</p>
<p>The source code for CHM and all related scripts are available to download from <ext-link ext-link-type="uri" xlink:href="http://www.sci.utah.edu/software/chm.html">http://www.sci.utah.edu/software/chm.html</ext-link>. The training images, training labels, and test images used in this study have also been made available to download at this URL.</p>
</sec>
</sec>
<sec sec-type="results" id="s2">
<title>Results</title>
<p>ROC curves for each organelle-specific CHM classifier are shown in Figure <xref ref-type="fig" rid="F4">4</xref>. In comparison to those for the other organelle classifiers, the ROC curve for the lysosomal classifier (Figure <xref ref-type="fig" rid="F4">4B</xref>) demonstrates a sparseness of data points with a low FPR. This is due to the extreme electron density of the lysosomal compartment and the number of other features in EM images that closely approximate it. Myelin sheaths (Figure <xref ref-type="fig" rid="F3">3D</xref>), plasma membranes, and other organelles cut <italic>en face</italic> can resemble the lysosomal compartment in both pixel intensity and texture and are frequently detected as false positives. Therefore, intelligent post-processing routines that utilize size and morphology are needed to separate lysosomes from such false positives.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p><bold>ROC curves for CHM classifiers of various organelles</bold>. ROC curves for mitochondrial <bold>(A)</bold>, lysosomal <bold>(B)</bold>, nuclear <bold>(C)</bold>, and nucleolar <bold>(D)</bold> CHM classifiers generated with two stages and two levels.</p></caption>
<graphic xlink:href="fnana-08-00126-g0004.tif"/>
</fig>
<p>A comparison of our proposed active contour binarization method to the other methods tested is shown in Figure <xref ref-type="fig" rid="F5">5</xref> using mitochondria as an example. Since the Golgi apparatus can sometimes display a texture similar to that of the mitochondrial matrix, the presence of this organelle can confuse the mitochondrial classifier (Figures <xref ref-type="fig" rid="F5">5A,B</xref>, arrows). Segmentations generated with the maximum entropy algorithm (Figure <xref ref-type="fig" rid="F5">5C</xref>, recall &#x0003D; 0.992, precision &#x0003D; 0.498, <italic>F</italic> &#x0003D; 0.670, accuracy &#x0003D; 0.948) and Otsu&#x00027;s single-level method (Figure <xref ref-type="fig" rid="F5">5D</xref>, recall &#x0003D; 0.958, precision &#x0003D; 0.687, <italic>F</italic> &#x0003D; 0.812, accuracy &#x0003D; 0.977) retain elements of the Golgi apparatus as false positives. However, probability map binarization using the proposed active contour method eliminates these false positives (Figure <xref ref-type="fig" rid="F5">5E</xref>, recall &#x0003D; 0.908, precision &#x0003D; 0.804, <italic>F</italic> &#x0003D; 0.863, accuracy &#x0003D; 0.985) when compared to the ground truth (Figure <xref ref-type="fig" rid="F5">5F</xref>). Output probability maps and active contour segmentations from example test images of each organelle are shown in comparison to their corresponding ground truth in Figure <xref ref-type="fig" rid="F6">6</xref>.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p><bold>Binarization of probability maps using active contours outperforms other methods</bold>. A CHM classifier for mitochondria was applied to a 500 &#x000D7; 500 pixel test image <bold>(A)</bold>, generating the probability map shown in <bold>(B)</bold>. Note that regions of pixels corresponding to the Golgi apparatus (yellow arrows) were detected in the probability map. The Golgi apparatus can often confuse mitochondrial pixel classifiers because it has a texture very similar to that of the mitochondrial matrix. The results of binarization of the probability map using maximum entropy <bold>(C)</bold> and Otsu&#x00027;s single-level method <bold>(D)</bold> are shown. Using these techniques, regions of the Golgi are permitted into the final segmentation as false positives. The resultant segmentation obtained by our method of binarization with active contours (<italic>G</italic> &#x0003D; 2, &#x003B1; &#x0003D; 100, &#x003BB; &#x0003D; 8) is shown in <bold>(E)</bold>. Instances of the Golgi apparatus were automatically removed during processing. This segmentation (<italic>F</italic> &#x0003D; 0.863, accuracy &#x0003D; 0.985) is a highly faithful representation of the ground truth <bold>(F)</bold>.</p></caption>
<graphic xlink:href="fnana-08-00126-g0005.tif"/>
</fig>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p><bold>The results of our method are consistent when applied to diverse organelle targets</bold>. The application of our method to different organelle targets yields consistent results without the need to significantly change the input parameters. Shown here are test images, each of size 500 &#x000D7; 500 pixels, and their corresponding probability maps, segmentations, and manually segmented ground truth images. The final column shows a transparent overlay of the segmentation onto the test image. The evaluation metrics for each test image are as follows: Mitochondria, <italic>F</italic> &#x0003D; 0.844, accuracy &#x0003D; 0.984; lysosomes, <italic>F</italic> &#x0003D; 0.872, accuracy &#x0003D; 0.997; nuclei, <italic>F</italic> &#x0003D; 0.971, accuracy &#x0003D; 0.971; nucleoli, <italic>F</italic> &#x0003D; 0.91, accuracy &#x0003D; 0.977.</p></caption>
<graphic xlink:href="fnana-08-00126-g0006.tif"/>
</fig>
<p>The segmentation evaluation metrics for each full stack of 40 organelle-specific test images are shown in Table <xref ref-type="table" rid="T2">2</xref>. The proposed active contour segmentation method resulted in a superior recall for all four organelles and a superior <italic>F</italic>-value for mitochondria, lysosomes, and nucleoli when compared to the other segmentation methods. The <italic>F</italic>-value for nuclear segmentation is negligibly better using Otsu&#x00027;s single-level method. The lack of distinction between these two binarization methods for nuclei is due largely to the already high quality of nuclear probability maps. The accuracy values obtained for each stack using active contour segmentation were 0.985, 0.997, 0.972, and 0.979 for mitochondria, lysosomes, nuclei, and nucleoli, respectively.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p><bold>Segmentation evaluation metrics for the tested organelle targets using various methods of probability map binarization</bold>.</p></caption>
<table frame="hsides" rules="groups">
<tbody>
<tr>
<td/>
<td align="center"><bold><italic>F</italic>-value</bold></td>
<td align="center"><bold>Precision</bold></td>
<td align="center"><bold>Recall</bold></td>
<td align="center"><bold>Jaccard Index</bold></td>
</tr>
<tr>
<td align="left" colspan="5"><bold>MITOCHONDRIA</bold></td>
</tr>
<tr>
<td align="left">Minimum Error</td>
<td align="center">0.635</td>
<td align="center">0.994</td>
<td align="center">0.466</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Max. Entropy</td>
<td align="center">0.669</td>
<td align="center">0.991</td>
<td align="center">0.505</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Otsu Single-level</td>
<td align="center">0.816</td>
<td align="center">0.957</td>
<td align="center">0.712</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Active Contours</td>
<td align="center">0.877</td>
<td align="center">0.867</td>
<td align="center">0.886</td>
<td align="center">0.780</td>
</tr>
<tr>
<td align="left" colspan="5"><bold>LYSOSOMES</bold></td>
</tr>
<tr>
<td align="left">Minimum Error</td>
<td align="center">0.433</td>
<td align="center">0.985</td>
<td align="center">0.277</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Max. Entropy</td>
<td align="center">0.492</td>
<td align="center">0.940</td>
<td align="center">0.508</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Otsu Single-level</td>
<td align="center">0.812</td>
<td align="center">0.899</td>
<td align="center">0.737</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Active Contours</td>
<td align="center">0.841</td>
<td align="center">0.854</td>
<td align="center">0.828</td>
<td align="center">0.726</td>
</tr>
<tr>
<td align="left" colspan="5"><bold>NUCLEI</bold></td>
</tr>
<tr>
<td align="left">Minimum Error</td>
<td align="center">0.963</td>
<td align="center">0.958</td>
<td align="center">0.968</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Max. Entropy</td>
<td align="center">0.644</td>
<td align="center">0.603</td>
<td align="center">0.692</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Otsu Single-level</td>
<td align="center">0.971</td>
<td align="center">0.979</td>
<td align="center">0.963</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Active Contours</td>
<td align="center">0.970</td>
<td align="center">0.973</td>
<td align="center">0.968</td>
<td align="center">0.942</td>
</tr>
<tr>
<td align="left" colspan="5"><bold>NUCLEOLI</bold></td>
</tr>
<tr>
<td align="left">Minimum Error</td>
<td align="center">0.781</td>
<td align="center">0.998</td>
<td align="center">0.641</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Max. Entropy</td>
<td align="center">0.811</td>
<td align="center">0.996</td>
<td align="center">0.684</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Otsu Single-level</td>
<td align="center">0.898</td>
<td align="center">0.973</td>
<td align="center">0.835</td>
<td align="center">&#x02013;</td>
</tr>
<tr>
<td align="left">Active Contours</td>
<td align="center">0.910</td>
<td align="center">0.902</td>
<td align="center">0.918</td>
<td align="center">0.835</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>A histogram of the probability map pixel intensity thresholds that maximize the <italic>F</italic>-value for each test image is shown in Figure <xref ref-type="fig" rid="F7">7</xref>. The wide spread of optimal threshold values for each organelle demonstrates the importance of using an unsupervised algorithm for probability map binarization, such as the one proposed here. Simply setting a pixel intensity threshold for each probability map would yield poor segmentations for a number of test images. This is especially true in very large SBEM images, where alterations in staining or focus may occur differentially throughout regions of the image stack.</p>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p><bold>The wide distribution of optimum pixel intensity thresholds demonstrates the usefulness of our method for probability map binarization</bold>. The probability map pixel intensity threshold that maximized the <italic>F</italic>-value with respect to ground truth was determined for all of the 40 test images analyzed for each organelle. The histogram of optimal thresholds shown here demonstrates the need for an unsupervised method of binarization. Simple thresholding of all probability maps at a single user-specified intensity level would result in poor results for many of these test images. Binarization using our method circumvents this problem by adapting the results to the unique histogram of each probability map in an unsupervised manner.</p></caption>
<graphic xlink:href="fnana-08-00126-g0007.tif"/>
</fig>
<p>The results of our downsampling experiment are shown in Figure <xref ref-type="fig" rid="F8">8</xref>. The resultant <italic>F</italic>-value for segmentation of nuclei and nucleoli remains remarkably consistent across the whole range of pixel sizes tested. The <italic>F</italic>-values for mitochondria and lysosomes exhibit substantial reductions at pixel sizes greater than &#x0007E;15 nm/pixel, corresponding to an overall downsampling of the original SBEM stack by a factor of four. The persistence of a high <italic>F</italic>-value across all scales tested for nuclei and nucleoli is likely due to their larger size and more regular texture in comparison to the other organelles. This is especially true for mitochondria, whose cristae architectures may differ dramatically from region to region.</p>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p><bold>Input images can be downsampled to various degrees before the segmentation results are negatively affected</bold>. Each organelle-specific stack was downsampled by factors of two, four, six, eight, and ten. Separate classifiers were trained at each different pixel size and segmentations were generated for each stack using our method. Here, the <italic>F</italic>-value of each resultant stack is compared across the different pixel sizes obtained after downsampling. The <italic>F</italic>-value of nuclei (blue) and nucleoli (magenta) is remarkably independent of the level of downsampling across all levels tested. The <italic>F</italic>-values for mitochondria (red) and lysosomes (green) significantly decline as the level of downsampling is increased.</p></caption>
<graphic xlink:href="fnana-08-00126-g0008.tif"/>
</fig>
<p>The wall clock time and random access memory (RAM) required for CHM classifier training and pixel classification for each organelle at each level of downsampling are shown in Table <xref ref-type="table" rid="T3">3</xref>. The time and RAM required for probability map binarization are not shown because they are negligible with respect to training and classification. These results indicate that, in cases where segmentation accuracy is not dramatically affected, a vast amount of time and computational resources can be saved by downsampling the input image stacks. Simple extrapolation of pixel classification times shows that the time required by a single CPU to apply a nuclear pixel classifier to our full test dataset would be reduced from &#x0007E;5.9 to &#x0007E;0.4 years when the input data are downsampled by a factor of 10.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p><bold>Runtime and memory requirements for nuclear CHM classifier training and pixel classification at various levels of downsampling</bold>.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left"><bold>nm/pixel</bold></th>
<th align="center" colspan="3"><bold>Classifier Training</bold></th>
<th align="center" colspan="2"><bold>Pixel Classification</bold></th>
</tr>
<tr>
<th/>
<th align="center"><bold>Dimensions</bold></th>
<th align="center"><bold>Time (h)</bold></th>
<th align="center"><bold>RAM (GB)</bold></th>
<th align="center"><bold>Time (min)</bold></th>
<th align="center"><bold>RAM (GB)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">7.79</td>
<td align="center">500 &#x000D7; 500 &#x000D7; 50</td>
<td align="center">23.98</td>
<td align="center">87.24</td>
<td align="center">12.73 &#x000B1; 0.90</td>
<td align="center">4.54 &#x000B1; 0.03</td>
</tr>
<tr>
<td align="left">15.59</td>
<td align="center">250 &#x000D7; 250 &#x000D7; 50</td>
<td align="center">20.35</td>
<td align="center">39.38</td>
<td align="center">4.67 &#x000B1; 0.15</td>
<td align="center">2.08 &#x000B1; 0.04</td>
</tr>
<tr>
<td align="left">23.39</td>
<td align="center">166 &#x000D7; 166 &#x000D7; 50</td>
<td align="center">7.95</td>
<td align="center">18.16</td>
<td align="center">2.03 &#x000B1; 0.03</td>
<td align="center">1.68 &#x000B1; 0.05</td>
</tr>
<tr>
<td align="left">31.19</td>
<td align="center">125 &#x000D7; 125 &#x000D7; 50</td>
<td align="center">4.71</td>
<td align="center">10.83</td>
<td align="center">1.18 &#x000B1; 0.02</td>
<td align="center">1.52 &#x000B1; 0.04</td>
</tr>
<tr>
<td align="left">38.90</td>
<td align="center">100 &#x000D7; 100 &#x000D7; 50</td>
<td align="center">3.18</td>
<td align="center">7.38</td>
<td align="center">0.90 &#x000B1; 0.04</td>
<td align="center">1.41 &#x000B1; 0.04</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p><italic>The dimensions of the stack of training images and labels used to train the classifier are given. The values for pixel classification correspond to the average values required to generate a probability map for one tile of roughly 60 &#x003BC;m<sup><italic>2</italic></sup> at the tissue level (1000 &#x000D7; 1000 pixels at 2x downsampling). Values are reported as the mean and standard deviation (N &#x0003D; 40 for each). Time is reported as the wall clock time for the indicated process.</italic></p>
</table-wrap-foot>
</table-wrap>
<p>These time and memory requirements were dramatically reduced by implementing tiling and processing over multiple CPUs. During segmentation of the full, downsampled dataset, the average processing time per 500 &#x000D7; 500 tile was 3.28 &#x000B1; 0.39 min (average and standard deviation, <italic>N</italic> &#x0003D; 600), with no significant difference in average time between organelles. By utilizing parallel processing with 100 CPUs, probability maps for the entire stack were generated in roughly 33 h. An example full slice and its corresponding nuclear probability map are shown in Figures <xref ref-type="fig" rid="F9">9A,C</xref>. Figures <xref ref-type="fig" rid="F9">9B,D</xref> depict additional probability maps of mitochondria and nucleoli, respectively. The full slice probability maps of these other organelles were computed in a manner similar to that of the nuclei.</p>
<fig id="F9" position="float">
<label>Figure 9</label>
<caption><p><bold>Automatic segmentation can be efficiently scaled to handle full slices from teravoxel-sized SBEM datasets</bold>. Probability maps of full images from the SCN dataset were generated by downsampling the image, computing probability maps of individual tiles, and stitching these tiled maps together. Shown here are probability maps of mitochondria <bold>(B)</bold>, nuclei <bold>(C)</bold>, and nucleoli <bold>(D)</bold> computed from the same full slice <bold>(A)</bold>. The full slice was downsampled by a factor of two prior to mitochondrial pixel classification and a factor of eight before nuclear and nucleolar pixel classification. Common residual errors during mitochondrial pixel classification are the false detection of endothelial cells (arrow) and nucleoli or clusters of chromatin in the nucleus (asterisk). A common error encountered during nuclear pixel classification is the false detection of regions of cytoplasm devoid of membrane-bound organelles (arrowhead). These residuals are frequently removed by the application of the proposed probability map segmentation algorithm. Scale bar &#x0003D; 20 &#x003BC;m.</p></caption>
<graphic xlink:href="fnana-08-00126-g0009.tif"/>
</fig>
<p>When applied to the segmentation of nuclei from the full SCN dataset following downsampling to isotropic voxel dimensions, the proposed method achieved a precision, recall, and <italic>F</italic>-value of 0.976, 0.977, and 0.977, respectively. Similarly, the method of Tek et al. (<xref ref-type="bibr" rid="B45">2014</xref>) achieved a precision, recall, and <italic>F</italic>-value of 0.976, 0.542, and 0.697, respectively, when applied to the same dataset using the same training data. Due to an already high precision and low number of false positives, the final object classification step performed by Tek and coworkers was omitted. Evaluation metrics were computed using fifty consecutive manually annotated slices as ground truth.</p>
<p>A surface rendering of a full SCN neuron containing renderings of its nucleus, nucleolus, and mitochondria is shown in Figure <xref ref-type="fig" rid="F10">10</xref>. The plasma membrane of the neuron was manually segmented by a trained neuroanatomist. The surface renderings of all organelles were automatically generated, with minor manual corrections applied.</p>
<fig id="F10" position="float">
<label>Figure 10</label>
<caption><p><bold>Output surface renderings of manually segmented organelles within an SCN neuron</bold>. The plasma membrane of a neuron was manually traced in its entirety throughout the dataset. The size of this neuron with respect to the full dataset (bottom left, scale bar &#x0003D; 20 &#x003BC;m) demonstrates the scale of the segmentation challenge. An enlarged version of this neuron with a transparent plasma membrane is shown in the upper left corner. Surface renderings of the nucleus (yellow), nucleolus (cyan), and mitochondria (green) were generated from the output of our automatic segmentation workflow. Two cross-sectional planes through the neuron reveal the corresponding SBEM slice with transparent overlays of the probability maps for the three organelles (scale bar &#x0003D; 2 &#x003BC;m). Output renderings such as these can be used to analyze any number of parameters, including organelle morphology and clustering throughout the whole cell.</p></caption>
<graphic xlink:href="fnana-08-00126-g0010.tif"/>
</fig>
</sec>
<sec sec-type="discussion" id="s3">
<title>Discussion</title>
<p>As recently as a few years ago, the notion of reconstructing and morphologically characterizing the organelle networks of even a few whole cells was considered a monumental challenge (Noske et al., <xref ref-type="bibr" rid="B36">2008</xref>). The advent and widespread adoption of high throughput, volumetric EM techniques has threatened to change that notion, with the caveat that our ability to segment and analyze data must first catch up with our ability to collect it. With that goal in mind, this study aimed to develop a method for the accurate automatic segmentation of organelles in EM image stacks that: (1) could be easily adapted to any organelle of interest, and (2) could be applied to teravoxel-sized datasets in a computationally efficient manner.</p>
<p>Since it does not make any large-scale, <italic>a priori</italic> assumptions about the morphology of the segmentation target, the proposed method can be applied to segment diverse organelles with ease. The only geometrical properties assumed throughout the method are boundary smoothness and a cross-sectional area that is large enough to prevent the removal of true positives following binary shrinking. Both of these assumptions are valid for virtually all organelles under practical imaging conditions. CHM classifiers can be trained for any dataset or organelle target if given the proper training data, and the output segmentations from our method can be tuned to the demands of unique experiments. For example, decreasing the number of gray levels, G, used in the multi-level Otsu thresholding step will emphasize true positives at the expense of including false positives, which can often be excluded by post-processing filters. Additionally, it is easier to remove false positives by manual correction or crowd-sourcing (Giuly et al., <xref ref-type="bibr" rid="B14">2013</xref>) than it is to add missing true positives.</p>
<p>The proposed method performed favorably when compared to a recently published algorithm for the automatic segmentation of cell nuclei (Tek et al., <xref ref-type="bibr" rid="B45">2014</xref>). It is interesting to note that the performance of our method was very similar when trained using either images from consecutive slices of the same nuclei (precision &#x0003D; 0.976, recall &#x0003D; 0.977) or single slice images from a variety of nuclei (precision &#x0003D; 0.973, recall &#x0003D; 0.968). This similarity demonstrates the robustness of the CHM pixel classifier for this task. It is likely that the segmentation results obtained by applying the method of Tek and colleagues to the SCN dataset could be strengthened by training an ilastik voxel classifier against a greater diversity of nuclei.</p>
<p>Another advantage of the proposed method lies in its scalability to full datasets. The generation of probability maps from small tiles of the input image minimizes the required RAM. Additionally, it allows for computation to be easily expedited by parallelizing the processing of individual tiles across multiple CPUs. Our demonstration that accurate results for certain organelles can be achieved on downsampled stacks also helps expedite processing. One can envision an experiment in which a teravoxel-sized SBEM stack collected at high resolution for axon tracking can then be downsampled and have its nuclei or mitochondria automatically segmented at a fraction of the computational cost that would have been required at its native resolution. As innovative methods to rapidly acquire even larger datasets continue to be developed (Mohammadi-Gheidari and Kruit, <xref ref-type="bibr" rid="B34">2011</xref>; Helmstaedter et al., <xref ref-type="bibr" rid="B16">2013</xref>; Marx, <xref ref-type="bibr" rid="B33">2013</xref>), this reduction in computational cost will prove critical.</p>
<p>Although it is beyond the scope of this paper, a number of 3D post-processing steps that would lead to further improvements in the results of automatic segmentation can be proposed. A simple size exclusion filter could be applied to 3D connected components to remove false positives that do not fall within the possible size range for the given organelle. A scan over every segmented slice of each 3D component could be performed to look for aberrant spikes or troughs in 2D metrics such as perimeter or area. The locations of these spikes and troughs would indicate slices on which a poor segmentation occurred, and these slices could be correspondingly removed and replaced by interslice interpolations. The application of such processes to the output from our method will be the subject of future development.</p>
<p>In conclusion, this paper introduces novel methods for the automatic segmentation of organelles from EM image stacks that are both robust and able to handle datasets of any size. These tools fill a critical need by allowing for the quantitative analysis of volumetric EM datasets at a scale between that of current connectomics approaches (Briggman and Denk, <xref ref-type="bibr" rid="B5">2006</xref>; Anderson et al., <xref ref-type="bibr" rid="B1">2011</xref>; Bock et al., <xref ref-type="bibr" rid="B3">2011</xref>; Briggman et al., <xref ref-type="bibr" rid="B6">2011</xref>; Kleinfeld et al., <xref ref-type="bibr" rid="B25">2011</xref>; Varshney et al., <xref ref-type="bibr" rid="B46">2011</xref>; Helmstaedter et al., <xref ref-type="bibr" rid="B16">2013</xref>; Kim et al., <xref ref-type="bibr" rid="B23">2014</xref>) and that afforded by genetically encoded markers for small molecule localization (Shu et al., <xref ref-type="bibr" rid="B42">2011</xref>; Martell et al., <xref ref-type="bibr" rid="B32">2012</xref>; Boassa et al., <xref ref-type="bibr" rid="B2">2013</xref>).</p>
</sec>
<sec>
<title>Authors and contributors</title>
<p>Alex J. Perez, Mojtaba Seyedhosseini, Tolga Tasdizen, Satchidananda Panda, and Mark H. Ellisman designed research. Alex J. Perez, Mojtaba Seyedhosseini, Thomas J. Deerinck, and Eric A. Bushong performed research. Alex J. Perez and Mojtaba Seyedhosseini analyzed data. Alex J. Perez wrote the paper.</p>
<sec>
<title>Conflict of interest statement</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
</sec>
</body>
<back>
<ack>
<p>The authors would like to thank Christopher Churas for his assistance with CHM and Anna Kreshuk and Stuart Berg for their assistance with ilastik. This work was supported by grants from the following entities: the National Institute of General Medical Science (NIGMS) under award P41 GM103412 to Mark H. Ellisman, the National Institute of Neurological Disorders and Stroke under award number 1R01NS075314 to Mark H. Ellisman and Tolga Tasdizen, the National Biomedical Computation Resource (NBCR) with support from NIGMS under award P41 GM103426, the National Institutes of Health (NIH) under award R01 EY016807 to Satchidananda Panda, and Fellowship support (Alex J. Perez) from the National Institute on Drug Abuse under award 5T32DA007315-11.</p>
</ack>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Anderson</surname> <given-names>J. R.</given-names></name> <name><surname>Jones</surname> <given-names>B. W.</given-names></name> <name><surname>Watt</surname> <given-names>C. B.</given-names></name> <name><surname>Shaw</surname> <given-names>M. V.</given-names></name> <name><surname>Yang</surname> <given-names>J. H.</given-names></name> <name><surname>Demill</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Exploring the retinal connectome</article-title>. <source>Mol. Vis</source>. <volume>17</volume>, <fpage>355</fpage>&#x02013;<lpage>379</lpage>. <pub-id pub-id-type="pmid">21311605</pub-id></citation>
</ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Boassa</surname> <given-names>D.</given-names></name> <name><surname>Berlanga</surname> <given-names>M. L.</given-names></name> <name><surname>Yang</surname> <given-names>M. A.</given-names></name> <name><surname>Terada</surname> <given-names>M.</given-names></name> <name><surname>Hu</surname> <given-names>J.</given-names></name> <name><surname>Bushong</surname> <given-names>E. A.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Mapping the subcellular distribution of &#x003B1;-synuclein in neurons using genetically encoded probes for correlated light and electron microscopy: implications for Parkinson&#x00027;s disease pathogenesis</article-title>. <source>J. Neurosci</source>. <volume>33</volume>, <fpage>2605</fpage>&#x02013;<lpage>2615</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.2898-12.2013</pub-id><pub-id pub-id-type="pmid">23392688</pub-id></citation>
</ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bock</surname> <given-names>D. D.</given-names></name> <name><surname>Lee</surname> <given-names>W.-C. A.</given-names></name> <name><surname>Kerlin</surname> <given-names>A. M.</given-names></name> <name><surname>Andermann</surname> <given-names>M. L.</given-names></name> <name><surname>Hood</surname> <given-names>G.</given-names></name> <name><surname>Wetzel</surname> <given-names>A. W.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Network anatomy and <italic>in vivo</italic> physiology of visual cortical neurons</article-title>. <source>Nature</source> <volume>471</volume>, <fpage>177</fpage>&#x02013;<lpage>182</lpage>. <pub-id pub-id-type="doi">10.1038/nature09802</pub-id><pub-id pub-id-type="pmid">21390124</pub-id></citation>
</ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Boh&#x000F3;rquez</surname> <given-names>D. V.</given-names></name> <name><surname>Samsa</surname> <given-names>L. A.</given-names></name> <name><surname>Roholt</surname> <given-names>A.</given-names></name> <name><surname>Medicetty</surname> <given-names>S.</given-names></name> <name><surname>Chandra</surname> <given-names>R.</given-names></name> <name><surname>Liddle</surname> <given-names>R. A.</given-names></name></person-group> (<year>2014</year>). <article-title>An enteroendocrine cell-enteric glia connection revealed by 3D electron microscopy</article-title>. <source>PLoS ONE</source> <volume>9</volume>:<fpage>e89881</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0089881</pub-id><pub-id pub-id-type="pmid">24587096</pub-id></citation>
</ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Briggman</surname> <given-names>K. L.</given-names></name> <name><surname>Denk</surname> <given-names>W.</given-names></name></person-group> (<year>2006</year>). <article-title>Towards neural circuit reconstruction with volume electron microscopy techniques</article-title>. <source>Curr. Opin. Neurobiol</source>. <volume>16</volume>, <fpage>562</fpage>&#x02013;<lpage>570</lpage>. <pub-id pub-id-type="doi">10.1016/j.conb.2006.08.010</pub-id><pub-id pub-id-type="pmid">16962767</pub-id></citation>
</ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Briggman</surname> <given-names>K. L.</given-names></name> <name><surname>Helmstaedter</surname> <given-names>M.</given-names></name> <name><surname>Denk</surname> <given-names>W.</given-names></name></person-group> (<year>2011</year>). <article-title>Wiring specificity in the direction-selectivity circuit of the retina</article-title>. <source>Nature</source> <volume>471</volume>, <fpage>183</fpage>&#x02013;<lpage>188</lpage>. <pub-id pub-id-type="doi">10.1038/nature09818</pub-id><pub-id pub-id-type="pmid">21390125</pub-id></citation>
</ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Campello</surname> <given-names>S.</given-names></name> <name><surname>Scorrano</surname> <given-names>L.</given-names></name></person-group> (<year>2010</year>). <article-title>Mitochondrial shape changes: orchestrating cell pathophysiology</article-title>. <source>EMBO Rep</source>. <volume>11</volume>, <fpage>678</fpage>&#x02013;<lpage>684</lpage>. <pub-id pub-id-type="doi">10.1038/embor.2010.115</pub-id><pub-id pub-id-type="pmid">20725092</pub-id></citation>
</ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chan</surname> <given-names>T. F.</given-names></name> <name><surname>Vese</surname> <given-names>L. A.</given-names></name></person-group> (<year>2001</year>). <article-title>Active contours without edges</article-title>. <source>IEEE Trans. Image Process</source>. <volume>10</volume>, <fpage>266</fpage>&#x02013;<lpage>277</lpage>. <pub-id pub-id-type="doi">10.1109/83.902291</pub-id><pub-id pub-id-type="pmid">18249617</pub-id></citation>
</ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coltuc</surname> <given-names>D.</given-names></name> <name><surname>Bolon</surname> <given-names>P.</given-names></name> <name><surname>Chassery</surname> <given-names>J. M.</given-names></name></person-group> (<year>2006</year>). <article-title>Exact histogram specification</article-title>. <source>IEEE Trans. Image Process</source>. <volume>15</volume>, <fpage>1143</fpage>&#x02013;<lpage>1152</lpage>. <pub-id pub-id-type="doi">10.1109/TIP.2005.864170</pub-id><pub-id pub-id-type="pmid">16671295</pub-id></citation>
</ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Deerinck</surname> <given-names>T. J.</given-names></name> <name><surname>Bushong</surname> <given-names>E. A.</given-names></name> <name><surname>Lev-Ram</surname> <given-names>V.</given-names></name> <name><surname>Shu</surname> <given-names>X.</given-names></name> <name><surname>Tsien</surname> <given-names>R. Y.</given-names></name> <name><surname>Ellisman</surname> <given-names>M. H.</given-names></name></person-group> (<year>2010</year>). <article-title>Enhancing serial block-face scanning electron microscopy to enable high resolution 3-D nanohistology of cells and tissues</article-title>. <source>Microsc. Microanal</source>. <volume>16</volume>, <fpage>1138</fpage>&#x02013;<lpage>1139</lpage>. <pub-id pub-id-type="doi">10.1017/S1431927610055170</pub-id></citation>
</ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Denk</surname> <given-names>W.</given-names></name> <name><surname>Horstmann</surname> <given-names>H.</given-names></name></person-group> (<year>2004</year>). <article-title>Serial block-face scanning electron microscopy to reconstruct three-dimensional tissue nanostructure</article-title>. <source>PLoS Biol</source>. <volume>2</volume>:<fpage>e329</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pbio.0020329</pub-id><pub-id pub-id-type="pmid">15514700</pub-id></citation>
</ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fawcett</surname> <given-names>T.</given-names></name></person-group> (<year>2006</year>). <article-title>An introduction to ROC analysis</article-title>. <source>Pattern Recogn. Lett</source>. <volume>27</volume>:<fpage>861</fpage>&#x02013;<lpage>874</lpage>. <pub-id pub-id-type="doi">10.1016/j.patrec.2005.10.010</pub-id></citation>
</ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gagyi</surname> <given-names>E.</given-names></name> <name><surname>Kormos</surname> <given-names>B.</given-names></name> <name><surname>Castellanos</surname> <given-names>K. J.</given-names></name> <name><surname>Valyi-Nagy</surname> <given-names>K.</given-names></name> <name><surname>Korneff</surname> <given-names>D.</given-names></name> <name><surname>LoPresti</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Decreased oligodendrocyte nuclear diameter in Alzheimer&#x00027;s disease and Lewy body dementia</article-title>. <source>Brain Pathol</source>. <volume>22</volume>, <fpage>803</fpage>&#x02013;<lpage>810</lpage>. <pub-id pub-id-type="doi">10.1111/j.1750-3639.2012.00595.x</pub-id><pub-id pub-id-type="pmid">22429607</pub-id></citation>
</ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Giuly</surname> <given-names>R. J.</given-names></name> <name><surname>Kim</surname> <given-names>K.-Y.</given-names></name> <name><surname>Ellisman</surname> <given-names>M. H.</given-names></name></person-group> (<year>2013</year>). <article-title>DP2: distributed 3D image segmentation using micro-labor workforce</article-title>. <source>Bioinformatics</source> <volume>29</volume>, <fpage>1359</fpage>&#x02013;<lpage>1360</lpage>. <pub-id pub-id-type="doi">10.1093/bioinformatics/btt154</pub-id><pub-id pub-id-type="pmid">23574738</pub-id></citation>
</ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Giuly</surname> <given-names>R. J.</given-names></name> <name><surname>Martone</surname> <given-names>M. E.</given-names></name> <name><surname>Ellisman</surname> <given-names>M. H.</given-names></name></person-group> (<year>2012</year>). <article-title>Method: automatic segmentation of mitochondria utilizing patch classification, contour pair classification, and automatically seeded level sets</article-title>. <source>BMC Bioinformatics</source> <volume>13</volume>:<fpage>29</fpage>. <pub-id pub-id-type="doi">10.1186/1471-2105-13-29</pub-id><pub-id pub-id-type="pmid">22321695</pub-id></citation>
</ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Helmstaedter</surname> <given-names>M.</given-names></name> <name><surname>Briggman</surname> <given-names>K. L.</given-names></name> <name><surname>Turaga</surname> <given-names>S. C.</given-names></name> <name><surname>Jain</surname> <given-names>V.</given-names></name> <name><surname>Seung</surname> <given-names>H. S.</given-names></name> <name><surname>Denk</surname> <given-names>W.</given-names></name></person-group> (<year>2013</year>). <article-title>Connectomic reconstruction of the inner plexiform layer in the mouse retina</article-title>. <source>Nature</source> <volume>500</volume>, <fpage>168</fpage>&#x02013;<lpage>174</lpage>. <pub-id pub-id-type="doi">10.1038/nature12346</pub-id><pub-id pub-id-type="pmid">23925239</pub-id></citation>
</ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Herms</surname> <given-names>A.</given-names></name> <name><surname>Bosch</surname> <given-names>M.</given-names></name> <name><surname>Ariotti</surname> <given-names>N.</given-names></name> <name><surname>Reddy</surname> <given-names>B. J. N.</given-names></name> <name><surname>Fajardo</surname> <given-names>A.</given-names></name> <name><surname>Fern&#x000E1;ndez-Vidal</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Cell-to-cell heterogeneity in lipid droplets suggests a mechanism to reduce lipotoxicity</article-title>. <source>Curr. Biol</source>. <volume>23</volume>, <fpage>1489</fpage>&#x02013;<lpage>1496</lpage>. <pub-id pub-id-type="doi">10.1016/j.cub.2013.06.032</pub-id><pub-id pub-id-type="pmid">23871243</pub-id></citation>
</ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hirai</surname> <given-names>K.</given-names></name> <name><surname>Aliev</surname> <given-names>G.</given-names></name> <name><surname>Nunomura</surname> <given-names>A.</given-names></name> <name><surname>Fujioka</surname> <given-names>H.</given-names></name> <name><surname>Russell</surname> <given-names>R. L.</given-names></name> <name><surname>Atwood</surname> <given-names>C. S.</given-names></name> <etal/></person-group>. (<year>2001</year>). <article-title>Mitochondrial abnormalities in Alzheimer&#x00027;s disease</article-title>. <source>J. Neurosci</source>. <volume>21</volume>, <fpage>3017</fpage>&#x02013;<lpage>3023</lpage>. <pub-id pub-id-type="pmid">11312286</pub-id></citation>
</ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Holcomb</surname> <given-names>P. S.</given-names></name> <name><surname>Hoffpauir</surname> <given-names>B. K.</given-names></name> <name><surname>Hoyson</surname> <given-names>M. C.</given-names></name> <name><surname>Jackson</surname> <given-names>D. R.</given-names></name> <name><surname>Deerinck</surname> <given-names>T. J.</given-names></name> <name><surname>Marrs</surname> <given-names>G. S.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Synaptic inputs compete during rapid formation of the calyx of Held: a new model system for neural development</article-title>. <source>J. Neurosci</source>. <volume>33</volume>, <fpage>12954</fpage>&#x02013;<lpage>12969</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.1087-13.2013</pub-id><pub-id pub-id-type="pmid">23926251</pub-id></citation>
</ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jaume</surname> <given-names>S.</given-names></name> <name><surname>Knobe</surname> <given-names>K.</given-names></name> <name><surname>Newton</surname> <given-names>R. R.</given-names></name> <name><surname>Schlimbach</surname> <given-names>F.</given-names></name> <name><surname>Blower</surname> <given-names>M.</given-names></name> <name><surname>Reid</surname> <given-names>R. C.</given-names></name></person-group> (<year>2012</year>). <article-title>A multiscale parallel computing architecture for automated segmentation of the brain connectome</article-title>. <source>IEEE Trans. Biomed. Eng</source>. <volume>59</volume>, <fpage>35</fpage>&#x02013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2011.2168396</pub-id><pub-id pub-id-type="pmid">21926011</pub-id></citation>
</ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kapur</surname> <given-names>J. N.</given-names></name> <name><surname>Sahoo</surname> <given-names>P. K.</given-names></name> <name><surname>Wong</surname> <given-names>A. C. K.</given-names></name></person-group> (<year>1985</year>). <article-title>A new method for gray-level picture thresholding using the entropy of the histogram</article-title>. <source>Graph. Model. Image Process</source>. <volume>29</volume>, <fpage>273</fpage>&#x02013;<lpage>285</lpage>. <pub-id pub-id-type="doi">10.1016/0734-189X(85)90125-2</pub-id></citation>
</ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>C. E.</given-names></name> <name><surname>Perez</surname> <given-names>A.</given-names></name> <name><surname>Perkins</surname> <given-names>G.</given-names></name> <name><surname>Ellisman</surname> <given-names>M. H.</given-names></name> <name><surname>Dauer</surname> <given-names>W. T.</given-names></name></person-group> (<year>2010</year>). <article-title>A molecular mechanism underlying the neural-specific defect in torsinA mutant mice</article-title>. <source>Proc. Natl. Acad. Sci. U.S.A</source>. <volume>107</volume>, <fpage>9861</fpage>&#x02013;<lpage>9866</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.0912877107</pub-id><pub-id pub-id-type="pmid">20457914</pub-id></citation>
</ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>J. S.</given-names></name> <name><surname>Greene</surname> <given-names>M. J.</given-names></name> <name><surname>Zlateski</surname> <given-names>A.</given-names></name> <name><surname>Lee</surname> <given-names>K.</given-names></name> <name><surname>Richardson</surname> <given-names>M.</given-names></name> <name><surname>Turaga</surname> <given-names>S. C.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Space-time wiring specificity supports direction selectivity in the retina</article-title>. <source>Nature</source> <volume>509</volume>, <fpage>331</fpage>&#x02013;<lpage>336</lpage>. <pub-id pub-id-type="doi">10.1038/nature13240</pub-id><pub-id pub-id-type="pmid">24805243</pub-id></citation>
</ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kittler</surname> <given-names>J.</given-names></name> <name><surname>Illingworth</surname> <given-names>J.</given-names></name></person-group> (<year>1986</year>). <article-title>Minimum error thresholding</article-title>. <source>Pattern Recogn</source>. <volume>19</volume>, <fpage>41</fpage>&#x02013;<lpage>47</lpage>.</citation>
</ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kleinfeld</surname> <given-names>D.</given-names></name> <name><surname>Bharioke</surname> <given-names>A.</given-names></name> <name><surname>Blinder</surname> <given-names>P.</given-names></name> <name><surname>Bock</surname> <given-names>D. D.</given-names></name> <name><surname>Briggman</surname> <given-names>K. L.</given-names></name> <name><surname>Chklovskii</surname> <given-names>D. B.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Large-scale automated histology in the pursuit of connectomes</article-title>. <source>J. Neurosci</source>. <volume>31</volume>, <fpage>16125</fpage>&#x02013;<lpage>16138</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.4077-11.2011</pub-id><pub-id pub-id-type="pmid">22072665</pub-id></citation>
</ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Knott</surname> <given-names>A. B.</given-names></name> <name><surname>Perkins</surname> <given-names>G.</given-names></name> <name><surname>Schwarzenbacher</surname> <given-names>R.</given-names></name> <name><surname>Bossy-Wetzel</surname> <given-names>E.</given-names></name></person-group> (<year>2008</year>). <article-title>Mitochondrial fragmentation in neurodegeneration</article-title>. <source>Nat. Rev. Neurosci</source>. <volume>9</volume>, <fpage>505</fpage>&#x02013;<lpage>518</lpage>. <pub-id pub-id-type="doi">10.1038/nrn2417</pub-id><pub-id pub-id-type="pmid">18568013</pub-id></citation>
</ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kremer</surname> <given-names>J. R.</given-names></name> <name><surname>Mastronarde</surname> <given-names>D. N.</given-names></name> <name><surname>McIntosh</surname> <given-names>J. R.</given-names></name></person-group> (<year>1996</year>). <article-title>Computer visualization of three-dimensional image data using IMOD</article-title>. <source>J. Struct. Biol</source>. <volume>116</volume>, <fpage>71</fpage>&#x02013;<lpage>76</lpage>. <pub-id pub-id-type="pmid">8742726</pub-id></citation>
</ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kumar</surname> <given-names>R.</given-names></name> <name><surname>Vazquez-Reina</surname> <given-names>A.</given-names></name> <name><surname>Pfister</surname> <given-names>H.</given-names></name></person-group> (<year>2010</year>). <article-title>Radon-like features and their application to connectomics</article-title>. <source>IEEE Comput. Soc. Workshop Math. Methods Biomed. Image Anal</source>. <volume>2010</volume>, <fpage>186</fpage>&#x02013;<lpage>193</lpage>. <pub-id pub-id-type="doi">10.1109/CVPRW.2010.5543594</pub-id></citation>
</ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lucchi</surname> <given-names>A.</given-names></name> <name><surname>Smith</surname> <given-names>K.</given-names></name> <name><surname>Achanta</surname> <given-names>R.</given-names></name> <name><surname>Lepetit</surname> <given-names>V.</given-names></name> <name><surname>Fua</surname> <given-names>P.</given-names></name></person-group> (<year>2010</year>). <article-title>A fully automated approach to segmentation of irregularly shaped cellular structures in EM images</article-title>. <source>Med. Image Comput. Comput. Assist. Interv</source>. <volume>2010</volume>, <fpage>463</fpage>&#x02013;<lpage>471</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-642-15745-5_57</pub-id><pub-id pub-id-type="pmid">20879348</pub-id></citation>
</ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lucchi</surname> <given-names>A.</given-names></name> <name><surname>Smith</surname> <given-names>K.</given-names></name> <name><surname>Achanta</surname> <given-names>R.</given-names></name> <name><surname>Knott</surname> <given-names>G.</given-names></name> <name><surname>Fua</surname> <given-names>P.</given-names></name></person-group> (<year>2012</year>). <article-title>Supervoxel-based segmentation of mitochondria in EM image stacks with learned shape features</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>31</volume>, <fpage>474</fpage>&#x02013;<lpage>486</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2011.2171705</pub-id><pub-id pub-id-type="pmid">21997252</pub-id></citation>
</ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mann</surname> <given-names>D. M. A.</given-names></name> <name><surname>Yate</surname> <given-names>P. O.</given-names></name> <name><surname>Marcyniuk</surname> <given-names>B.</given-names></name></person-group> (<year>1985</year>). <article-title>Some morphometric observations on the cerebral cortex and hippocampus in presenile Alzheimer&#x00027;s disease, senile dementia, of Alzheimer type, and Down&#x00027;s syndrome in middle age</article-title>. <source>J. Neurol. Sci</source>. <volume>69</volume>, <fpage>139</fpage>&#x02013;<lpage>159</lpage>. <pub-id pub-id-type="pmid">3162000</pub-id></citation>
</ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Martell</surname> <given-names>J. D.</given-names></name> <name><surname>Deerinck</surname> <given-names>T. J.</given-names></name> <name><surname>Sancak</surname> <given-names>Y.</given-names></name> <name><surname>Poulos</surname> <given-names>T. L.</given-names></name> <name><surname>Mootha</surname> <given-names>V. K.</given-names></name> <name><surname>Sosinsky</surname> <given-names>G. E.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Engineered ascorbate peroxidase as a genetically encoded reporter for electron microscopy</article-title>. <source>Nat. Biotechnol</source>. <volume>30</volume>, <fpage>1143</fpage>&#x02013;<lpage>1148</lpage>. <pub-id pub-id-type="doi">10.1038/nbt.2375</pub-id><pub-id pub-id-type="pmid">23086203</pub-id></citation>
</ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Marx</surname> <given-names>V.</given-names></name></person-group> (<year>2013</year>). <article-title>Neurobiology: brain mapping in high resolution</article-title>. <source>Nature</source> <volume>503</volume>, <fpage>147</fpage>&#x02013;<lpage>152</lpage>. <pub-id pub-id-type="doi">10.1038/503147a</pub-id><pub-id pub-id-type="pmid">24201287</pub-id></citation>
</ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mohammadi-Gheidari</surname> <given-names>A.</given-names></name> <name><surname>Kruit</surname> <given-names>P.</given-names></name></person-group> (<year>2011</year>). <article-title>Electron optics of multi-beam scanning electron microscope</article-title>. <source>Nucl. Instrum. Methods A</source> <volume>645</volume>, <fpage>60</fpage>&#x02013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1016/j.nima.2010.12.090</pub-id></citation>
</ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Narasimha</surname> <given-names>R.</given-names></name> <name><surname>Ouyang</surname> <given-names>H.</given-names></name> <name><surname>Gray</surname> <given-names>A.</given-names></name> <name><surname>McLaughlin</surname> <given-names>S. W.</given-names></name> <name><surname>Subramaniam</surname> <given-names>S.</given-names></name></person-group> (<year>2009</year>). <article-title>Automatic joint classification and segmentation of whole cell 3D images</article-title>. <source>Pattern Recogn</source>. <volume>42</volume>, <fpage>1067</fpage>&#x02013;<lpage>1079</lpage>. <pub-id pub-id-type="doi">10.1016/j.patcog.2008.08.009</pub-id></citation>
</ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Noske</surname> <given-names>A. B.</given-names></name> <name><surname>Costin</surname> <given-names>A. J.</given-names></name> <name><surname>Morgan</surname> <given-names>G. P.</given-names></name> <name><surname>Marsh</surname> <given-names>B. J.</given-names></name></person-group> (<year>2008</year>). <article-title>Expedited approaches to whole cell electron tomography and organelle mark-up <italic>in situ</italic> in high-pressure frozen pancreatic islets</article-title>. <source>J. Struct. Biol</source>. <volume>161</volume>, <fpage>298</fpage>&#x02013;<lpage>313</lpage>. <pub-id pub-id-type="doi">10.1016/j.jsb.2007.09.015</pub-id><pub-id pub-id-type="pmid">18069000</pub-id></citation>
</ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Otsu</surname> <given-names>N.</given-names></name></person-group> (<year>1979</year>). <article-title>A threshold selection method from gray-level histograms</article-title>. <source>IEEE Trans. Syst. Man Cybern</source>. <volume>9</volume>, <fpage>62</fpage>&#x02013;<lpage>66</lpage>.</citation>
</ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peddie</surname> <given-names>C. J.</given-names></name> <name><surname>Collinson</surname> <given-names>L. M.</given-names></name></person-group> (<year>2014</year>). <article-title>Exploring the third dimension: volume electron microscopy comes of age</article-title>. <source>Micron</source> <volume>61</volume>, <fpage>9</fpage>&#x02013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.1016/j.micron.2014.01.009</pub-id><pub-id pub-id-type="pmid">24792442</pub-id></citation>
</ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Riudavets</surname> <given-names>M. A.</given-names></name> <name><surname>Iacono</surname> <given-names>D.</given-names></name> <name><surname>Resnick</surname> <given-names>S. M.</given-names></name> <name><surname>O&#x00027;Brien</surname> <given-names>R.</given-names></name> <name><surname>Zonderman</surname> <given-names>A. B.</given-names></name> <name><surname>Martin</surname> <given-names>L. J.</given-names></name> <etal/></person-group>. (<year>2007</year>). <article-title>Resistance to Alzheimer&#x00027;s pathology is associated with nuclear hypertrophy in neurons</article-title>. <source>Neurobiol. Aging</source> <volume>28</volume>, <fpage>1484</fpage>&#x02013;<lpage>1492</lpage>. <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2007.05.005</pub-id><pub-id pub-id-type="pmid">17599696</pub-id></citation>
</ref>
<ref id="B40">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Seyedhosseini</surname> <given-names>M.</given-names></name> <name><surname>Ellisman</surname> <given-names>M. H.</given-names></name> <name><surname>Tasdizen</surname> <given-names>T.</given-names></name></person-group> (<year>2013a</year>). <article-title>Segmentation of mitochondria in electron microscopy images using algebraic curves</article-title>, in <source>2013 IEEE 10th International Symposium on Biomedical Imaging (ISBI)</source> (<publisher-loc>San Francisco, CA</publisher-loc>).</citation>
</ref>
<ref id="B41">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Seyedhosseini</surname> <given-names>M.</given-names></name> <name><surname>Sajjadi</surname> <given-names>M.</given-names></name> <name><surname>Tasdizen</surname> <given-names>T.</given-names></name></person-group> (<year>2013b</year>). <article-title>Image segmentation with cascaded hierarchical models and logistic disjunctive normal networks</article-title>, in <source>2013 IEEE International Conference on Computer Vision</source> (<publisher-loc>Sydney, NSW</publisher-loc>).</citation>
</ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shu</surname> <given-names>X.</given-names></name> <name><surname>Lev-Ram</surname> <given-names>V.</given-names></name> <name><surname>Deerinck</surname> <given-names>T. J.</given-names></name> <name><surname>Qi</surname> <given-names>Y.</given-names></name> <name><surname>Ramko</surname> <given-names>E. B.</given-names></name> <name><surname>Davidson</surname> <given-names>M. W.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>A genetically encoded tag for correlated light and electron microscopy of intact cells, tissues, and organisms</article-title>. <source>PLoS Biol</source>. <volume>9</volume>:<fpage>e1001041</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pbio.1001041</pub-id><pub-id pub-id-type="pmid">21483721</pub-id></citation>
</ref>
<ref id="B43">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Smith</surname> <given-names>K.</given-names></name> <name><surname>Carleton</surname> <given-names>A.</given-names></name> <name><surname>Lepetit</surname> <given-names>V.</given-names></name></person-group> (<year>2009</year>). <article-title>Fast ray features for learning irregular shapes</article-title>, in <source>IEEE 12th International Conference on Computer Vision</source> (<publisher-loc>Kyoto</publisher-loc>).</citation>
</ref>
<ref id="B44">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Sommer</surname> <given-names>C.</given-names></name> <name><surname>Strahle</surname> <given-names>C.</given-names></name> <name><surname>K&#x000F6;the</surname> <given-names>U.</given-names></name> <name><surname>Hamprecht</surname> <given-names>F. A.</given-names></name></person-group> (<year>2011</year>). <article-title>ilastik: interactive learning and segmentation toolkit</article-title>, in <source>2011 IEEE 8th International Symposium on Biomedical Imaging (ISBI)</source> (<publisher-loc>Chicago, IL</publisher-loc>).</citation>
</ref>
<ref id="B45">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Tek</surname> <given-names>F.</given-names></name> <name><surname>Kroeger</surname> <given-names>S.</given-names></name> <name><surname>Mikula</surname> <given-names>S.</given-names></name> <name><surname>Hamprecht</surname> <given-names>F. A.</given-names></name></person-group> (<year>2014</year>). <article-title>Automated cell nucleus detection for large-volume electron microscopy of neural tissue</article-title>, in <source>2014 IEEE 11th International Symposium on Biomedical Imaging (ISBI)</source> (<publisher-loc>Beijing</publisher-loc>).</citation>
</ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Varshney</surname> <given-names>L. R.</given-names></name> <name><surname>Chen</surname> <given-names>B. L.</given-names></name> <name><surname>Paniagua</surname> <given-names>E.</given-names></name> <name><surname>Hall</surname> <given-names>D. H.</given-names></name> <name><surname>Chklovskii</surname> <given-names>D. B.</given-names></name></person-group> (<year>2011</year>). <article-title>Structural properties of the <italic>Caenorhabditis elegans</italic> neuronal network</article-title>. <source>PLoS Comput. Biol</source>. <volume>7</volume>:<fpage>e1001066</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pcbi.1001066</pub-id><pub-id pub-id-type="pmid">21304930</pub-id></citation>
</ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vihinen</surname> <given-names>H.</given-names></name> <name><surname>Belevich</surname> <given-names>I.</given-names></name> <name><surname>Jokitalo</surname> <given-names>E.</given-names></name></person-group> (<year>2013</year>). <article-title>Three dimensional electron microscopy of cellular organelles by serial block face SEM and ET</article-title>. <source>Microsc. Anal</source>. <volume>27</volume>, <fpage>7</fpage>&#x02013;<lpage>10</lpage>.</citation>
</ref>
<ref id="B48">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Vitaladevuni</surname> <given-names>S.</given-names></name> <name><surname>Mischenko</surname> <given-names>Y.</given-names></name> <name><surname>Genkin</surname> <given-names>A.</given-names></name> <name><surname>Chklovskii</surname> <given-names>D.</given-names></name> <name><surname>Harris</surname> <given-names>K.</given-names></name></person-group> (<year>2008</year>). <article-title>Mitochondria detection in electron microscopy images</article-title>, in <source>Workshop on Microscopy Image Analysis with Applications in Biology</source>, vol. 42, (<publisher-loc>New York, NY</publisher-loc>).</citation>
</ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wilke</surname> <given-names>S. A.</given-names></name> <name><surname>Antonios</surname> <given-names>J. K.</given-names></name> <name><surname>Bushong</surname> <given-names>E. A.</given-names></name> <name><surname>Badkoobehi</surname> <given-names>A.</given-names></name> <name><surname>Malek</surname> <given-names>E.</given-names></name> <name><surname>Hwang</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Deconstructing complexity: serial block-face electron microscopic analysis of the hippocampal mossy fiber synapse</article-title>. <source>J. Neurosci</source>. <volume>33</volume>, <fpage>507</fpage>&#x02013;<lpage>522</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.1600-12.2013</pub-id><pub-id pub-id-type="pmid">23303931</pub-id></citation>
</ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhu</surname> <given-names>X.</given-names></name> <name><surname>Perry</surname> <given-names>G.</given-names></name> <name><surname>Smith</surname> <given-names>M. A.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name></person-group> (<year>2013</year>). <article-title>Abnormal mitochondrial dynamics in the pathogenesis of Alzheimer&#x00027;s disease</article-title>. <source>J. Alzheimers Dis</source>. <volume>33</volume>, <fpage>S253</fpage>&#x02013;<lpage>S262</lpage>. <pub-id pub-id-type="doi">10.3233/JAD-2012-129005</pub-id><pub-id pub-id-type="pmid">22531428</pub-id></citation>
</ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhuravleva</surname> <given-names>E.</given-names></name> <name><surname>Gut</surname> <given-names>H.</given-names></name> <name><surname>Hynx</surname> <given-names>D.</given-names></name> <name><surname>Marcellin</surname> <given-names>D.</given-names></name> <name><surname>Bleck</surname> <given-names>C. K. E.</given-names></name> <name><surname>Genoud</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Acyl coenzyme A thioesterase Them5/Acot15 is involved in cardiolipin remodeling and fatty liver development</article-title>. <source>Mol. Cell. Biol</source>. <volume>32</volume>, <fpage>2685</fpage>&#x02013;<lpage>2697</lpage>. <pub-id pub-id-type="doi">10.1128/MCB.00312-12</pub-id><pub-id pub-id-type="pmid">22586271</pub-id></citation>
</ref>
</ref-list>
</back>
</article>
