<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="systematic-review" dtd-version="2.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2024.1376570</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroscience</subject>
<subj-group>
<subject>Systematic Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>A systematic review of automated methods to perform white matter tract segmentation</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Joshi</surname> <given-names>Ankita</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2640549/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname> <given-names>Hailong</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/546475/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Parikh</surname> <given-names>Nehal A.</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/567403/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>He</surname> <given-names>Lili</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/586755/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Imaging Research Center, Department of Radiology, Cincinnati Children's Hospital Medical Center</institution>, <addr-line>Cincinnati, OH</addr-line>, <country>United States</country></aff>
<aff id="aff2"><sup>2</sup><institution>Neurodevelopmental Disorders Prevention Center, Perinatal Institute, Cincinnati Children's Hospital Medical Center</institution>, <addr-line>Cincinnati, OH</addr-line>, <country>United States</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Radiology, University of Cincinnati College of Medicine</institution>, <addr-line>Cincinnati, OH</addr-line>, <country>United States</country></aff>
<aff id="aff4"><sup>4</sup><institution>Department of Pediatrics, University of Cincinnati College of Medicine</institution>, <addr-line>Cincinnati, OH</addr-line>, <country>United States</country></aff>
<aff id="aff5"><sup>5</sup><institution>Computer Science, Biomedical Informatics, and Biomedical Engineering, University of Cincinnati</institution>, <addr-line>Cincinnati, OH</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0001"><p>Edited by: Thomas Schultz, University of Bonn, Germany</p></fn>
<fn fn-type="edited-by" id="fn0002"><p>Reviewed by: Maxime Chamberland, Eindhoven University of Technology, Netherlands</p><p>Fan Zhang, University of Electronic Science and Technology of China, China</p></fn>
<corresp id="c001">&#x002A;Correspondence: Lili He, <email>Lili.He@cchmc.org</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>19</day>
<month>03</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>18</volume>
<elocation-id>1376570</elocation-id>
<history>
<date date-type="received">
<day>25</day>
<month>01</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>04</day>
<month>03</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2024 Joshi, Li, Parikh and He.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Joshi, Li, Parikh and He</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>White matter tract segmentation is a pivotal research area that leverages diffusion-weighted magnetic resonance imaging (dMRI) for the identification and mapping of individual white matter tracts and their trajectories. This study aims to provide a comprehensive systematic literature review on automated methods for white matter tract segmentation in brain dMRI scans. Articles on PubMed, ScienceDirect [NeuroImage, NeuroImage (Clinical), Medical Image Analysis], Scopus and IEEEXplore databases and Conference proceedings of Medical Imaging Computing and Computer Assisted Intervention Society (MICCAI) and International Symposium on Biomedical Imaging (ISBI), were searched in the range from January 2013 until September 2023. This systematic search and review identified 619 articles. Adhering to the specified search criteria using the query, &#x201C;<italic>white matter tract segmentation</italic> OR <italic>fiber tract identification</italic> OR <italic>fiber bundle segmentation</italic> OR <italic>tractography dissection</italic> OR <italic>white matter parcellation</italic> OR <italic>tract segmentation</italic>,&#x201D; 59 published studies were selected. Among these, 27% employed direct voxel-based methods, 25% applied streamline-based clustering methods, 20% used streamline-based classification methods, 14% implemented atlas-based methods, and 14% utilized hybrid approaches. The paper delves into the research gaps and challenges associated with each of these categories. Additionally, this review paper illuminates the most frequently utilized public datasets for tract segmentation along with their specific characteristics. Furthermore, it presents evaluation strategies and their key attributes. The review concludes with a detailed discussion of the challenges and future directions in this field.</p>
</abstract>
<kwd-group>
<kwd>diffusion magnetic resonance imaging (dMRI)</kwd>
<kwd>white matter tract</kwd>
<kwd>segmentation</kwd>
<kwd>systematic review</kwd>
<kwd>tract segmentation</kwd>
<kwd>tractography</kwd>
</kwd-group>
<counts>
<fig-count count="6"/>
<table-count count="8"/>
<equation-count count="0"/>
<ref-count count="157"/>
<page-count count="23"/>
<word-count count="14434"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Brain Imaging Methods</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>The development of diffusion magnetic resonance imaging (dMRI) coupled with the subsequent introduction of techniques to model water diffusion within the brain tissue using a diffusion tensor model (DTI) (<xref ref-type="bibr" rid="ref26">Delmarcelle and Hesselink, 1992</xref>; <xref ref-type="bibr" rid="ref3">Basser et al., 1994a</xref>, <xref ref-type="bibr" rid="ref4">1994b</xref>; <xref ref-type="bibr" rid="ref13">Carter et al., 2015</xref>), has led to unprecedented opportunities for noninvasive exploration of the brain&#x2019;s intricate white matter (WM) structures (<xref ref-type="bibr" rid="ref22">Clayden et al., 2007</xref>). Tractography is a technique that harnesses data derived from dMRI to reconstruct and visualize the WM pathways within the brain by tracing the likely paths of water diffusion. Tractography (<xref ref-type="bibr" rid="ref82">Mori et al., 1999</xref>; <xref ref-type="bibr" rid="ref5">Basser et al., 2000</xref>) involves the algorithmic reconstruction of these WM pathways, generating a multitude of fibers (<xref ref-type="bibr" rid="ref34">El Kouby et al., 2005</xref>) for each subject. This is followed by the delineation of the obtained fiber trajectories or streamlines into bundles or their association with anatomically well-defined tracts, a process commonly referred to as WM tract segmentation or dissection (<xref ref-type="bibr" rid="ref11">Bullock et al., 2019</xref>).</p>
<p>WM tracts in the brain serve as the communication highways that connect different regions of the brain. Accurate segmentation enables researchers and clinicians to identify specific tracts associated with particular neurological functions, including cognitive, motor, and behavioral processes (<xref ref-type="bibr" rid="ref149">Yushkevich et al., 2008</xref>; <xref ref-type="bibr" rid="ref109">Sadeghi et al., 2013</xref>). Accurate tract segmentation plays a pivotal role in comprehending alterations in the micro- and macro-structure of the brain&#x2019;s WM. It enhances our understanding of how structural connectivity shapes brain function and development. Additionally, it provides valuable insights into neurological diseases, including cognitive impairment and neurodegeneration, mental health disorders, and the aging process (<xref ref-type="bibr" rid="ref14">Catani, 2006</xref>; <xref ref-type="bibr" rid="ref25">De Belder et al., 2012</xref>; <xref ref-type="bibr" rid="ref65">Le Bihan and Johansen-Berg, 2012</xref>). Moreover, accurate WM tract segmentation holds immense clinical significance, particularly in aiding in pre-operative and intra-operative brain tumor resections. It facilitates the visualization and localization of WM tracts that may be displaced or affected by tumors (<xref ref-type="bibr" rid="ref64">Lazar et al., 2006</xref>; <xref ref-type="bibr" rid="ref18">Chen et al., 2016</xref>; <xref ref-type="bibr" rid="ref35">Essayed et al., 2017</xref>; <xref ref-type="bibr" rid="ref123">Vanderweyen et al., 2020</xref>). It is worth noting that WM tract segmentation is a very challenging task. The human brain contains millions of intertwined axonal pathways, and these fibers can cross, split, or merge, making it challenging to accurately track individual pathways.</p>
<p>Most techniques employed for WM tract segmentation are based on virtual dissection or manual approaches, which involve the meticulous delineation of regions of interest (ROIs) (<xref ref-type="bibr" rid="ref16">Catani et al., 2002</xref>; <xref ref-type="bibr" rid="ref83">Mori and van Zijl, 2007</xref>; <xref ref-type="bibr" rid="ref130">Wakana et al., 2007</xref>). These ROIs define where streamlines should pass and where streamlines should terminate. The provision of ROIs requires expert knowledge and hence manual methods incur expert labor costs. Manual methods face practical challenges in their adoption since they are time-consuming and expensive due to their high clinical and labor costs. Nevertheless, manual methods remain the gold standard for delineating WM tracts and serve as a critical benchmark for validating alternative approaches. The advent of better imaging techniques, improved image quality and higher resolutions (<xref ref-type="bibr" rid="ref122">Van Essen et al., 2012</xref>), along with the application of sophisticated post-processing techniques, has driven a significant surge in the development of automated methods for tract segmentation (<xref ref-type="bibr" rid="ref142">Yamada et al., 2009</xref>; <xref ref-type="bibr" rid="ref35">Essayed et al., 2017</xref>; <xref ref-type="bibr" rid="ref42">Ghazi et al., 2023</xref>).</p>
<p>A wide range of automated white matter tract segmentation methods have been developed over the years. While multiple works exist that review tractography methods and their applications, currently, there is limited literature available that specifically discusses the topic of delineating white matter tracts. Authors summarize the various categories that tractography segmentation methods fall under (<xref ref-type="bibr" rid="ref151">Zhang et al., 2022</xref>) when reviewing quantitative tractography methods for studying the brain&#x2019;s structural connectivity in health and disease. Recently, authors in <xref ref-type="bibr" rid="ref42">Ghazi et al. (2023)</xref> have reviewed literature focusing on deep learning approaches for tract segmentation. In this work, we extend the scope by conducting a systematic and comprehensive review of automated approaches for the segmentation of white matter tracts in the last decade. This paper contributes to the following:</p>
<list list-type="order">
<list-item><p>Review of automated tract segmentation methods explored within the last 10&#x2009;years with respect to key research questions.</p></list-item>
<list-item><p>Identify the categories of methods and their research gaps and challenges.</p></list-item>
<list-item><p>Highlight an overview of the various datasets and evaluation metrics used in the methods.</p></list-item>
<list-item><p>Discuss the future directions that can be conducted.</p></list-item>
</list>
<p>The remainder of the survey is organized as follows: Section 2 presents the review planning, Section 3 introduces the key findings as results, Section 4 summarizes and discusses the findings, Section 5 outlines the future directions, and Section 6 concludes the review.</p>
</sec>
<sec id="sec2">
<label>2</label>
<title>Review planning</title>
<p>This section is dedicated to planning the review: the comprehensive research questions related to the study are rigorously defined, and the inclusion criteria and the sources of information are detailed.</p>
<sec id="sec3">
<label>2.1</label>
<title>Key research questions</title>
<list list-type="bullet">
<list-item><p>What method is developed?</p></list-item>
<list-item><p>What dataset is used?</p></list-item>
<list-item><p>What evaluation metrics are used?</p></list-item>
<list-item><p>What category of method does the study fall under?</p></list-item>
<list-item><p>Is the code for the automatic tract segmentation method publicly available, is the practical applicability of the method discussed in terms of computation time and external validation?</p></list-item>
</list>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>Sources of information</title>
<p>The sources of information listed below were searched within the time span from <italic>January 2013 until September 2023</italic> using the query &#x201C;<italic>white matter tract segmentation</italic> OR <italic>fiber tract identification</italic> OR <italic>fiber bundle segmentation</italic> OR <italic>tractography dissection</italic> OR <italic>white matter parcellation</italic> OR <italic>tract segmentation</italic>&#x201D;</p>
<list list-type="bullet">
<list-item><p>Pubmed (<ext-link xlink:href="https://pubmed.ncbi.nlm.nih.gov/" ext-link-type="uri">https://pubmed.ncbi.nlm.nih.gov/</ext-link>)</p></list-item>
<list-item><p>Science direct (<ext-link xlink:href="https://www.sciencedirect.com" ext-link-type="uri">https://www.sciencedirect.com</ext-link>) for publication titles under NeuroImage, NeuroImage: Clinical, and Medical Image Analysis</p></list-item>
<list-item><p>Scopus (<ext-link xlink:href="https://www.scopus.com/" ext-link-type="uri">https://www.scopus.com/</ext-link>)</p></list-item>
<list-item><p>IEEE explore digital library (<ext-link xlink:href="https://ieeexplore.ieee.org/" ext-link-type="uri">https://ieeexplore.ieee.org/</ext-link>)</p></list-item>
<list-item><p>Conference publications for: Medical Imaging Computing and Computer Assisted Intervention Society (MICCAI), International Symposium on Biomedical Imaging (ISBI)</p></list-item>
</list>
</sec>
<sec id="sec5">
<label>2.3</label>
<title>Inclusion criteria</title>
<p>Inclusion requirements were: (a) original research article published in the selected journal publications of Pubmed, ScienceDirect, Scopus, IEEE Explore Digital library and conference publications MICCAI and ISBI; (b) published within the last 10&#x2009;years from January 2013 until September 2023; (c) published in English; (d) performed automated white matter tract segmentation in human brains; and (e) research articles specifically developing automated methods for white matter tract segmentation performed on deep white matter. Search strings were established via literature search and domain expertise. Specifically, title and abstract articles were searched on each of the above-mentioned sources of information using strings: white matter tract segmentation OR fiber tract identification OR fiber bundle segmentation OR tractography dissection OR white matter parcellation OR tract segmentation.</p>
</sec>
</sec>
<sec sec-type="results" id="sec6">
<label>3</label>
<title>Results</title>
<p>Our search strategy retrieved 619 articles published between January 2013 and September 2023. After articles were reviewed for definite exclusions and the bibliography of eligible articles were hand-searched, 59 articles met the inclusion criteria. <xref ref-type="fig" rid="fig1">Figure 1</xref> shows the flow diagram of the retrieved articles and the rules applied to get the resulting 59 articles. The results are presented as follows: First, we summarize the major datasets used in the studies included in this review. We then provide a list of the 59 research articles by focusing on the research questions established. These research articles are mentioned according to the categories they belong to and finally we provide a summary for the evaluation metrics used by the studies.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Flow diagram for articles retrieved in this study.</p>
</caption>
<graphic xlink:href="fnins-18-1376570-g001.tif"/>
</fig>
<sec id="sec7">
<label>3.1</label>
<title>Datasets</title>
<p>We present a list of the most commonly used imaging datasets used for the 59 studies. For each dataset we highlight the population details, the MRI acquisition details and online link to access the dataset. <xref ref-type="table" rid="tab1">Table 1</xref> lists the dataset studied.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Summary of the datasets used in the papers included in this review.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top"><bold>Dataset</bold></th>
<th align="left" valign="top"><bold>Online Link</bold></th>
<th align="left" valign="top"><bold>Subjects</bold></th>
<th align="left" valign="top"><bold>MRI details</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Human Connectome Project (HCP) (<xref ref-type="bibr" rid="ref121">Van Essen et al., 2013</xref>)</td>
<td align="left" valign="top">
<ext-link xlink:href="https://humanconnectome.org" ext-link-type="uri">https://humanconnectome.org</ext-link>
</td>
<td align="left" valign="top">1,200 healthy young adults between ages 22&#x2013;35&#x2009;years</td>
<td align="left" valign="top">1.25&#x2009;mm<sup>3</sup> isotropic resolution, 270 gradient directions with 3 b-values (1,000, 2,000, 3,000&#x2009;s/mm<sup>2</sup>) and 18 <italic>b</italic>&#x2009;=&#x2009;0 images</td>
</tr>
<tr>
<td align="left" valign="top">Developing Human Connectome Project (dHCP) (<xref ref-type="bibr" rid="ref77">Makropoulos et al., 2018</xref>)</td>
<td align="left" valign="top">
<ext-link xlink:href="http://www.developingconnectome.org" ext-link-type="uri">www.developingconnectome.org</ext-link>
</td>
<td align="left" valign="top">783 healthy newborn babies between postmenstrual ages ranging from 26 to 45&#x2009;weeks</td>
<td align="left" valign="top">1.5&#x2009;mm&#x2009;&#x00D7;&#x2009;1.5&#x2009;mm&#x2009;&#x00D7;&#x2009;3&#x2009;mm resolution; uniformly distributed set of directions on 4 shells <italic>b</italic> =&#x2009;0&#x2009;s/mm<sup>2</sup>:20, <italic>b</italic> =&#x2009;400&#x2009;s/mm<sup>2</sup>:64, <italic>b</italic> =&#x2009;1,000&#x2009;s/mm<sup>2</sup>:88, <italic>b</italic> =&#x2009;2,600&#x2009;s/mm<sup>2</sup>:128; TR/TE&#x2009;=&#x2009;3800/90&#x2009;ms</td>
</tr>
<tr>
<td align="left" valign="top">Consortium for Neuropsychiatric Phenomics (CNP) (<xref ref-type="bibr" rid="ref94">Poldrack et al., 2016</xref>)</td>
<td align="left" valign="top">
<ext-link xlink:href="http://openfmri.org" ext-link-type="uri">http://openfmri.org</ext-link>
</td>
<td align="left" valign="top">130 subjects, healthy and patient (ADHD, bipolar disorder, schizophrenia) groups between 21 and 50 years</td>
<td align="left" valign="top">2&#x2009;mm<sup>3</sup> isotropic resolution; 64 directions; TR/TE&#x2009;=&#x2009;9000/93&#x2009;ms; <italic>b</italic> =&#x2009;1,000&#x2009;s/mm<sup>2</sup></td>
</tr>
<tr>
<td align="left" valign="top">Multiple Acquisitions for Standardization of Structural Imaging Validation and Evaluation (MASSIVE) (<xref ref-type="bibr" rid="ref38">Froeling et al., 2017</xref>)</td>
<td align="left" valign="top">
<ext-link xlink:href="http://www.massive-data.org" ext-link-type="uri">www.massive-data.org</ext-link>
</td>
<td align="left" valign="top">8,000 unique dMRI volumes acquired of a single healthy subject</td>
<td align="left" valign="top">2.5&#x2009;mm<sup>3</sup> isotropic resolution, multiple shells of 125, 250, 250, 250, and 300 gradient orientations, and <italic>b</italic>-values of 500, 1,000, 2,000, 3,000, and 4,000&#x2009;s/mm<sup>2</sup> respectively, additional 204 <italic>b</italic> =&#x2009;0&#x2009;s/mm<sup>2</sup> images</td>
</tr>
<tr>
<td align="left" valign="top">Autism Brain Imaging Data Exchange (ABIDE) (<xref ref-type="bibr" rid="ref29">Di Martino et al., 2017</xref>)</td>
<td align="left" valign="top">
<ext-link xlink:href="http://fcon_1000.projects.nitrc.org/indi/abide/" ext-link-type="uri">http://fcon_1000.projects.nitrc.org/indi/abide/</ext-link>
</td>
<td align="left" valign="top">subjects between 5 and 17&#x2009;years of age</td>
<td align="left" valign="top">resolution of 3&#x2009;mm<sup>3</sup>; <italic>b</italic> =&#x2009;1,000&#x2009;s/mm<sup>2</sup>; 64 directions; TR/TE&#x2009;=&#x2009;5200/78&#x2009;ms</td>
</tr>
<tr>
<td align="left" valign="top">Rotterdam Study (<xref ref-type="bibr" rid="ref50">Hofman et al., 2015</xref>)</td>
<td align="left" valign="top">
<ext-link xlink:href="https://www.ergo-onderzoek.nl/" ext-link-type="uri">https://www.ergo-onderzoek.nl/</ext-link>
</td>
<td align="left" valign="top">9,752 dMRI scans from 5,286 participants with mean age 64.7&#x2009;&#x00B1;&#x2009;9.9&#x2009;years</td>
<td align="left" valign="top">imaging matrix of 64&#x2009;&#x00D7;&#x2009;96 zero-padded in k-space to 256&#x2009;&#x00D7;&#x2009;256 in a field of view of 210&#x2009;&#x00D7;&#x2009;210&#x2009;mm<sup>2</sup>, TR/TE&#x2009;=&#x2009;8575/82.6&#x2009;ms, 25 diffusion weighted volumes along non-colinear directions using a b-value of 1,000&#x2009;s/mm<sup>2</sup></td>
</tr>
<tr>
<td align="left" valign="top">Non-invasive Exploration of brain connectivity and Tracts (CONNECT/ARCHI) (<xref ref-type="bibr" rid="ref111">Schmitt et al., 2012</xref>)</td>
<td align="left" valign="top"><ext-link xlink:href="https://www.humanbrainproject.eu/" ext-link-type="uri">https://www.humanbrainproject.eu/</ext-link> and ARCHI database can be requested from <email>cyril.poupon@cea.fr</email></td>
<td align="left" valign="top">79 healthy subjects, age between 18 and 40&#x2009;years</td>
<td align="left" valign="top">1.71875&#x2009;&#x00D7;&#x2009;1.71875&#x2009;&#x00D7;&#x2009;1.7&#x2009;mm resolution, 60 optimized diffusion directions <italic>b</italic> =&#x2009;1,500&#x2009;s/mm<sup>2</sup>, one <italic>b</italic> =&#x2009;0 image, TR/TE&#x2009;=&#x2009;14,000/93&#x2009;ms</td>
</tr>
<tr>
<td align="left" valign="top">Growing Up in Singapore Toward Health Outcomes (GUSTO) study (<xref ref-type="bibr" rid="ref115">Soh et al., 2014</xref>)</td>
<td align="left" valign="top">
<ext-link xlink:href="http://www.gusto.sg/" ext-link-type="uri">http://www.gusto.sg/</ext-link>
</td>
<td align="left" valign="top">388 neonates screened at day 7, 30 at 6&#x2009;weeks, and/or 50 babies screened at 6&#x2009;months since birth.</td>
<td align="left" valign="top">TR/TE&#x2009;=&#x2009;7000/56&#x2009;ms; flip angle&#x2009;=&#x2009;90&#x00B0;; FOV&#x2009;=&#x2009;200&#x2009;mm&#x2009;&#x00D7;&#x2009;200&#x2009;mm; matrix size&#x2009;=&#x2009;256&#x2009;&#x00D7;&#x2009;256; 19 images with <italic>b</italic> =&#x2009;600&#x2009;s/mm<sup>2</sup> and 1 with <italic>b</italic> =&#x2009;0&#x2009;s/mm<sup>2</sup></td>
</tr>
<tr>
<td align="left" valign="top">Parkinson&#x2019;s Progression Markers Initiative (PPMI) (<xref ref-type="bibr" rid="ref79">Marek et al., 2011</xref>)</td>
<td align="left" valign="top">
<ext-link xlink:href="https://www.ppmi-info.org" ext-link-type="uri">https://www.ppmi-info.org</ext-link>
</td>
<td align="left" valign="top">400 recently diagnosed of Parkinson disease and 200 healthy subjects</td>
<td align="left" valign="top">2&#x2009;mm<sup>3</sup> isotropic resolution; 64 directions; TE/TR&#x2009;=&#x2009;7600/88&#x2009;ms; <italic>b</italic> =&#x2009;1,000&#x2009;s/mm<sup>2</sup></td>
</tr>
<tr>
<td align="left" valign="top">Adolescent Brain Cognitive Development (ABCD) (<xref ref-type="bibr" rid="ref128">Volkow et al., 2018</xref>)</td>
<td align="left" valign="top">
<ext-link xlink:href="https://abcdstudy.org/" ext-link-type="uri">https://abcdstudy.org/</ext-link>
</td>
<td align="left" valign="top">10,000 children starting at 9&#x2013;10&#x2009;years up to ages 19&#x2013;21</td>
<td align="left" valign="top">1.7&#x2009;mm<sup>3</sup> resolution; 96 directions; TR/TE&#x2009;=&#x2009;4100/88&#x2009;ms; <italic>b</italic> =&#x2009;3,000&#x2009;s/mm<sup>2</sup></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>For each study, we give the online availability of the dataset, the population details involved in the study and the MRI acquisition details.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec8">
<label>3.2</label>
<title>Automated methods for white matter tract segmentation</title>
<p>All automated methods included in this review can be classified into categories based on the specific technique used for automatic tract segmentation. The high-level categories have been specified in <xref ref-type="table" rid="tab2">Table 2</xref>, noting the references in which they were implemented. <xref ref-type="fig" rid="fig2">Figure 2</xref> shows a bar graph of the distribution of studies within the categories. Some studies have used methods which have been developed as a combination of multiple categories and are referred to as a hybrid approach. The goal of this section is to investigate the findings corresponding to the questions framed in the review planning phase in Section 2. <xref ref-type="table" rid="tab3">Tables 3</xref>&#x2013;<xref ref-type="table" rid="tab7">7</xref> give a list of each of the studies and summarizes their inclusion criteria; the dataset used in the study, an overview of the approach used, the evaluation metrics used to validate the results in the work, and finally the practical application of the study in terms of public availability of the algorithm, the computational runtime to segment white matter tracts for a single subject and whether external validation has been conducted.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Categories of methods identified in this review and the corresponding studies included.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top"><bold>Category</bold></th>
<th align="left" valign="top"><bold>References</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Direct voxel-based</td>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref88">Ocegueda and Rivera (2013)</xref>, <xref ref-type="bibr" rid="ref136">Wasserthal et al. (2018</xref>, <xref ref-type="bibr" rid="ref135">2019)</xref>, <xref ref-type="bibr" rid="ref30">Dong et al. (2019)</xref>, <xref ref-type="bibr" rid="ref95">Pomiecko et al. (2019)</xref>, <xref ref-type="bibr" rid="ref73">Lu et al. (2020</xref>, <xref ref-type="bibr" rid="ref74">2021</xref>, <xref ref-type="bibr" rid="ref75">2022)</xref>, <xref ref-type="bibr" rid="ref85">Nelkenbaum et al. (2020)</xref>, <xref ref-type="bibr" rid="ref66">Li et al. (2021)</xref>, <xref ref-type="bibr" rid="ref69">Liu et al. (2022</xref>, <xref ref-type="bibr" rid="ref70">2023)</xref>, <xref ref-type="bibr" rid="ref76">Lucena et al. (2022)</xref>, <xref ref-type="bibr" rid="ref131">Wang et al. (2022)</xref>, and <xref ref-type="bibr" rid="ref146">Yin et al. (2022)</xref></td>
</tr>
<tr>
<td align="left" valign="top">Streamline-based clustering</td>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref119">Tunc et al. (2014)</xref>, <xref ref-type="bibr" rid="ref56">Jin and Ceting&#x00FC;l (2015)</xref>, <xref ref-type="bibr" rid="ref59">Kamali and Stashuk (2016)</xref>, <xref ref-type="bibr" rid="ref61">Kumar and Desrosiers (2016)</xref>, <xref ref-type="bibr" rid="ref47">Gupta et al. (2017</xref>, <xref ref-type="bibr" rid="ref46">2018)</xref>, <xref ref-type="bibr" rid="ref107">Roman et al. (2017)</xref>, <xref ref-type="bibr" rid="ref114">Siless et al. (2018)</xref>, <xref ref-type="bibr" rid="ref126">V&#x00E1;zquez et al. (2020)</xref>, <xref ref-type="bibr" rid="ref143">Yang et al. (2020)</xref>, <xref ref-type="bibr" rid="ref20">Chen et al. (2021</xref>, <xref ref-type="bibr" rid="ref21">2023)</xref>, <xref ref-type="bibr" rid="ref139">Xu et al. (2021)</xref>, <xref ref-type="bibr" rid="ref71">Logiraj et al. (2021a)</xref>, and <xref ref-type="bibr" rid="ref157">Zhao et al. (2022)</xref></td>
</tr>
<tr>
<td align="left" valign="top">Streamline-based classification</td>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref102">Ratnarajah and Qiu (2014)</xref>, <xref ref-type="bibr" rid="ref49">Heker et al. (2016)</xref>, <xref ref-type="bibr" rid="ref86">Ngattai Lam et al. (2018)</xref>, <xref ref-type="bibr" rid="ref7">Bert&#x00F2; et al. (2019</xref>, <xref ref-type="bibr" rid="ref8">2021)</xref>, <xref ref-type="bibr" rid="ref68">Liu et al. (2019)</xref>, <xref ref-type="bibr" rid="ref120">Ugurlu et al. (2019)</xref>, <xref ref-type="bibr" rid="ref152">Zhang et al. (2019</xref>, <xref ref-type="bibr" rid="ref153">2020)</xref>, <xref ref-type="bibr" rid="ref137">Wu et al. (2020)</xref>, <xref ref-type="bibr" rid="ref72">Logiraj et al. (2021b)</xref>, and <xref ref-type="bibr" rid="ref33">Dumais et al. (2023)</xref></td>
</tr>
<tr>
<td align="left" valign="top">Atlas-based</td>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref57">Jin et al. (2013)</xref>, <xref ref-type="bibr" rid="ref147">Yoo et al. (2015)</xref>, <xref ref-type="bibr" rid="ref62">Labra et al. (2017)</xref>, <xref ref-type="bibr" rid="ref112">Sharmin et al. (2018)</xref>, <xref ref-type="bibr" rid="ref155">Zhang et al. (2018)</xref>, <xref ref-type="bibr" rid="ref125">V&#x00E1;zquez et al. (2019)</xref>, <xref ref-type="bibr" rid="ref58">Jordan et al. (2021)</xref>, and <xref ref-type="bibr" rid="ref101">Radwan et al. (2022)</xref></td>
</tr>
<tr>
<td align="left" valign="top">Hybrid</td>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref133">Wassermann et al. (2013</xref>, <xref ref-type="bibr" rid="ref134">2016)</xref>, <xref ref-type="bibr" rid="ref17">Chekir et al. (2014)</xref>, <xref ref-type="bibr" rid="ref87">O&#x2019;Donnell et al. (2017)</xref>, <xref ref-type="bibr" rid="ref41">Garyfallidis et al. (2018)</xref>, <xref ref-type="bibr" rid="ref27">Delmonte et al. (2019)</xref>, <xref ref-type="bibr" rid="ref93">Peretzke et al. (2023)</xref>, and <xref ref-type="bibr" rid="ref140">Xu et al. (2023)</xref></td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Direct voxel-based methods for Automated White Matter Tract Segmentation.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="10"><bold>Direct Voxel-based methods</bold></th>
</tr>
<tr>
<th align="left" valign="top" rowspan="2"><bold>Author/Year/Citation</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Dataset/No. of Subjects</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Main Context</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Architecture</bold></th>
<th align="center" valign="top" rowspan="2"><bold>No. of tracts segmented</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Data Augmentation</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Performance Metrics</bold></th>
<th align="center" valign="top" colspan="3"><bold>Practical Application</bold></th>
</tr>
<tr>
<th align="left" valign="top"><bold>Code</bold></th>
<th align="left" valign="top"><bold>Runtime per subject</bold></th>
<th align="left" valign="top"><bold>External validation</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Liu, Wan, et al./2023/ (<xref ref-type="bibr" rid="ref70">Liu et al., 2023</xref>)</td>
<td align="left" valign="top">HCP/105; Private/16</td>
<td align="left" valign="top">- Transfer knowledge of pretrained CNN using fine-tuning strategy for new tracts with only a single annotated scan<break/>- Use extensive data augmentation</td>
<td align="left" valign="top">2D U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>)</td>
<td align="center" valign="top">12</td>
<td align="left" valign="top">Random Cutout,<break/>Tract Cutout</td>
<td align="left" valign="top">Dice: 0.619&#x2009;~&#x2009;0.693</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Lucena, Oeslle, et al./2022/ (<xref ref-type="bibr" rid="ref76">Lucena et al., 2022</xref>)</td>
<td align="left" valign="top">HCP/105</td>
<td align="left" valign="top">- Based on 3D nnUNet with raw dMRI intensities transformed into spherical harmonics (SH) space<break/>- Also output uncertainty measurement with respect to groundtruth</td>
<td align="left" valign="top">3D nnUNet (<xref ref-type="bibr" rid="ref54">Isensee et al., 2021</xref>)</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">3D rotation to both the spatial location and SH coefficients</td>
<td align="left" valign="top">Dice: 0.82<break/>Sensitivity: 0.85&#x2009;~&#x2009;0.86<break/>Specificity: 0.78&#x2009;~&#x2009;0.80<break/>ASSD: 0.63&#x2009;~&#x2009;0.66<break/>Hausdorff distance: 9.24&#x2009;~&#x2009;10.57</td>
<td align="left" valign="top"><ext-link xlink:href="https://github.com/OeslleLucena/TractSegmentation" ext-link-type="uri">https://github.com/OeslleLucena/TractSegmentation</ext-link><break/>(Link inactive)</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Lu, Qi, et al./2022/ (<xref ref-type="bibr" rid="ref75">Lu et al., 2022</xref>)</td>
<td align="left" valign="top">HCP/100; Private/12</td>
<td align="left" valign="top">- Transfer knowledge of pretrained CNN using fine-tuning strategy for new tracts with only few annotated scans<break/>- Utilize data augmentation strategy for learning in few-shot setting</td>
<td align="left" valign="top">2D U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>)</td>
<td align="center" valign="top">12</td>
<td align="left" valign="top">Mixing-based data augmentation (<xref ref-type="bibr" rid="ref150">Zhang et al., 2017</xref>; <xref ref-type="bibr" rid="ref148">Yun et al., 2019</xref>)</td>
<td align="left" valign="top">Dice: 0.780&#x2009;~&#x2009;0.846 Relative Volume Difference (RVD): 0.129&#x2009;~&#x2009;0.156</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Liu, Wan, et al./2022/ (<xref ref-type="bibr" rid="ref69">Liu et al., 2022</xref>)</td>
<td align="left" valign="top">HCP/100; Private/17</td>
<td align="left" valign="top">- Utilize tract correlation by embedding tract labels as a vector<break/>- Integrate label embedding with segmentation module built using TractSeg (<xref ref-type="bibr" rid="ref136">Wasserthal et al., 2018</xref>)</td>
<td align="left" valign="top">2D U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>)</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">Angular and spatial downsampling of dMRI</td>
<td align="left" valign="top">Dice: 0.582&#x2009;~&#x2009;0.851</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/liuwan0208/TractSegWithLabelEmbedding" ext-link-type="uri">https://github.com/liuwan0208/TractSegWithLabelEmbedding</ext-link>
</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Wang, Zhenwei, et al./2022/ (<xref ref-type="bibr" rid="ref131">Wang et al., 2022</xref>)</td>
<td align="left" valign="top">HCP/205<break/>3D Fiber atlas (<xref ref-type="bibr" rid="ref155">Zhang et al., 2018</xref>)</td>
<td align="left" valign="top">- Represent the spatial distribution and shape of fibers using a novel descriptor called FiberGeoMap</td>
<td align="left" valign="top">Transformer (<xref ref-type="bibr" rid="ref124">Vaswani et al., 2017</xref>)</td>
<td align="center" valign="top">103</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">Precision: 0.9279<break/>Recall: 0.9478<break/>Accuracy: 0.9319<break/>Dice: 0.9268</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/Garand0o0/FiberTractSegmentation" ext-link-type="uri">https://github.com/Garand0o0/FiberTractSegmentation</ext-link>
</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Yin, Haoran, et al./2022/ (<xref ref-type="bibr" rid="ref146">Yin et al., 2022</xref>)</td>
<td align="left" valign="top">HCP/105</td>
<td align="left" valign="top">- Utilized a modified U-net architecture to use a dense crisscross attention mechanism</td>
<td align="left" valign="top">CCNet (<xref ref-type="bibr" rid="ref51">Huang et al., 2023</xref>)</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">Elastic Deformation,<break/>rotation, resampling, gaussian noise, displacement, zooming</td>
<td align="left" valign="top">Dice: 0.843</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Lu, Qi, Yuxing Li, and Chuyang Ye/ 2021/ (<xref ref-type="bibr" rid="ref74">Lu et al., 2021</xref>)</td>
<td align="left" valign="top">HCP/155</td>
<td align="left" valign="top">- Exploit self-supervised learning since pretext tasks do not require manual annotations<break/>- Transfer knowledge learned in pretraining using fine-tuning</td>
<td align="left" valign="top">2D U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>)</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">Angular and spatial downsampling of dMRI</td>
<td align="left" valign="top">Dice: 0.813<break/>RVD: 0.128</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Li, Siqi, et al./2021/ (<xref ref-type="bibr" rid="ref66">Li et al., 2021</xref>)</td>
<td align="left" valign="top">HCP/102</td>
<td align="left" valign="top">- Utilize fractional anisotropy (FA) images and T1 weighted images<break/>- Combine output of two parallel architectures for final output</td>
<td align="left" valign="top">2D U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>)</td>
<td align="center" valign="top">1</td>
<td align="left" valign="top">Cropping, Contrast augmentation,<break/>Brightness augmentation,<break/>Hue augmentation</td>
<td align="left" valign="top">Dice: 0.855</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Lu, Qi, Yuxing Li, and Chuyang Ye/2020/ (<xref ref-type="bibr" rid="ref73">Lu et al., 2020</xref>)</td>
<td align="left" valign="top">HCP/155</td>
<td align="left" valign="top">- Exploit self-supervised learning along with pseudo-labelling</td>
<td align="left" valign="top">2D U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>)</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">Dice: 0.761&#x2009;~&#x2009;0.768</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Li, Bo, et al./2020/ (<xref ref-type="bibr" rid="ref67">Li et al., 2020</xref>)</td>
<td align="left" valign="top">Rotterdam Study/5286<break/>Iris Study (<xref ref-type="bibr" rid="ref116">Steketee et al., 2016</xref>)/&#x2212;</td>
<td align="left" valign="top">- Utilize 4D diffusion tensor image directly as input<break/>- Separate network trained for each tract</td>
<td align="left" valign="top">3D U-net</td>
<td align="center" valign="top">25</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">Dice: 0.72&#x2009;~&#x2009;0.83</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">0.49&#x2009;s</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Nelkenbaum, Ilya, et al./2020/ (<xref ref-type="bibr" rid="ref85">Nelkenbaum et al., 2020</xref>)</td>
<td align="left" valign="top">HCP/105</td>
<td align="left" valign="top">- Utilize both T1-weighted and principal direction of diffusion (PDD) images as input</td>
<td align="left" valign="top">VNet (<xref ref-type="bibr" rid="ref81">Milletari et al., 2016</xref>)</td>
<td align="center" valign="top">14</td>
<td align="left" valign="top">Angular and spatial downsampling of dMRI</td>
<td align="left" valign="top">Dice: 0.722&#x2009;~&#x2009;0.869</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Wasserthal, Jakob, et al./2019/ (<xref ref-type="bibr" rid="ref135">Wasserthal et al., 2019</xref>)</td>
<td align="left" valign="top">HCP/105</td>
<td align="left" valign="top">- Built on top of TractSeg (<xref ref-type="bibr" rid="ref136">Wasserthal et al., 2018</xref>)<break/>- Module for tract start and end segmentation added<break/>- Module for tract orientation mapping (TOM) prediction added</td>
<td align="left" valign="top">2D U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>)</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">Rotation, Elastic deformation, Displacement, Zooming, Resampling, Gaussian noise</td>
<td align="left" valign="top">Dice: 0.74&#x2009;~&#x2009;0.85</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/MIC-DKFZ/TractSeg/" ext-link-type="uri">https://github.com/MIC-DKFZ/TractSeg/</ext-link>
</td>
<td align="left" valign="top">8.95&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Pomiecko, Kristofer, et al./ 2019/ (<xref ref-type="bibr" rid="ref95">Pomiecko et al., 2019</xref>)</td>
<td align="left" valign="top">Private/240</td>
<td align="left" valign="top">- Utilize whole brain MRI diffusion anisotropy maps as input<break/>- Separate network trained for each tract</td>
<td align="left" valign="top">Multi-scale 3D U-net based on<break/>DeepMedic (<xref ref-type="bibr" rid="ref60">Kamnitsas et al., 2017</xref>)</td>
<td align="center" valign="top">12</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">Dice: 0.72</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">16&#x2009;s</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Dong, Xiaofeng, et al./2019/ (<xref ref-type="bibr" rid="ref30">Dong et al., 2019</xref>)</td>
<td align="left" valign="top">HCP/105<break/>Human Brain Data Sharing Initiative (HBDSI)/&#x2212;</td>
<td align="left" valign="top">- Utilize both T1-weighted images and fiber orientation distribution function (fODF) as input</td>
<td align="left" valign="top">2D U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>)</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">Edge enhancing diffusion filter</td>
<td align="left" valign="top">Dice: 0.832</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Wasserthal, Jakob, Peter Neher, and Klaus H. Maier-Hein/ 2018/ (<xref ref-type="bibr" rid="ref136">Wasserthal et al., 2018</xref>)</td>
<td align="left" valign="top">HCP/105</td>
<td align="left" valign="top">- Utilizes fiber orientation distribution function (fODF) peaks as input<break/>- Semi-automatically generated binary segmentations for 72 tracts made public</td>
<td align="left" valign="top">2D U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>)<break/>named<break/>TractSeg</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">Rotation, Elastic deformation, Displacement, Zooming, Resampling, Gaussian noise, Contrast augmentation, Brightness augmentation</td>
<td align="left" valign="top">Dice: 0.82&#x2009;~&#x2009;0.84</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/MIC-DKFZ/TractSeg/" ext-link-type="uri">https://github.com/MIC-DKFZ/TractSeg/</ext-link>
</td>
<td align="left" valign="top">1&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Ocegueda, Omar, and Mariano Rivera/2013/ (<xref ref-type="bibr" rid="ref88">Ocegueda and Rivera, 2013</xref>)</td>
<td align="left" valign="top">2012 HARDI Reconstruction Challenge Dataset/&#x2212;<break/>Phantom/&#x2212;</td>
<td align="left" valign="top">- Represent DWI signal using a Multi-Tensor Field model<break/>- Points are embedded using eigenvectors to perform segmentation</td>
<td align="left" valign="top">Entropy-Controlled Quadratic Markov Measure Field (EC-QMMF)</td>
<td align="center" valign="top">16</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">Qualitative Results</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">14&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Streamline-based clustering methods for Automated White Matter Tract Segmentation.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="9"><bold>Streamline-based clustering methods</bold></th>
</tr>
<tr>
<th align="left" valign="top" rowspan="2"><bold>Author/Year/Citation</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Dataset/No. of Subjects</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Main Context</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Clustering Algorithm</bold></th>
<th align="center" valign="top" rowspan="2"><bold>No. of fiber clusters</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Performance Metrics</bold></th>
<th align="center" valign="top" colspan="3"><bold>Practical Application</bold></th>
</tr>
<tr>
<th align="left" valign="top"><bold>Code</bold></th>
<th align="left" valign="top"><bold>Runtime</bold>
<break/><bold>per subject</bold></th>
<th align="left" valign="top"><bold>External Validation</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Chen, Yuqian, et al./2023/ (<xref ref-type="bibr" rid="ref21">Chen et al., 2023</xref>)</td>
<td align="left" valign="top">HCP/50; CNP/40; PPMI/30</td>
<td align="left" valign="top">- Trained using self-supervised learning with the pretext task of predicting pairwise fiber distances</td>
<td align="left" valign="top">K-Means; Deep Convolutional Embedded Clustering</td>
<td align="center" valign="top">800</td>
<td align="left" valign="top">Davies-Bouldin Index (DB): 2.014&#x2009;~&#x2009;2.119 White Matter Parcellation Generalization (WMPG): 0.970&#x2009;~&#x2009;0.996 Tract Anatomical Profile Coherence (TAPC): 0.830&#x2009;~&#x2009;0.844 Tract Surface Profile Coherence (TSPC): 0.476&#x2009;~&#x2009;0.601</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/SlicerDMRI/DFC" ext-link-type="uri">https://github.com/SlicerDMRI/DFC</ext-link>
</td>
<td align="left" valign="top">15&#x2009;~&#x2009;110&#x2009;s</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Zhao, Yi, et al./2022/ (<xref ref-type="bibr" rid="ref157">Zhao et al., 2022</xref>)</td>
<td align="left" valign="top">HCP</td>
<td align="left" valign="top">- Multimodal dMRI and fMRI data (extracted BOLD signals) used as input for clustering</td>
<td align="left" valign="top">Riemannian metric geodesic distance to measure structural and functional differences for clustering fibers</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">Mean undirected euclidean distance (UE)<break/>Mean functional correlation (FC)</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Chen, Yuqian, et al./2021/ (<xref ref-type="bibr" rid="ref20">Chen et al., 2021</xref>)</td>
<td align="left" valign="top">HCP/200</td>
<td align="left" valign="top">- Based on self-supervised learning with the pretext task of pairwise fiber distance prediction</td>
<td align="left" valign="top">Siamese Networks, K-means, CNN</td>
<td align="center" valign="top">800</td>
<td align="left" valign="top">WMPG: 99.35%<break/>TAPC: 0.836</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">205&#x2009;s</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Xu, Chaoqing, et al./2021/ (<xref ref-type="bibr" rid="ref139">Xu et al., 2021</xref>)</td>
<td align="left" valign="top">Private/&#x2212;</td>
<td align="left" valign="top">- Based on encoding streamlines into 31 features and fed to encoder-decoder type architecture</td>
<td align="left" valign="top">Improved Deep Embedded Clustering (IDEC)</td>
<td align="center" valign="top">10</td>
<td align="left" valign="top">Qualitative results; Expert assessment</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">3&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Logiraj, Kumaralingam, et al./2021/ (<xref ref-type="bibr" rid="ref71">Logiraj et al., 2021a</xref>)</td>
<td align="left" valign="top">ADNI/20</td>
<td align="left" valign="top">- Based on geometrical curve features and multi-feature matching</td>
<td align="left" valign="top">Progressive clustering of large clusters of curves into smaller ones</td>
<td align="center" valign="top">6</td>
<td align="left" valign="top">Accuracy: 86%&#x2009;~&#x2009;87%</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">V&#x00E1;zquez, Andrea, et al./2020/ (<xref ref-type="bibr" rid="ref126">V&#x00E1;zquez et al., 2020</xref>)</td>
<td align="left" valign="top">HARDI ARCHI/50</td>
<td align="left" valign="top">- Based on refining and merging clusters</td>
<td align="left" valign="top">Fast Fiber Clustering (FFClust)</td>
<td align="center" valign="top">150&#x2013;200</td>
<td align="left" valign="top">Davies Bouldin Index (DB): 0.7&#x2009;~&#x2009;0.75<break/>Execution Time: 1.99&#x2009;min for 1 subject with 1 million fibers and 45&#x2009;s in parallel.</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/andvazva/FFClust" ext-link-type="uri">https://github.com/andvazva/FFClust</ext-link>
</td>
<td align="left" valign="top">9.92&#x2009;s</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Yang, Zhipeng, et al./2020/ (<xref ref-type="bibr" rid="ref143">Yang et al., 2020</xref>)</td>
<td align="left" valign="top">Private/7</td>
<td align="left" valign="top">- Based on using multi-modal information by combining spatial features and fMRI signals in WM</td>
<td align="left" valign="top">Gaussian Mixture Model (GMM)<break/>and Expectation Maximization (EM)</td>
<td align="center" valign="top">48</td>
<td align="left" valign="top">Hausdorff distance: 4.1&#x2009;~&#x2009;48.4</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Siless, Viviana, et al./2018/ (<xref ref-type="bibr" rid="ref114">Siless et al., 2018</xref>)</td>
<td align="left" valign="top">HCP/32</td>
<td align="left" valign="top">- Based on a novel anatomical similarity measure</td>
<td align="left" valign="top">Normalized Cuts (<xref ref-type="bibr" rid="ref10">Brun et al., 2004</xref>; <xref ref-type="bibr" rid="ref113">Shi and Malik, 2000</xref>)</td>
<td align="center" valign="top">200</td>
<td align="left" valign="top">Dice: 0.55&#x2013;0.60</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">2.45&#x2009;~&#x2009;2392.37&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Gupta, Vikash, et al./2018/ (<xref ref-type="bibr" rid="ref46">Gupta et al., 2018</xref>)</td>
<td align="left" valign="top">PPMI/226</td>
<td align="left" valign="top">- Use CNN to learn shape features and cluster streamlines</td>
<td align="left" valign="top">Convolutional Neural Network (CNN)</td>
<td align="center" valign="top">10</td>
<td align="left" valign="top">Accuracy: 97%</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Gupta, Vikash, et al./2017/ (<xref ref-type="bibr" rid="ref47">Gupta et al., 2017</xref>)</td>
<td align="left" valign="top">Private/42</td>
<td align="left" valign="top">- Use CNN to learn shape features and cluster streamlines</td>
<td align="left" valign="top">Convolutional Neural Network (CNN)</td>
<td align="center" valign="top">17</td>
<td align="left" valign="top">Qualitative results</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Rom&#x00E1;n, Claudio, et al./2017/ (<xref ref-type="bibr" rid="ref107">Roman et al., 2017</xref>)</td>
<td align="left" valign="top">Private/74</td>
<td align="left" valign="top">- Use intersubject hierarchical clustering of fibers<break/>- Create an atlas of identified bundles to promote automatic labeling</td>
<td align="left" valign="top">Hierarchical clustering</td>
<td align="center" valign="top">93</td>
<td align="left" valign="top">Lateralization index (<xref ref-type="bibr" rid="ref15">Catani et al., 2012</xref>): &#x2013;0.171&#x2009;~&#x2009;0.389</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">2.6&#x2009;~&#x2009;3.4&#x2009;h</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Kamali, Tahereh, and Daniel Stashuk/2016/ (<xref ref-type="bibr" rid="ref59">Kamali and Stashuk, 2016</xref>)</td>
<td align="left" valign="top">JHU DTI (<ext-link xlink:href="http://lbam.med.jhmi.edu" ext-link-type="uri">http://lbam.med.jhmi.edu</ext-link>)/15</td>
<td align="left" valign="top">- Based on distances of nearest neighbors of individual fibers<break/>- Separate high densities (smaller distances) from lower densities (higher distances)</td>
<td align="left" valign="top">Neighborhood Distance Entropy Consistency (NDEC)</td>
<td align="center" valign="top">3</td>
<td align="left" valign="top">Dice: 0.94<break/>Density-Based Clustering Validation (DBCV): 0.71</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">2&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Kumar, Kuldeep, and Christian Desrosiers /2016/ (<xref ref-type="bibr" rid="ref61">Kumar and Desrosiers, 2016</xref>)</td>
<td align="left" valign="top">HCP/10</td>
<td align="left" valign="top">- Atlas created from multi-subject data by learning a compact dictionary of training fibers describing the whole dataset</td>
<td align="left" valign="top">Kernel Sparse Clustering (KSC)</td>
<td align="center" valign="top">4</td>
<td align="left" valign="top">0.634&#x2009;~&#x2009;0.809</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">0.876&#x2009;~&#x2009;2.736&#x2009;s</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Jin, Yan, and H. Ertan Ceting&#x00FC;l/2015/(<xref ref-type="bibr" rid="ref56">Jin and Ceting&#x00FC;l, 2015</xref>)</td>
<td align="left" valign="top">Neurospin MR phantom dataset (<xref ref-type="bibr" rid="ref98">Poupon et al., 2008</xref>)/65 HCP/10</td>
<td align="left" valign="top">- Group fibers growing from a manually selected ROI and monitor divergence of fibers through drift detection while tractography is performed</td>
<td align="left" valign="top">Affinity Propagation (AP) (<xref ref-type="bibr" rid="ref37">Frey and Dueck, 2007</xref>)</td>
<td align="center" valign="top">5</td>
<td align="left" valign="top">Dice Coefficient: 0.91&#x2009;~&#x2009;1.0</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">2&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Tun&#x00E7;, Birkan, et al./2014/ (<xref ref-type="bibr" rid="ref119">Tunc et al., 2014</xref>)</td>
<td align="left" valign="top">Private/6</td>
<td align="left" valign="top">- Based on a connectivity-based representation of fibers<break/>- Also generate a fiber clustering atlas which is used for further clustering unknown subjects</td>
<td align="left" valign="top">Gaussian Mixture Model (GMM) (<xref ref-type="bibr" rid="ref103">Reynolds et al., 2000</xref>)<break/>and Expectation Maximization (EM) (<xref ref-type="bibr" rid="ref28">Dempster et al., 1977</xref>)</td>
<td align="center" valign="top">327</td>
<td align="left" valign="top">Dice: 0.62&#x2009;~&#x2009;0.93</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>Streamline-based classification methods for Automated White Matter Tract Segmentation.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="9"><bold>Streamline-based classification methods</bold></th>
</tr>
<tr>
<th align="left" valign="top" rowspan="2"><bold>Author/Year/Citation</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Dataset/No. of subjects</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Main context</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Architecture</bold></th>
<th align="center" valign="top" rowspan="2"><bold>No. of fiber clusters</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Performance metrics</bold></th>
<th align="center" valign="top" colspan="3"><bold>Practical application</bold></th>
</tr>
<tr>
<th align="left" valign="top"><bold>Code</bold></th>
<th align="left" valign="top"><bold>Runtime per subject</bold></th>
<th align="left" valign="top"><bold>External validation</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Dumais, F&#x00E9;lix, et al./2023/ (<xref ref-type="bibr" rid="ref33">Dumais et al., 2023</xref>)</td>
<td align="left" valign="top">TractoInferno (<xref ref-type="bibr" rid="ref97">Poulin et al., 2022</xref>)/354; HCP/1200; MyeloInferno/45; ADNI/23; PPMI/34</td>
<td align="left" valign="top">- Based on training an autoencoder on contrastive loss using whole brain tractograms</td>
<td align="left" valign="top">AutoEncoder</td>
<td align="center" valign="top">27</td>
<td align="left" valign="top">Dice: 0.74&#x2009;&#x00B1;&#x2009;0.08</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/scil-vital/fiesta" ext-link-type="uri">https://github.com/scil-vital/fiesta</ext-link>
</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Bert&#x00F2;, Giulia, et al./2021/ (<xref ref-type="bibr" rid="ref8">Bert&#x00F2; et al., 2021</xref>)</td>
<td align="left" valign="top">HCP-minor/105; HCP-IFOF/30; HCP-major/105; Private/10</td>
<td align="left" valign="top">- Based on vector representation using anatomical and geometrical information of streamlines</td>
<td align="left" valign="top">Linear Classifier</td>
<td align="center" valign="top">500</td>
<td align="left" valign="top">Dice: 0.80&#x2009;~&#x2009;0.91</td>
<td align="left" valign="top">
<ext-link xlink:href="https://brainlife.io" ext-link-type="uri">https://brainlife.io</ext-link>
</td>
<td align="left" valign="top">3&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Logiraj, Kumaralingam, et al./2021/ (<xref ref-type="bibr" rid="ref72">Logiraj et al., 2021b</xref>)</td>
<td align="left" valign="top">Private/15</td>
<td align="left" valign="top">- Based on segmenting 3D fiber curves into bundles</td>
<td align="left" valign="top">PointNet (<xref ref-type="bibr" rid="ref100">Qi et al., 2017</xref>)</td>
<td align="center" valign="top">10</td>
<td align="left" valign="top">Accuracy: 97.06%<break/>Precision: 0.98&#x2009;~&#x2009;1.0<break/>Recall: 0.91&#x2009;~&#x2009;1.0</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Zhang, Fan, et al./2020/ (<xref ref-type="bibr" rid="ref153">Zhang et al., 2020</xref>)</td>
<td align="left" valign="top">HCP/100; dHCP/40; ABCD/50;<break/>CNP/50; PPMI/50; BTP/39</td>
<td align="left" valign="top">- Based on fiber descriptor called FiberMap (<xref ref-type="bibr" rid="ref153">Zhang et al., 2020</xref>)</td>
<td align="left" valign="top">2D CNN</td>
<td align="center" valign="top">54</td>
<td align="left" valign="top">Accuracy: 90.99%<break/>Recall: 85.67%<break/>Precision: 88.47%<break/>Tract Identification Rate: 99.17%&#x2009;~&#x2009;99.96%<break/>Weighted Dice: 0.91&#x2009;~&#x2009;0.97</td>
<td align="left" valign="top">
<ext-link xlink:href="http://dmri.slicer.org" ext-link-type="uri">http://dmri.slicer.org</ext-link>
</td>
<td align="left" valign="top">8&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Wu, Ye, et al./2020/ (<xref ref-type="bibr" rid="ref137">Wu et al., 2020</xref>)</td>
<td align="left" valign="top">HCP/105</td>
<td align="left" valign="top">- Based on representing each fiber bundle by compact dictionary</td>
<td align="left" valign="top">Dictionary Learning Tool DICTOL (<xref ref-type="bibr" rid="ref129">Vu and Monga, 2017</xref>)</td>
<td align="center" valign="top">72</td>
<td align="left" valign="top">Accuracy: about 0.6&#x2013;1.0</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Zhang, Fan, et al./2019/(<xref ref-type="bibr" rid="ref152">Zhang et al., 2019</xref>)</td>
<td align="left" valign="top">dHCP/40; ABIDE/70; HCP/100; CNP/204; PPMI/144; BTP/39</td>
<td align="left" valign="top">- Based on fiber descriptor called FiberMap (<xref ref-type="bibr" rid="ref153">Zhang et al., 2020</xref>)</td>
<td align="left" valign="top">2D CNN</td>
<td align="center" valign="top">54</td>
<td align="left" valign="top">Accuracy: 90.99% Recall: 85.67%<break/>Precision: 88.47%<break/>Tract Identification Rate: 99.17&#x2013;100%</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/SlicerDMRI/DeepWMA" ext-link-type="uri">https://github.com/SlicerDMRI/DeepWMA</ext-link>
</td>
<td align="left" valign="top">8&#x2009;min</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Liu, Feihong, et al./2019/ (<xref ref-type="bibr" rid="ref68">Liu et al., 2019</xref>)</td>
<td align="left" valign="top">HCP/38</td>
<td align="left" valign="top">- Based on representing streamlines as graphs<break/>- Separate network trained for each bundle</td>
<td align="left" valign="top">Graph Convolutional Neural Network (GCNN)</td>
<td align="center" valign="top">12</td>
<td align="left" valign="top">Precision: 90.5%&#x2009;~&#x2009;99.9%<break/>Recall: 88.4%&#x2009;~&#x2009;100%<break/>Dice: 80.7&#x2009;~&#x2009;99.1</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Ugurlu, Devran, et al./2019/(<xref ref-type="bibr" rid="ref120">Ugurlu et al., 2019</xref>)</td>
<td align="left" valign="top">HCP/30</td>
<td align="left" valign="top">- Based on representing each streamline as the fiber orientation distributions in its neighborhood</td>
<td align="left" valign="top">NN</td>
<td align="center" valign="top">9</td>
<td align="left" valign="top">Bundle-based Minimum Distance (BMD): 1.2&#x2009;~&#x2009;5.46<break/>Kappa: 0.68&#x2009;~&#x2009;0.84</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Bert&#x00F2;, Giulia, et al. /2019/ (<xref ref-type="bibr" rid="ref7">Bert&#x00F2; et al., 2019</xref>)</td>
<td align="left" valign="top">HCP/130</td>
<td align="left" valign="top">- example created based on 130 tractograms and using the Automated Fiber Quantification (<xref ref-type="bibr" rid="ref144">Yeatman et al., 2012</xref>) algorithm</td>
<td align="left" valign="top">Linear assignment problem for segmentation and ROI-based distance matrix</td>
<td align="center" valign="top">12</td>
<td align="left" valign="top">Dice: 0.84&#x2009;~&#x2009;0.87</td>
<td align="left" valign="top">doi: <ext-link xlink:href="https://doi.org/10.25663/brainlife.app.122" ext-link-type="uri">10.25663/brainlife.app.122</ext-link></td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Lam, Prince D. Ngattai, et al./2018/ (<xref ref-type="bibr" rid="ref86">Ngattai Lam et al., 2018</xref>)</td>
<td align="left" valign="top">Private/685</td>
<td align="left" valign="top">- Based on fiber features curvature, torsion and euclidean<break/>distances to a certain number of landmarks and CNN used to classify</td>
<td align="left" valign="top">2D NN</td>
<td align="center" valign="top">1</td>
<td align="left" valign="top">Accuracy: 98.8%</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Heker, Michal, et al./2016/ (<xref ref-type="bibr" rid="ref49">Heker et al., 2016</xref>)</td>
<td align="left" valign="top">HCP/15</td>
<td align="left" valign="top">- Based on Adaboost selected features such as fiber length, location, variance, etc.</td>
<td align="left" valign="top">Viola-Jones (<xref ref-type="bibr" rid="ref127">Viola and Jones, 2001</xref>)</td>
<td align="center" valign="top">3</td>
<td align="left" valign="top">Dice: 0.90&#x2009;~&#x2009;0.91</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Ratnarajah, Nagulan, and Anqi Qiu/2014/ (<xref ref-type="bibr" rid="ref102">Ratnarajah and Qiu, 2014</xref>)</td>
<td align="left" valign="top">GUSTO study (<xref ref-type="bibr" rid="ref115">Soh et al., 2014</xref>)/20</td>
<td align="left" valign="top">- Based on Riemannian structure of diffusion tensors</td>
<td align="left" valign="top">Multi-label k-NN</td>
<td align="center" valign="top">13</td>
<td align="left" valign="top">Hamming Loss: 0.041&#x2009;~&#x2009;0.053<break/>One error: 0.098&#x2009;~&#x2009;0.200 Coverage: 0.104&#x2009;~&#x2009;0.181 Volume Overlap percentage: 0.764</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap position="float" id="tab6">
<label>Table 6</label>
<caption>
<p>Atlas-based methods for Automated White Matter Tract Segmentation.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="9"><bold>Atlas-Based Methods</bold></th>
</tr>
<tr>
<th align="left" valign="top" rowspan="2"><bold>Author/Year/Citation</bold></th>
<th align="center" valign="top" rowspan="2"><bold>Dataset/No. of Subjects</bold></th>
<th align="center" valign="top" rowspan="2"><bold>Main Context</bold></th>
<th align="center" valign="top" rowspan="2"><bold>Architecture</bold></th>
<th align="center" valign="top" rowspan="2"><bold>No. of fiber clusters</bold></th>
<th align="center" valign="top" rowspan="2"><bold>Performance Metrics</bold></th>
<th align="center" valign="top" colspan="3"><bold>Practical Application</bold></th>
</tr>
<tr>
<th align="center" valign="top"><bold>Code</bold></th>
<th align="center" valign="top"><bold>Runtime</bold>
<break/><bold>per subject</bold></th>
<th align="center" valign="top"><bold>External Validation</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Radwan, Ahmed M., et al./2022/ (<xref ref-type="bibr" rid="ref101">Radwan et al., 2022</xref>)</td>
<td align="center" valign="top">HCP/20;<break/>MASSIVE/1</td>
<td align="center" valign="top">- Builds an atlas based on literature-based dissection protocol<break/>- atlas applied to new subjects using registration</td>
<td align="center" valign="top">ANTs (<xref ref-type="bibr" rid="ref1">Avants et al., 2009</xref>)</td>
<td align="center" valign="top">68</td>
<td align="center" valign="top">Weighted-Dice: 0.747&#x2009;~&#x2009;0.963</td>
<td align="center" valign="top"><ext-link xlink:href="https://github.com/KUL-Radneuron/KUL_FWT.git" ext-link-type="uri">https://github.com/KUL-Radneuron/KUL_FWT.git</ext-link>, <ext-link xlink:href="https://osf.io/snq2d/" ext-link-type="uri">https://osf.io/snq2d/</ext-link></td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Jordan, Kesshi M., et al./2021/ (<xref ref-type="bibr" rid="ref58">Jordan et al., 2021</xref>)</td>
<td align="center" valign="top">UCSF Dyslexia Center/59</td>
<td align="center" valign="top">- FreeSurfer derived ROIs used for anatomical information<break/>- RecoBundles (<xref ref-type="bibr" rid="ref41">Garyfallidis et al., 2018</xref>) used to filter out the streamlines that do not match the shape of the tract based on predefined 3D bundle templates</td>
<td align="center" valign="top">Streamline Linear Registration</td>
<td align="center" valign="top">6</td>
<td align="center" valign="top">Dice: 0.76</td>
<td align="center" valign="top">
<ext-link xlink:href="https://github.com/kesshijordan/Kesh_Autoseg_Tools/tree/v1.0.0" ext-link-type="uri">https://github.com/kesshijordan/Kesh_Autoseg_Tools/tree/v1.0.0</ext-link>
</td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">V&#x00E1;zquez, Andream et al./2019/ (<xref ref-type="bibr" rid="ref125">V&#x00E1;zquez et al., 2019</xref>)</td>
<td align="center" valign="top">HARDI ARCHI/&#x2212;</td>
<td align="center" valign="top">- Utilize Euclidean distance between subject fiber and atlas centroid using multi-subject bundle atlas</td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">100/62 based on atlas used</td>
<td align="center" valign="top">Execution Time: 6&#x2009;min</td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">6&#x2009;min</td>
<td align="center" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Zhang, Fan, et al./2018/ (<xref ref-type="bibr" rid="ref155">Zhang et al., 2018</xref>)</td>
<td align="center" valign="top">HCP/200; dHCP/40; ABIDE/70; CNP/204; PPMI/144; BTP/26</td>
<td align="center" valign="top">- Atlas created based on data obtained across multiple populations and different scanners</td>
<td align="center" valign="top">Entropy-based tractography registration</td>
<td align="center" valign="top">256</td>
<td align="center" valign="top">White matter parcellation Generalization (WMPG): 92.28&#x2009;~&#x2009;100<break/>Tract Anatomical Profile Coherence (TAPC): 0.626&#x2009;~&#x2009;0.783<break/>Inter Subject Parcellation Variability (ISPV): 0.264&#x2009;~&#x2009;0.919</td>
<td align="center" valign="top">
<ext-link xlink:href="https://github.com/SlicerDMRI/whitematteranalysis" ext-link-type="uri">https://github.com/SlicerDMRI/whitematteranalysis</ext-link>
</td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Sharmin, Nusrat, Emanuele Olivetti, and Paolo Avesani/2018/ (<xref ref-type="bibr" rid="ref112">Sharmin et al., 2018</xref>)</td>
<td align="center" valign="top">HCP/30</td>
<td align="center" valign="top">- Based on finding corresponding streamlines across different tractograms formulated as a linear assignment problem (LAP)</td>
<td align="center" valign="top">FLIRT/FSL (<xref ref-type="bibr" rid="ref36">Fischer and Modersitzki, 2003</xref>)</td>
<td align="center" valign="top">10</td>
<td align="center" valign="top">Dice: 0.40&#x2009;~&#x2009;0.80<break/>Receiver Operating Characteristic (ROC): 0.75&#x2009;~&#x2009;0.90</td>
<td align="center" valign="top">
<ext-link xlink:href="https://github.com/FBK-NILab/LAP_tract_segmentation" ext-link-type="uri">https://github.com/FBK-NILab/LAP_tract_segmentation</ext-link>
</td>
<td align="center" valign="top">2&#x2009;min</td>
<td align="center" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Labra, Nicole, et al./2017/ (<xref ref-type="bibr" rid="ref62">Labra et al., 2017</xref>)</td>
<td align="center" valign="top">HARDI ARCHI/&#x2212;</td>
<td align="center" valign="top">- Compare subject streamlines to multisubject bundle atlas based on distance metric</td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">26</td>
<td align="center" valign="top">Execution Time: 9 million streamlines in less than 6&#x2009;min</td>
<td align="center" valign="top">integrated with the Brain VISA/Connectomist software (<xref ref-type="bibr" rid="ref32">Duclap et al., 2012</xref>)</td>
<td align="center" valign="top">1&#x2009;~&#x2009;6.5&#x2009;s</td>
<td align="center" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Yoo, Sang Wook, et al./2015/ (<xref ref-type="bibr" rid="ref147">Yoo et al., 2015</xref>)</td>
<td align="center" valign="top">NMR/12</td>
<td align="center" valign="top">- Based on searching the most similar tract group in example data<break/>- multiple example subjects used; final label chosen based on voting scheme</td>
<td align="center" valign="top">FLIRT/FSL (<xref ref-type="bibr" rid="ref36">Fischer and Modersitzki, 2003</xref>)</td>
<td align="center" valign="top">7</td>
<td align="center" valign="top">Consistency: 96.1%<break/>Sensitivity: 89.5%&#x2009;~&#x2009;91.0%<break/>False Discovery Rate (FDR): 14.2%&#x2009;~&#x2009;14.9%<break/>Kappa Analysis: 0.87&#x2009;~&#x2009;0.88</td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">53.1&#x2009;s</td>
<td align="center" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Jin, Yan, et al./2013/ (<xref ref-type="bibr" rid="ref57">Jin et al., 2013</xref>)</td>
<td align="center" valign="top">HARDI/86</td>
<td align="center" valign="top">- Based on incorporating information from multiple hand-labeled atlases</td>
<td align="center" valign="top">ANTs (<xref ref-type="bibr" rid="ref1">Avants et al., 2009</xref>)</td>
<td align="center" valign="top">17</td>
<td align="center" valign="top">Dice: 0.90&#x2009;~&#x2009;1.0</td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap position="float" id="tab7">
<label>Table 7</label>
<caption>
<p>Hybrid methods for Automated White Matter Tract Segmentation.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="9"><bold>Hybrid Methods</bold></th>
</tr>
<tr>
<th align="left" valign="top" rowspan="2"><bold>Author/Year/Citation</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Dataset/</bold>
<break/><bold>No. of subjects</bold></th>
<th align="left" valign="top" colspan="2" rowspan="2"><bold>Hybrid algorithms</bold></th>
<th align="left" valign="top" rowspan="2"><bold>No. of fiber bundles</bold></th>
<th align="left" valign="top" rowspan="2"><bold>Performance metrics</bold></th>
<th align="left" valign="top" colspan="3"><bold>Practical application</bold></th>
</tr>
<tr>
<th align="left" valign="top"><bold>Code</bold></th>
<th align="left" valign="top"><bold>Runtime per subject</bold></th>
<th align="left" valign="top"><bold>External validation</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Xu, H., et al./2023/ (<xref ref-type="bibr" rid="ref140">Xu et al., 2023</xref>)</td>
<td align="left" valign="top">HCP/105</td>
<td align="left" valign="top">Registration<break/>Deep learning-based registration (<xref ref-type="bibr" rid="ref2">Balakrishnan et al., 2019</xref>)</td>
<td align="left" valign="top">Segmentation<break/>TractSeg (<xref ref-type="bibr" rid="ref136">Wasserthal et al., 2018</xref>)</td>
<td align="left" valign="top">72</td>
<td align="left" valign="top">Dice: 73.01%</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/HaoXu0507/ISBI2023-One-Shot-WM-Tract-Segmentation" ext-link-type="uri">https://github.com/HaoXu0507/ISBI2023-One-Shot-WM-Tract-Segmentation</ext-link>
</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Peretzke, Robin, et al./2023/ (<xref ref-type="bibr" rid="ref93">Peretzke et al., 2023</xref>)</td>
<td align="left" valign="top">HCP/21<break/>Private/10</td>
<td align="left" valign="top">Semi-Automatic<break/>Based on an active learning pipeline by training a random forest classifier on a specific tract</td>
<td align="left" valign="top">Manual<break/>Unlabeled streamlines from whole brain tractogram are manually annotated</td>
<td align="left" valign="top">3</td>
<td align="left" valign="top">Dice: 0.73&#x2009;~&#x2009;0.90</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/MIC-DKFZ/MITK-Diffusion" ext-link-type="uri">https://github.com/MIC-DKFZ/MITK-Diffusion</ext-link>
</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Delmonte, Alessandro, et al./2019/ (<xref ref-type="bibr" rid="ref27">Delmonte et al., 2019</xref>)</td>
<td align="left" valign="top">HCP/5</td>
<td align="left" valign="top">Semi-Automatic<break/>Representing the inherent inaccuracy of anatomical definitions using theory of fuzzy sets (<xref ref-type="bibr" rid="ref9">Bloch, 2005</xref>)</td>
<td align="left" valign="top">Manual<break/>Model qualitative anatomical definitions, navigate through levels of resolution</td>
<td align="left" valign="top">2</td>
<td align="left" valign="top">Qualitative Results</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/CorentinMercier/FBTS" ext-link-type="uri">https://github.com/CorentinMercier/FBTS</ext-link>
</td>
<td align="left" valign="top">100&#x2009;s</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Garyfallidis, Eleftherios, et al./2018/ (<xref ref-type="bibr" rid="ref41">Garyfallidis et al., 2018</xref>)</td>
<td align="left" valign="top">BIL&#x0026;GIN diffusion data (<xref ref-type="bibr" rid="ref80">Mazoyer et al., 2016</xref>)/60</td>
<td align="left" valign="top">Clustering Quickbundles (<xref ref-type="bibr" rid="ref40">Garyfallidis et al., 2012</xref>)</td>
<td align="left" valign="top">Registration<break/>Streamline-based Linear Registration (SLR)</td>
<td align="left" valign="top">4</td>
<td align="left" valign="top">Jaccard index: 0.21&#x2009;~&#x2009;0.26 Accuracy:<break/>0.99&#x2009;~&#x2009;1.0<break/>Sensitivity:<break/>0.68&#x2009;~&#x2009;0.92 Specificity: 1.0 Bundle Adjacency: 0.53&#x2009;~&#x2009;0.68</td>
<td align="left" valign="top">
<ext-link xlink:href="http://dipy.org" ext-link-type="uri">http://dipy.org</ext-link>
</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">O&#x2019;Donell, Lauren J./2017/ (<xref ref-type="bibr" rid="ref87">O&#x2019;Donnell et al., 2017</xref>)</td>
<td align="left" valign="top">HCP/10; Private/18</td>
<td align="left" valign="top">Atlas-based<break/>Atlas learned using groupwise-registration and spectral clustering</td>
<td align="left" valign="top">Registration<break/>tractography-based registration to atlas</td>
<td align="left" valign="top">800</td>
<td align="left" valign="top">Accuracy:<break/>80%&#x2009;~&#x2009;94%</td>
<td align="left" valign="top">
<ext-link xlink:href="https://github.com/SlicerDMRI/whitematteranalysis" ext-link-type="uri">https://github.com/SlicerDMRI/whitematteranalysis</ext-link>
</td>
<td align="left" valign="top">2.5&#x2009;h</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i001.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Wassermann, Demian, et al. /2016/ (<xref ref-type="bibr" rid="ref134">Wassermann et al., 2016</xref>)</td>
<td align="left" valign="top">Private/77</td>
<td align="left" valign="top">Semi-Automatic:<break/>- a novel query language based on a near to English textual syntax to construct a dictionary of anatomical definitions describing white matter tracts</td>
<td align="left" valign="top">Manual:<break/>- tract descriptions are written by the operator as text sentences</td>
<td align="left" valign="top">32</td>
<td align="left" valign="top">Kappa score: 0.71&#x2009;~&#x2009;0.90</td>
<td align="left" valign="top">
<ext-link xlink:href="https://demianw.github.com/tract_querier" ext-link-type="uri">https://demianw.github.com/tract_querier</ext-link>
</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Chekir, Amira, et al./2014/ (<xref ref-type="bibr" rid="ref17">Chekir et al., 2014</xref>)</td>
<td align="left" valign="top">HARDI/3;</td>
<td align="left" valign="top">Clustering:<break/>Quickbundles (<xref ref-type="bibr" rid="ref40">Garyfallidis et al., 2012</xref>)</td>
<td align="left" valign="top">Atlas-based:<break/>WMPM Type 2 Eve Atlas</td>
<td align="left" valign="top">13</td>
<td align="left" valign="top">Kappa analysis: 0.70 Quantitative Diffusivity Analysis (FA average correlation): 0.94</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
<tr>
<td align="left" valign="top">Wassermann, Demian, et al./2013/ (<xref ref-type="bibr" rid="ref133">Wassermann et al., 2013</xref>)</td>
<td align="left" valign="top">Private/77</td>
<td align="left" valign="top">Semi-Automatic:<break/>- a novel query language based on a near to English textual syntax to construct a dictionary of anatomical definitions describing white matter tracts</td>
<td align="left" valign="top">Manual<break/>- careful syntactical definition of major white matter tracts in the human brain based on a neuroanatomist&#x2019;s expert knowledge</td>
<td align="left" valign="top">37</td>
<td align="left" valign="top">Mean FA to detect tract changes specific to schizophrenia</td>
<td align="left" valign="top">
<ext-link xlink:href="https://demianw.github.com/tract_querier" ext-link-type="uri">https://demianw.github.com/tract_querier</ext-link>
</td>
<td align="left" valign="top">N/A</td>
<td align="left" valign="top">
<inline-graphic xlink:href="fnins-18-1376570-i002.tif"/>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>The above chart shows the number of studies included in each category.</p>
</caption>
<graphic xlink:href="fnins-18-1376570-g002.tif"/>
</fig>
<sec id="sec9">
<label>3.2.1</label>
<title>Direct voxel-based methods</title>
<p>This category of methods directly segments tracts based on the diffusion images without performing tractography as shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>. These methods are fast and utilize deep learning or machine learning techniques like convolutional neural networks (CNNs) to improve segmentation accuracy. Direct segmentation helps in providing a simpler processing pipeline and reduces potential errors due to intermediate steps like registration (<xref ref-type="bibr" rid="ref78">Mancini et al., 2019</xref>). Voxel-based approaches can associate each voxel with multiple tracts which is useful since WM tracts are known to cross or overlap (<xref ref-type="bibr" rid="ref55">Jeurissen et al., 2019</xref>). Recent advances in GPU-based algorithms reduce algorithm runtimes to several minutes due to their highly parallelizable implementations. Although learning-based techniques achieve very high segmentation performance and are fast, they require large amounts of manually annotated training data. Manual annotations are labor-intensive to obtain, time-consuming, and prone to inter-observer, intra-observer, or even inter-training-set variability. Deep learning models also fail to generalize well on unseen data if they are trained on scarce training scans. <xref ref-type="table" rid="tab3">Table 3</xref> provides a list of all studies that use direct voxel-based approaches.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Illustration of the direct voxel-based segmentation pipeline using the segmentation of the corpus callosum as a representative example. Refer to <xref ref-type="table" rid="tab3">Table 3</xref> for more details regarding the direct voxel-based segmentation methods.</p>
</caption>
<graphic xlink:href="fnins-18-1376570-g003.tif"/>
</fig>
</sec>
<sec id="sec10">
<label>3.2.2</label>
<title>Streamline-based clustering</title>
<p>Streamline-based methods are those that are applied to streamlines derived from whole brain tractography outputs as shown in <xref ref-type="fig" rid="fig4">Figure 4</xref>. These streamlines can be clustered or classified into meaningful groups of fibers known as bundles in either supervised or unsupervised ways. The unsupervised approach, usually called streamline clustering, is a popular white matter tract segmentation method. Such methods divide the entire brain white matter into multiple white matter parcels based on some information about the streamlines. Several bundles can be found using clustering-based methods, and the tractography data can also be characterized by using these clusters and their centroids as representative data which is used for further analyses. One of the main steps after clustering is to assign a label to the clustering results. This is a crucial step since clustering methods are commonly criticized for providing no guarantee of obtaining anatomically meaningful tracts (<xref ref-type="bibr" rid="ref118">Toga and Mazziotta, 2002</xref>). Therefore, in many cases, prior knowledge is used for this purpose, for example, by using an ROI atlas to guide the identification (<xref ref-type="bibr" rid="ref71">Logiraj et al., 2021a</xref>), by labeling clusters of streamlines from multiple subjects, also called atlas creation (<xref ref-type="bibr" rid="ref147">Yoo et al., 2015</xref>; <xref ref-type="bibr" rid="ref62">Labra et al., 2017</xref>), or by labeling clusters in a single subject (<xref ref-type="bibr" rid="ref41">Garyfallidis et al., 2018</xref>). Recently, deep learning methods are also being used for clustering large tractography datasets (<xref ref-type="bibr" rid="ref153">Zhang et al., 2020</xref>). One of the main limitations of such methods is the large size of tractography datasets which are composed of various tracts of different shapes, lengths, and positions. 
The advent of improved dMRI techniques has resulted in increased size and complexity of datasets. Tractography datasets can comprise more than 10 million tracts. This causes an increase in storage and memory challenges when clustering such large datasets. <xref ref-type="table" rid="tab4">Table 4</xref> provides a list of all studies that use streamline-based clustering approaches.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Illustration of the streamline-based clustering pipeline. Refer to <xref ref-type="table" rid="tab4">Table 4</xref> for more details regarding the clustering methods included in this review.</p>
</caption>
<graphic xlink:href="fnins-18-1376570-g004.tif"/>
</fig>
</sec>
<sec id="sec11">
<label>3.2.3</label>
<title>Streamline-based classification</title>
<p>The supervised approach of streamline-based methods involves streamline-based classification or labeling as shown in <xref ref-type="fig" rid="fig5">Figure 5</xref>. These methods assign an anatomical label to each individual streamline. This can be done by computing a pairwise distance of each streamline to a labeled streamline in a reference tract segmentation and then assigning a streamline label based on the closest reference tract (<xref ref-type="bibr" rid="ref8">Bert&#x00F2; et al., 2021</xref>). Recently, fibers obtained after tractography have been classified into tracts using deep learning-based classifiers such as CNNs which are trained on selected fiber features. Similar to segmentation methods, while they are fast in assigning labels to fibers, they also require a large amount of manually annotated training data and tend to face similar issues as segmentation methods. <xref ref-type="table" rid="tab5">Table 5</xref> provides a list of all studies that use streamline-based classification approaches for automated methods for white matter tract segmentation.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Illustration of the streamline-based classification pipeline. Refer to <xref ref-type="table" rid="tab5">Table 5</xref> for more details regarding the classification methods included in this review used to assign labels to streamlines.</p>
</caption>
<graphic xlink:href="fnins-18-1376570-g005.tif"/>
</fig>
</sec>
<sec id="sec12">
<label>3.2.4</label>
<title>Atlas-based</title>
<p>In atlas-based methods, tracts are identified by automatic placement of ROIs by warping a brain ROI atlas (<xref ref-type="bibr" rid="ref23">Cook et al., 2005</xref>) or using volumes of interest (<xref ref-type="bibr" rid="ref90">Oishi et al., 2009</xref>) to automatically group fiber streamlines into anatomically defined tracts as shown in <xref ref-type="fig" rid="fig6">Figure 6</xref>. These methods also can be based on tract similarity, also called streamline-based methods, using pairwise tract distances with a reference streamline label and assign a label based on the reference label of the streamline it is closest to (<xref ref-type="bibr" rid="ref89">O'Donnell and Westin, 2007</xref>; <xref ref-type="bibr" rid="ref137">Wu et al., 2020</xref>). Such approaches require image-based multi-modal nonlinear registration so that the streamlines obtained from tractography, and the ROIs are in the same space. However, registration results are not perfect because aligning streamlines with ROIs is a challenging task and time-consuming and can be even more difficult when applied to pathological brains. While more tracts can be easily added to the reference streamline atlas, in such methods, limited quality of some tracts limits their generalization ability. <xref ref-type="table" rid="tab6">Table 6</xref> provides a list of all studies that use atlas-based approaches.</p>
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption>
<p>Illustration of the atlas-based method pipeline. Refer to <xref ref-type="table" rid="tab6">Table 6</xref> for more details regarding the atlas-based methods included in this review.</p>
</caption>
<graphic xlink:href="fnins-18-1376570-g006.tif"/>
</fig>
</sec>
<sec id="sec13">
<label>3.2.5</label>
<title>Hybrid</title>
<p>In this review we also identified methods that combined more than one strategy from the categories to extract more information to improve labeling of anatomical bundles. The semi-automated methods identified in this review are also included under this category. Semi-automated techniques typically involve human intervention, such as manual labeling or correction, within an otherwise automated process. These methods are more time consuming since they have multiple steps as compared to the other methods. <xref ref-type="table" rid="tab7">Table 7</xref> provides a list of all studies that use hybrid approaches.</p>
</sec>
</sec>
<sec id="sec14">
<label>3.3</label>
<title>Evaluation metrics</title>
<p>In this section we review the most common evaluation methods that have been used for validating the white matter tract segmentation results in the studies included in this work. Evaluation of accuracy for tract segmentation is difficult since the errors cannot point out which stage of the pipeline causes the issue; for example, it is difficult to determine whether the errors were generated from the preprocessing steps, the selected algorithm for tract segmentation, similarity metric, etc. <xref ref-type="table" rid="tab8">Table 8</xref> gives a list of the most frequently used evaluation metrics with the following attributes for each: the metric name, metric description provides a brief definition, the formulation of the metric to show how it is computed and finally the usage of the metric.</p>
<table-wrap position="float" id="tab8">
<label>Table 8</label>
<caption>
<p>Evaluation metrics used for validating Automated White Matter Tract Segmentation.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="4"><bold>Evaluation Metrics</bold></th>
</tr>
<tr>
<th align="left" valign="top"><bold>Metric</bold></th>
<th align="left" valign="top"><bold>Description</bold></th>
<th align="left" valign="top"><bold>Formulation</bold></th>
<th align="left" valign="top"><bold>Usage</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Dice Coefficient (DSC)</td>
<td align="left" valign="top">Calculate overlap between segmented tract and the groundtruth tract. Convert tracts into binary masks where 1 indicates that a voxel is crossed by a streamline of the tract and 0 otherwise.</td>
<td align="left" valign="top">Given the segmented tract <inline-formula><mml:math id="M1"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:math></inline-formula>, and the ground truth tract <inline-formula><mml:math id="M2"><mml:mi>t</mml:mi></mml:math></inline-formula>,<break/><inline-formula><mml:math id="M3"><mml:mi>D</mml:mi><mml:mi>S</mml:mi><mml:mi>C</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mfenced open="(" close=")"><mml:mfenced open="|" close="|"><mml:mrow><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mfenced><mml:mo>&#x2229;</mml:mo><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:mi>t</mml:mi></mml:mfenced></mml:mrow></mml:mfenced></mml:mfenced><mml:mspace width="0.25em"/></mml:mrow><mml:mfenced open="(" close=")"><mml:mfenced open="|" close="|"><mml:mrow><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mfenced><mml:mo>+</mml:mo><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:mi>t</mml:mi></mml:mfenced></mml:mrow></mml:mfenced></mml:mfenced></mml:mfrac></mml:math></inline-formula></td>
<td align="left" valign="top">The authors in <xref ref-type="bibr" rid="ref155">Zhang et al. (2018)</xref> proposed Tract Anatomical Profile Coherence (TAPC) metric and Tract Surface Profile Coherence (TSPC) which are both based on the Dice scores computed between either for the tract anatomical profile or tract surface profile.</td>
</tr>
<tr>
<td align="left" valign="top">Jaccard Similarity Index (JSI)</td>
<td align="left" valign="top">Measure the ratio of the intersection of voxels belonging to a predicted tract with its groundtruth and the union of all voxels belonging to a predicted tract and its groundtruth.</td>
<td align="left" valign="top">For a predicted segmented tract <inline-formula><mml:math id="M4"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:math></inline-formula>, and the ground truth tract t,<break/><inline-formula><mml:math id="M5"><mml:mi>J</mml:mi><mml:mi>I</mml:mi><mml:mo>=</mml:mo><mml:mspace width="0.25em"/><mml:mfrac><mml:mrow><mml:mo stretchy="true">|</mml:mo><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mfenced><mml:mo>&#x2229;</mml:mo><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:mi>t</mml:mi></mml:mfenced><mml:mo stretchy="true">|</mml:mo><mml:mspace width="0.25em"/></mml:mrow><mml:mfenced open="|" close="|"><mml:mrow><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mfenced><mml:mo>&#x222A;</mml:mo><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:mi>t</mml:mi></mml:mfenced></mml:mrow></mml:mfenced></mml:mfrac></mml:math></inline-formula><break/>where <inline-formula><mml:math id="M6"><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:mi>t</mml:mi></mml:mfenced></mml:math></inline-formula> is the set of voxels crossed by the streamlines of <inline-formula><mml:math id="M7"><mml:mi>t</mml:mi></mml:math></inline-formula>, and <inline-formula><mml:math id="M8"><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mfenced></mml:math></inline-formula> is the set of voxels crossed by streamlines of <inline-formula><mml:math id="M9"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:math></inline-formula>.</td>
<td align="left" valign="top">The score ranges from 0 to 1, with 1 showing exact similarity and 0 showing no similarity between two tract segments. This is also referred to as the volumetric overlap error (VOE) in segmentation methods (<xref ref-type="bibr" rid="ref19">Chen et al., 2012</xref>).</td>
</tr>
<tr>
<td align="left" valign="top">Precision</td>
<td align="left" valign="top">Normalize the volume of the correctly segmented tract over the volume of the result of the segmentation.</td>
<td align="left" valign="top">For a predicted segmented tract <inline-formula><mml:math id="M10"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:math></inline-formula>, and the ground truth tract t,<break/><inline-formula><mml:math id="M11"><mml:mi mathvariant="italic">Precision</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mfenced open="(" close=")"><mml:mfenced open="|" close="|"><mml:mrow><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mfenced><mml:mo>&#x2229;</mml:mo><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:mi>t</mml:mi></mml:mfenced></mml:mrow></mml:mfenced></mml:mfenced><mml:mspace width="0.25em"/></mml:mrow><mml:mrow><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mfenced></mml:mrow></mml:mfrac></mml:math></inline-formula></td>
<td align="left" valign="top">Precision ranging from 0 to 1 focuses on the proportion of positive predictions that were correct.</td>
</tr>
<tr>
<td align="left" valign="top">Recall</td>
<td align="left" valign="top">Normalize the size of the correctly segmented tract over the ground truth tract segmentation</td>
<td align="left" valign="top">For a predicted segmented tract <inline-formula><mml:math id="M12"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:math></inline-formula>, and the ground truth tract t,<break/><inline-formula><mml:math id="M13"><mml:mi mathvariant="italic">Recall</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mfenced open="(" close=")"><mml:mfenced open="|" close="|"><mml:mrow><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mfenced><mml:mo>&#x2229;</mml:mo><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:mi>t</mml:mi></mml:mfenced></mml:mrow></mml:mfenced></mml:mfenced><mml:mspace width="0.25em"/></mml:mrow><mml:mrow><mml:mi>v</mml:mi><mml:mfenced open="(" close=")"><mml:mi>t</mml:mi></mml:mfenced></mml:mrow></mml:mfrac></mml:math></inline-formula></td>
<td align="left" valign="top">Recall ranging from 0 to 1 focuses on the proportion of actual positive instances that were correctly identified.</td>
</tr>
<tr>
<td align="left" valign="top">Davies-Bouldin Index (DB)</td>
<td align="left" valign="top">Measure the average similarity<break/>of each cluster with its most similar cluster, where similarity is the ratio of within-cluster distances to between-cluster distances (<xref ref-type="bibr" rid="ref24">Davies and Bouldin, 1979</xref>)</td>
<td align="left" valign="top"><inline-formula><mml:math id="M14"><mml:mi mathvariant="normal">D</mml:mi><mml:mi mathvariant="normal">B</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi mathvariant="normal">k</mml:mi></mml:mfrac><mml:munderover><mml:mstyle displaystyle="true"><mml:mo stretchy="true">&#x2211;</mml:mo></mml:mstyle><mml:mrow><mml:mi mathvariant="normal">i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi mathvariant="normal">k</mml:mi></mml:munderover><mml:munder><mml:mo>max</mml:mo><mml:mrow><mml:mi mathvariant="normal">i</mml:mi><mml:mo>&#x2260;</mml:mo><mml:mi mathvariant="normal">j</mml:mi></mml:mrow></mml:munder><mml:mspace width="0.25em"/><mml:msub><mml:mi mathvariant="script">R</mml:mi><mml:mrow><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula><break/>where <inline-formula><mml:math id="M15"><mml:mi mathvariant="normal">k</mml:mi></mml:math></inline-formula> is the number of clusters, <inline-formula><mml:math id="M16"><mml:msub><mml:mi mathvariant="script">R</mml:mi><mml:mrow><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the ratio of the average within-cluster distance<break/>to the between-cluster distance.</td>
<td align="left" valign="top">Evaluates the white matter tract segmentation methods that are based on clustering approaches.<break/>A lower DB score shows better clustering results, with 0<break/>being the minimum score.</td>
</tr>
<tr>
<td align="left" valign="top">Density Based Clustering Validation (DBCV) (<xref ref-type="bibr" rid="ref84">Moulavi et al., 2014</xref>)</td>
<td align="left" valign="top">Assigns a validity index to the obtained clustering solution which considers both the density and shape properties of the clusters.</td>
<td align="left" valign="top"><inline-formula><mml:math id="M17"><mml:mi mathvariant="italic">DBCV</mml:mi><mml:mfenced open="(" close=")"><mml:mi>C</mml:mi></mml:mfenced><mml:mo>=</mml:mo><mml:munderover><mml:mstyle displaystyle="true"><mml:mo stretchy="true">&#x2211;</mml:mo></mml:mstyle><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>l</mml:mi></mml:munderover><mml:mfrac><mml:mfenced open="|" close="|"><mml:msub><mml:mi>C</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mfenced><mml:mfenced open="|" close="|"><mml:mi>O</mml:mi></mml:mfenced></mml:mfrac><mml:msub><mml:mi>V</mml:mi><mml:mi>C</mml:mi></mml:msub><mml:mfenced open="(" close=")"><mml:msub><mml:mi>C</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mfenced></mml:math></inline-formula><break/>where a validity index of a clustering solution is obtained by taking the weighted average of the validity indexes for all clusters given as <inline-formula><mml:math id="M18"><mml:msub><mml:mi>V</mml:mi><mml:mi>C</mml:mi></mml:msub><mml:mfenced open="(" close=")"><mml:msub><mml:mi>C</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mfenced></mml:math></inline-formula>, <inline-formula><mml:math id="M19"><mml:mi>l</mml:mi></mml:math></inline-formula> is the number of clusters, <inline-formula><mml:math id="M20"><mml:mfenced open="|" close="|"><mml:mi>O</mml:mi></mml:mfenced></mml:math></inline-formula> is the number of objects in the cluster.</td>
<td align="left" valign="top">This results in a score between [&#x2212;1, 1], with greater values indicating better clustering solutions.</td>
</tr>
<tr>
<td align="left" valign="top">Kappa analysis</td>
<td align="left" valign="top">Evaluate agreement between two raters, which is known to be robust since the kappa considers agreement by chance (<xref ref-type="bibr" rid="ref63">Lacante et al., 2008</xref>)</td>
<td align="left" valign="top">Two binary images are superimposed to classify each pixel into three categories: pixels whose values are 1 in both images (<inline-formula><mml:math id="M21"><mml:mi mathvariant="italic">pp</mml:mi></mml:math></inline-formula>), pixels whose values are 0 in both images (<inline-formula><mml:math id="M22"><mml:mi>n</mml:mi><mml:mi>n</mml:mi></mml:math></inline-formula>), and pixels whose values are different in the two images (<inline-formula><mml:math id="M23"><mml:mi>p</mml:mi><mml:mi>n</mml:mi></mml:math></inline-formula>, <inline-formula><mml:math id="M24"><mml:mi>n</mml:mi><mml:mi>p</mml:mi></mml:math></inline-formula>). Then a probability of observed agreement (<inline-formula><mml:math id="M25"><mml:msub><mml:mi>p</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:math></inline-formula>) and a probability of chance agreement (<inline-formula><mml:math id="M26"><mml:msub><mml:mi>p</mml:mi><mml:mi>e</mml:mi></mml:msub></mml:math></inline-formula>) are computed as follows:<break/><inline-formula><mml:math id="M27"><mml:msub><mml:mi>p</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi mathvariant="italic">pp</mml:mi><mml:mo>+</mml:mo><mml:mi>n</mml:mi><mml:mi>n</mml:mi></mml:mrow><mml:mi>N</mml:mi></mml:mfrac></mml:math></inline-formula><break/><inline-formula><mml:math id="M28"><mml:msub><mml:mi>p</mml:mi><mml:mi>e</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mfenced open="(" close=")"><mml:mfrac><mml:mrow><mml:mi mathvariant="italic">pp</mml:mi><mml:mo>+</mml:mo><mml:mi>p</mml:mi><mml:mi>n</mml:mi></mml:mrow><mml:mi>N</mml:mi></mml:mfrac></mml:mfenced><mml:mo>.</mml:mo><mml:mfenced open="(" close=")"><mml:mfrac><mml:mrow><mml:mi mathvariant="italic">pp</mml:mi><mml:mo>+</mml:mo><mml:mi>n</mml:mi><mml:mi>p</mml:mi></mml:mrow><mml:mi>N</mml:mi></mml:mfrac></mml:mfenced><mml:mo>+</mml:mo><mml:mfenced open="(" 
close=")"><mml:mfrac><mml:mrow><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mi>n</mml:mi><mml:mi>p</mml:mi></mml:mrow><mml:mi>N</mml:mi></mml:mfrac></mml:mfenced><mml:mo>.</mml:mo><mml:mfenced open="(" close=")"><mml:mfrac><mml:mrow><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mi>p</mml:mi><mml:mi>n</mml:mi></mml:mrow><mml:mi>N</mml:mi></mml:mfrac></mml:mfenced></mml:math></inline-formula><break/>where <inline-formula><mml:math id="M29"><mml:mi>N</mml:mi><mml:mo>=</mml:mo><mml:mi mathvariant="italic">pp</mml:mi><mml:mo>+</mml:mo><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mi>p</mml:mi><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mi>n</mml:mi><mml:mi>p</mml:mi><mml:mtext>.</mml:mtext></mml:math></inline-formula>Finally the kappa value, &#x03BA;, for the two bundles is computed as follows:<break/><inline-formula><mml:math id="M30"><mml:mi>&#x03BA;</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>p</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>p</mml:mi><mml:mi>e</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>p</mml:mi><mml:mi>e</mml:mi></mml:msub></mml:mrow></mml:mfrac></mml:math></inline-formula></td>
<td align="left" valign="top">Landis and Koch assigned labels to kappa value ranges as follows [103]: &#x03BA; value smaller than 0 is &#x201C;poor,&#x201D; 0.00&#x2013;0.20 is &#x201C;slight,&#x201D; 0.21&#x2013;0.40 is &#x201C;fair,&#x201D; 0.41&#x2013;0.60 is &#x201C;moderate,&#x201D; 0.61&#x2013;0.80 is &#x201C;substantial&#x201D; and 0.81&#x2013;1.00 is &#x201C;almost perfect&#x201D; agreement. For each bundle, a binary image is computed which is the same size as the diffusion-weighted image, by setting pixel value to 1 if any tract passes through the voxel and set to 0 otherwise.</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Other than these commonly used evaluation methods, white matter tract segmentation methods are also validated qualitatively in the form of visualizing the generated tract segmentation. Visualization by a domain expert is still used as a complementary method along with a few of the above-mentioned quantitative measures. Recently authors in <xref ref-type="bibr" rid="ref99">Pujol et al. (2015)</xref> had initiated the DTI challenge to promote the standardized evaluation of tractography methods for neurosurgery. Despite ample research in the development of tractography and tract segmentation algorithms there is no consensus on the validation techniques to compare the different algorithms.</p>
</sec>
</sec>
<sec sec-type="discussion" id="sec15">
<label>4</label>
<title>Discussion</title>
<p>In this review paper, we have provided a systematic review of automated methods for white matter tract segmentation with respect to the most widely used public datasets for this task, the various categories of automated methods developed, and the evaluation metrics used to study the performance of the method. Although there are studies that have reviewed automated methods for brain tractography (<xref ref-type="bibr" rid="ref96">Poulin et al., 2019</xref>; <xref ref-type="bibr" rid="ref151">Zhang et al., 2022</xref>) and also deep learning methods for tract segmentation (<xref ref-type="bibr" rid="ref42">Ghazi et al., 2023</xref>), to the best of our knowledge, a systematic review that focuses on automated methods for tract segmentation has not been published yet. This review paper underscores the methodological advancements in building automated methods, as evidenced through the 59 articles included in this review.</p>
<p>While manual segmentation of tracts or virtual dissection methods were not a focus of this survey, multiple approaches have been proposed in the last decade that conduct fiber selection and anatomical labeling using expert knowledge (<xref ref-type="bibr" rid="ref104">Rheault et al., 2020</xref>, <xref ref-type="bibr" rid="ref105">2022a</xref>,<xref ref-type="bibr" rid="ref106">b</xref>; <xref ref-type="bibr" rid="ref53">Ille et al., 2021</xref>). These methods focus on improving the design of white matter dissection protocols to build more generalizable and reproducible methods. In <xref ref-type="bibr" rid="ref110">Schilling et al. (2021)</xref> authors show the need to have a standard nomenclature and definitions for white matter bundles and that there are still issues in tractography segmentation that need to be resolved so that they can be used in routine clinical settings. Such methods are worth mentioning in this survey since they show that segmenting white matter tracts is a crucial task and that there is still a lot of scope for improvement.</p>
<p>It is also important to note that our study of white matter tract segmentation focuses on fibers in the deep white matter. Multiple studies are available that investigate the segmentation of subcortical U-fibers, which are special types of short association fibers located in the superficial white matter (<xref ref-type="bibr" rid="ref45">Guevara et al., 2017</xref>, <xref ref-type="bibr" rid="ref44">2020</xref>; <xref ref-type="bibr" rid="ref141">Xue et al., 2023</xref>). Although studies on superficial white matter (SWM) are sparse due to its complexity, the authors in <xref ref-type="bibr" rid="ref141">Xue et al. (2023)</xref> have employed point-cloud-based deep learning techniques that concentrate on superficial white matter tract segmentation. Also, numerous studies on automated white matter tract segmentation methods were omitted from this review because they did not meet the search criteria used to compile the literature included in this study. For example, studies (<xref ref-type="bibr" rid="ref6">Bazin et al., 2011</xref>; <xref ref-type="bibr" rid="ref145">Yendiki et al., 2011</xref>) provide automated methods for white matter tract segmentation; however, they were not included in this study since they were published before 2013. Studies centered on automated fiber tracking or tractography (<xref ref-type="bibr" rid="ref117">Teeuw et al., 2015</xref>; <xref ref-type="bibr" rid="ref132">Warrington et al., 2020</xref>) are not featured in this review; however, they merit attention as they play a vital role in advancing new automated methodologies for accurately reconstructing white matter pathways, thereby facilitating the analysis of extensive datasets. Authors in <xref ref-type="bibr" rid="ref132">Warrington et al. (2020)</xref> present tractography protocols as a software tool for standardized and automated cross-species tractography generated from large datasets. 
Automated tractography methods such as TRACULA (<xref ref-type="bibr" rid="ref145">Yendiki et al., 2011</xref>), <xref ref-type="bibr" rid="ref117">Teeuw et al. (2015)</xref> have used learning-based methods that show highly promising performance incorporating information on the anatomy of the pathways for reconstruction of white matter pathways thereby facilitating automated fiber tracking to large studies.</p>
<p>Twenty-seven percent of the papers in the current survey are based on direct voxel-based segmentation methods. Our results show that papers based on fully convolutional networks are typically based on encoder-decoder architecture such as U-net (<xref ref-type="bibr" rid="ref108">Ronneberger et al., 2015</xref>). These voxel-based methods are gaining popularity with the advent of new and efficient deep learning-based segmentation techniques. However most current studies still rely on U-net based architectures as the baseline model, and the popular segmentation architectures like those based on transformers (<xref ref-type="bibr" rid="ref31">Dosovitskiy et al., 2020</xref>; <xref ref-type="bibr" rid="ref48">Hatamizadeh et al., 2021</xref>; <xref ref-type="bibr" rid="ref12">Cao et al., 2022</xref>) have not been applied to this domain yet. Deep learning methods also perform segmentation by either labeling the streamlines or directly labeling the voxels. In general, the progress seen in using deep learning methods for medical image segmentation tasks (<xref ref-type="bibr" rid="ref48">Hatamizadeh et al., 2021</xref>; <xref ref-type="bibr" rid="ref138">Xiao et al., 2023</xref>) has not yet been fully applied to white matter tract segmentation. This is mostly because it is more demanding to have manual annotations of white matter tracts than other brain anatomical structures. Also, while deep learning methods provide fast segmentations, their results can still be unsatisfactory, and are not robust to changes of bundle sizes, tracking methods and data quality (<xref ref-type="bibr" rid="ref8">Bert&#x00F2; et al., 2021</xref>). 
This shows that the major challenges in using machine learning or deep learning methods for tract segmentation will require researchers to come up with more generalizable solutions, create and publish more annotated datasets, use other techniques like transfer learning, self-supervised learning to overcome the challenges of limited training samples for deep learning-based methods to gain clinical applicability.</p>
<p>Next, our results show 25% of the papers in the current survey applied streamline-based clustering methods and 20% used streamline-based classification methods for automatic tract segmentation. These methods focus on clustering a large number of fiber trajectories or streamlines into clusters or fiber bundles. However, few of these methods attach labels to clusters, and the clusters must be assigned labels either manually or automatically by using a streamline atlas usually incorporated in the clustering process. Such methods also have to post-process their results in order to filter the bundles to exclude spurious tracts that are falsely included in the clustering results.</p>
<p>In all the methods seen in this work, only two studies were found which used registration-based methods for white matter tract segmentation (<xref ref-type="bibr" rid="ref41">Garyfallidis et al., 2018</xref>; <xref ref-type="bibr" rid="ref58">Jordan et al., 2021</xref>). In <xref ref-type="bibr" rid="ref136">Wasserthal et al. (2018)</xref> authors compared their work with two registration-based methods for automatic tract segmentation, which usually involves using a tract atlas and registering it to the subject of interest which yields a binary mask for each tract in subject space. Fewer registration methods are likely employed due to the inaccuracies produced during the registration step, and the computational complexity needed. However recently, a lot of work has been done in using deep learning methods for image registration (<xref ref-type="bibr" rid="ref92">Oliveira and Tavares, 2014</xref>; <xref ref-type="bibr" rid="ref39">Fu et al., 2020</xref>) to overcome the challenges of traditional registration methods. For example, authors in <xref ref-type="bibr" rid="ref154">Zhang et al. (2021)</xref> proposed a deep learning-based method for registration of dMRI images. This exemplifies the growing interest in applying such methods to tract segmentation. Currently, to the best of our knowledge, there are no studies that use deep learning-based registration techniques for the task of automated white matter tract segmentation.</p>
<p>It is interesting to note that there is limited existing work on automated methods for segmenting white matter bundles for the neonatal brain. In our survey we only found two papers (<xref ref-type="bibr" rid="ref102">Ratnarajah and Qiu, 2014</xref>; <xref ref-type="bibr" rid="ref72">Logiraj et al., 2021b</xref>) that correspond to this topic. This could mainly be because segmenting white matter structures is particularly difficult in the neonatal brain since it is undergoing a critical growing process along with cellular maturation such as myelination and synaptic pruning (<xref ref-type="bibr" rid="ref102">Ratnarajah and Qiu, 2014</xref>). Existing methods rely mostly on fully manual segmentation for delineating white matter structures (<xref ref-type="bibr" rid="ref91">Oishi et al., 2011</xref>) or are based on semi-automated techniques (<xref ref-type="bibr" rid="ref52">Huang et al., 2006</xref>). In <xref ref-type="bibr" rid="ref91">Oishi et al. (2011)</xref> authors developed an atlas-based segmentation based on image registration, which also needs manual expert assessment in order to delineate the required white matter structures. This work was developed almost a decade ago and there have been multiple automated segmentation techniques proposed since that have been successfully applied to adult&#x2019;s brain as shown in this survey. Manual methods also suffer challenges of being time consuming and require prior anatomical knowledge to achieve reasonable accuracy and reproducibility.</p>
<p>Overall, we observe that automated tract segmentation algorithms follow varied methods for pre-processing, augmenting, and training their datasets and few methods use multi-site datasets. Even the techniques used to generate reference tracts are not the same across most of the methods. This makes it impossible to assess the true generalizability and reliability of the proposed methods (<xref ref-type="bibr" rid="ref97">Poulin et al., 2022</xref>). This problem is also observed in manual segmentation methods where there is varied reproducibility for segmenting the same tracts among different experts or the inter-protocol agreement across protocols for various white matter pathways is poor as shown in (<xref ref-type="bibr" rid="ref110">Schilling et al., 2021</xref>).</p>
<p>We observe that most studies included in this review do not provide computational time making it challenging to assess the practicality of these methods. In general, streamline-based methods typically require substantial memory for generating millions of streamlines per subject, whereas direct voxel-based methods can segment white matter tracts for a test subject in under a minute (<xref ref-type="bibr" rid="ref136">Wasserthal et al., 2018</xref>). Out of 59 segmentation methods reviewed, only 18 have been validated on external datasets with varying scanners and acquisition parameters. This lack of generalizability testing may be due to the limited availability of publicly accessible tract segmentation datasets. Despite this, direct voxel-based methods can be used for data augmentation during training to simulate domain shifts in external datasets, potentially reducing the domain-shift impact.</p>
<p>Lastly, we have summarized the most common evaluation metrics used by tract segmentation methods to validate their results in Section 3.3. However, there is no consensus on the evaluation metrics used to compare the various proposed approaches. Due to the limited availability of ground truth, most methods rely on reproducibility in terms of intra- and inter-rater as well as test&#x2013;retest reproducibility (<xref ref-type="bibr" rid="ref152">Zhang et al., 2019</xref>; <xref ref-type="bibr" rid="ref104">Rheault et al., 2020</xref>, <xref ref-type="bibr" rid="ref106">2022b</xref>) and consistency of methods across different populations and acquisitions (<xref ref-type="bibr" rid="ref136">Wasserthal et al., 2018</xref>, <xref ref-type="bibr" rid="ref135">2019</xref>), as validation points for identifying a good tract segmentation method.</p>
</sec>
<sec id="sec16">
<label>5</label>
<title>Future directions</title>
<p>Although this paper reveals the advancements of automated methods for white matter tract segmentation, there is still a lack of a general standardized method that can be reliably used by clinicians. There is still limited consensus on the definition of tracts even among knowledgeable and experienced professionals who are concerned about the inter- and intra-user reproducibility with manual placement of ROIs (<xref ref-type="bibr" rid="ref156">Zhang et al., 2010</xref>). This further complicates the methodology development and validation process. This suggests that there is a need for the development of more standardized approaches for validating tract segmentation results.</p>
<p>The recent work of authors of TractSeg (<xref ref-type="bibr" rid="ref136">Wasserthal et al., 2018</xref>) enabled the distribution of manually labelled tracts to the community so that researchers could collaboratively share the segmented tracts by experts. This gave rise to the development of more generalized approaches towards white matter tract segmentation, which otherwise would not have been possible. This has set a particularly good example so that in the future, researchers can continue to enable the progress, development, and assessment of higher-quality automated methods through such public collaborations. There is still substantial room for future improvements in the domain of generating high quality ground truth via expert neuroanatomists.</p>
<p>Another important aspect to consider when developing automated methods is their computational cost. With the advent of improved imaging tools for the acquisition of data and increasing efficiency of computational resources, there is a critical need for building applications that can be clinically used. Moreover, there has been a significant surge in image sizes. A decade ago, state-of-the-art MR acquisitions typically featured MR images of human brains with voxel sizes of 2 &#x00D7; 2 &#x00D7; 2&#x2009;mm<sup>3</sup>. Today, we routinely encounter voxel sizes smaller than 1 &#x00D7; 1 &#x00D7; 1 mm<sup>3</sup>, as seen in data collected by projects like the Human Connectome Project (<xref ref-type="bibr" rid="ref122">Van Essen et al., 2012</xref>; <xref ref-type="bibr" rid="ref43">Glasser et al., 2016</xref>). Therefore, rapid tract segmentation approaches are needed to allow interactive analysis and also to efficiently handle very large imaging studies in a time and cost-effective manner.</p>
<p>Another important future direction would be to consider tractogram data generated from varied tracking algorithms as input to the automated tract segmentation methods developed. This is because a variety of tracking algorithms with different parameter values can be used by tractography studies to generate the tractograms. Then the segmentation of tracts could be applied to any of these generated tractograms, and the method should be able to adapt to all these diverse types of inputs.</p>
</sec>
<sec sec-type="conclusions" id="sec17">
<label>6</label>
<title>Conclusion</title>
<p>This systematic review summarized 59 relevant articles in all. Unlike previous studies, our work focuses on a systematic review of methods for automated white matter tract segmentation developed in the last decade. This work framed crucial research questions to explain what approaches have been used for automated tract segmentation methods, discover key research gaps, determine datasets that are publicly accessible for researchers and summarize the most common evaluation techniques utilized. The literature published in this area as displayed and characterized in the Results section is one that is of growing and global interest.</p>
</sec>
<sec sec-type="data-availability" id="sec18">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="sec19">
<title>Author contributions</title>
<p>AJ: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Software, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. HL: Investigation, Project administration, Resources, Supervision, Writing &#x2013; review &#x0026; editing. NP: Funding acquisition, Project administration, Resources, Supervision, Writing &#x2013; review &#x0026; editing. LH: Funding acquisition, Project administration, Resources, Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
</body>
<back>
<sec sec-type="funding-information" id="sec20">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. This work was supported in part by NIH (R01 EB029944, R01 NS094200, and R01 NS096037) and Academic and Research Committee Awards of Cincinnati Children&#x2019;s Hospital Medical Center.</p>
</sec>
<sec sec-type="COI-statement" id="sec21">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author(s) declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec id="sec100" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Avants</surname> <given-names>B. B.</given-names></name> <name><surname>Tustison</surname> <given-names>N.</given-names></name> <name><surname>Song</surname> <given-names>G.</given-names></name></person-group> (<year>2009</year>). <article-title>Advanced normalization tools (ANTS)</article-title>. <source>Insight J.</source> <volume>2</volume>, <fpage>1</fpage>&#x2013;<lpage>35</lpage>.</citation></ref>
<ref id="ref2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Balakrishnan</surname> <given-names>G.</given-names></name> <name><surname>Zhao</surname> <given-names>A.</given-names></name> <name><surname>Sabuncu</surname> <given-names>M. R.</given-names></name> <name><surname>Guttag</surname> <given-names>J.</given-names></name> <name><surname>Dalca</surname> <given-names>A. V.</given-names></name></person-group> (<year>2019</year>). <article-title>VoxelMorph: a learning framework for deformable medical image registration</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>38</volume>, <fpage>1788</fpage>&#x2013;<lpage>1800</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2019.2897538</pub-id>, PMID: <pub-id pub-id-type="pmid">30716034</pub-id></citation></ref>
<ref id="ref3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Basser</surname> <given-names>P. J.</given-names></name> <name><surname>Mattiello</surname> <given-names>J.</given-names></name> <name><surname>LeBihan</surname> <given-names>D.</given-names></name></person-group> (<year>1994a</year>). <article-title>Estimation of the effective self-diffusion tensor from the NMR spin echo</article-title>. <source>J. Magn. Reson. B</source> <volume>103</volume>, <fpage>247</fpage>&#x2013;<lpage>254</lpage>. doi: <pub-id pub-id-type="doi">10.1006/jmrb.1994.1037</pub-id>, PMID: <pub-id pub-id-type="pmid">8019776</pub-id></citation></ref>
<ref id="ref4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Basser</surname> <given-names>P. J.</given-names></name> <name><surname>Mattiello</surname> <given-names>J.</given-names></name> <name><surname>LeBihan</surname> <given-names>D.</given-names></name></person-group> (<year>1994b</year>). <article-title>MR diffusion tensor spectroscopy and imaging</article-title>. <source>Biophys. J.</source> <volume>66</volume>, <fpage>259</fpage>&#x2013;<lpage>267</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0006-3495(94)80775-1</pub-id>, PMID: <pub-id pub-id-type="pmid">8130344</pub-id></citation></ref>
<ref id="ref5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Basser</surname> <given-names>P. J.</given-names></name> <name><surname>Pajevic</surname> <given-names>S.</given-names></name> <name><surname>Pierpaoli</surname> <given-names>C.</given-names></name> <name><surname>Duda</surname> <given-names>J.</given-names></name> <name><surname>Aldroubi</surname> <given-names>A.</given-names></name></person-group> (<year>2000</year>). <article-title>In vivo fiber tractography using DT-MRI data</article-title>. <source>Magn. Reson. Med.</source> <volume>44</volume>, <fpage>625</fpage>&#x2013;<lpage>632</lpage>. doi: <pub-id pub-id-type="doi">10.1002/1522-2594(200010)44:4&#x003C;625::AID-MRM17&#x003E;3.0.CO;2-O</pub-id>, PMID: <pub-id pub-id-type="pmid">11025519</pub-id></citation></ref>
<ref id="ref6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bazin</surname> <given-names>P. L.</given-names></name> <name><surname>Ye</surname> <given-names>C.</given-names></name> <name><surname>Bogovic</surname> <given-names>J. A.</given-names></name> <name><surname>Shiee</surname> <given-names>N.</given-names></name> <name><surname>Reich</surname> <given-names>D. S.</given-names></name> <name><surname>Prince</surname> <given-names>J. L.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Direct segmentation of the major white matter tracts in diffusion tensor images</article-title>. <source>NeuroImage</source> <volume>58</volume>, <fpage>458</fpage>&#x2013;<lpage>468</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.06.020</pub-id>, PMID: <pub-id pub-id-type="pmid">21718790</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Bert&#x00F2;</surname> <given-names>G.</given-names></name> <name><surname>Avesani</surname> <given-names>P.</given-names></name> <name><surname>Pestilli</surname> <given-names>F.</given-names></name> <name><surname>Bullock</surname> <given-names>D.</given-names></name> <name><surname>Caron</surname> <given-names>B.</given-names></name> <name><surname>Olivetti</surname> <given-names>E.</given-names></name></person-group> (<year>2019</year>). <article-title>Anatomically-informed multiple linear assignment problems for white matter bundle segmentation</article-title>. <conf-name>2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)</conf-name>,</citation></ref>
<ref id="ref8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bert&#x00F2;</surname> <given-names>G.</given-names></name> <name><surname>Bullock</surname> <given-names>D.</given-names></name> <name><surname>Astolfi</surname> <given-names>P.</given-names></name> <name><surname>Hayashi</surname> <given-names>S.</given-names></name> <name><surname>Zigiotto</surname> <given-names>L.</given-names></name> <name><surname>Annicchiarico</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Classifyber, a robust streamline-based linear classifier for white matter bundle segmentation</article-title>. <source>NeuroImage</source> <volume>224</volume>:<fpage>117402</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.117402</pub-id>, PMID: <pub-id pub-id-type="pmid">32979520</pub-id></citation></ref>
<ref id="ref9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bloch</surname> <given-names>I.</given-names></name></person-group> (<year>2005</year>). <article-title>Fuzzy spatial relationships for image processing and interpretation: a review</article-title>. <source>Image Vis. Comput.</source> <volume>23</volume>, <fpage>89</fpage>&#x2013;<lpage>110</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.imavis.2004.06.013</pub-id></citation></ref>
<ref id="ref10"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Brun</surname> <given-names>A.</given-names></name> <name><surname>Knutsson</surname> <given-names>H.</given-names></name> <name><surname>Park</surname> <given-names>H.-J.</given-names></name> <name><surname>Shenton</surname> <given-names>M. E.</given-names></name> <name><surname>Westin</surname> <given-names>C.-F.</given-names></name></person-group> (<year>2004</year>). <article-title>Clustering fiber traces using normalized cuts</article-title>. <conf-name>Medical Image Computing and Computer-Assisted Intervention&#x2013;MICCAI 2004: 7th International Conference, Saint-Malo, France, September 26&#x2013;29, 2004. Proceedings, Part I 7</conf-name></citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bullock</surname> <given-names>D.</given-names></name> <name><surname>Takemura</surname> <given-names>H.</given-names></name> <name><surname>Caiafa</surname> <given-names>C. F.</given-names></name> <name><surname>Kitchell</surname> <given-names>L.</given-names></name> <name><surname>McPherson</surname> <given-names>B.</given-names></name> <name><surname>Caron</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Associative white matter connecting the dorsal and ventral posterior human cortex</article-title>. <source>Brain Struct. Funct.</source> <volume>224</volume>, <fpage>2631</fpage>&#x2013;<lpage>2660</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00429-019-01907-8</pub-id>, PMID: <pub-id pub-id-type="pmid">31342157</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Cao</surname> <given-names>H.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Chen</surname> <given-names>J.</given-names></name> <name><surname>Jiang</surname> <given-names>D.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Tian</surname> <given-names>Q.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Swin-unet: Unet-like pure transformer for medical image segmentation</article-title>. <conf-name>European conference on computer vision</conf-name></citation></ref>
<ref id="ref13"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Carter</surname> <given-names>M.</given-names></name> <name><surname>Jennifer</surname> <given-names>S.</given-names></name> <name><surname>Farra</surname> <given-names>N.</given-names></name> <name><surname>Harris</surname> <given-names>G.</given-names></name><collab id="coll1">ScienceDirect</collab></person-group> (<year>2015</year>). <source>Guide to research techniques in neuroscience</source>. <edition>2nd</edition> Edn <publisher-name>Academic Press</publisher-name>.</citation></ref>
<ref id="ref14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Catani</surname> <given-names>M.</given-names></name></person-group> (<year>2006</year>). <article-title>Diffusion tensor magnetic resonance imaging tractography in cognitive disorders</article-title>. <source>Curr. Opin. Neurol.</source> <volume>19</volume>, <fpage>599</fpage>&#x2013;<lpage>606</lpage>. doi: <pub-id pub-id-type="doi">10.1097/01.wco.0000247610.44106.3f</pub-id></citation></ref>
<ref id="ref15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Catani</surname> <given-names>M.</given-names></name> <name><surname>Dell'acqua</surname> <given-names>F.</given-names></name> <name><surname>Vergani</surname> <given-names>F.</given-names></name> <name><surname>Malik</surname> <given-names>F.</given-names></name> <name><surname>Hodge</surname> <given-names>H.</given-names></name> <name><surname>Roy</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Short frontal lobe connections of the human brain</article-title>. <source>Cortex</source> <volume>48</volume>, <fpage>273</fpage>&#x2013;<lpage>291</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2011.12.001</pub-id></citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Catani</surname> <given-names>M.</given-names></name> <name><surname>Howard</surname> <given-names>R. J.</given-names></name> <name><surname>Pajevic</surname> <given-names>S.</given-names></name> <name><surname>Jones</surname> <given-names>D. K.</given-names></name></person-group> (<year>2002</year>). <article-title>Virtual in vivo interactive dissection of white matter fasciculi in the human brain</article-title>. <source>NeuroImage</source> <volume>17</volume>, <fpage>77</fpage>&#x2013;<lpage>94</lpage>. doi: <pub-id pub-id-type="doi">10.1006/nimg.2002.1136</pub-id></citation></ref>
<ref id="ref17"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Chekir</surname> <given-names>A.</given-names></name> <name><surname>Descoteaux</surname> <given-names>M.</given-names></name> <name><surname>Garyfallidis</surname> <given-names>E.</given-names></name> <name><surname>C&#x00F4;t&#x00E9;</surname> <given-names>M.-A.</given-names></name> <name><surname>Boumghar</surname> <given-names>F. O.</given-names></name></person-group> (<year>2014</year>). <article-title>A hybrid approach for optimal automatic segmentation of white matter tracts in hardi</article-title>. <conf-name>2014 IEEE Conference on Biomedical Engineering and Sciences (IECBES)</conf-name></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>Z.</given-names></name> <name><surname>Tie</surname> <given-names>Y.</given-names></name> <name><surname>Olubiyi</surname> <given-names>O.</given-names></name> <name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Mehrtash</surname> <given-names>A.</given-names></name> <name><surname>Rigolo</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Corticospinal tract modeling for neurosurgical planning by tracking through regions of peritumoral edema and crossing fibers using two-tensor unscented Kalman filter tractography</article-title>. <source>Int. J. Comput. Assist. Radiol. Surg.</source> <volume>11</volume>, <fpage>1475</fpage>&#x2013;<lpage>1486</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11548-015-1344-5</pub-id>, PMID: <pub-id pub-id-type="pmid">26762104</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>X.</given-names></name> <name><surname>Udupa</surname> <given-names>J. K.</given-names></name> <name><surname>Bagci</surname> <given-names>U.</given-names></name> <name><surname>Zhuge</surname> <given-names>Y.</given-names></name> <name><surname>Yao</surname> <given-names>J.</given-names></name></person-group> (<year>2012</year>). <article-title>Medical image segmentation by combining graph cuts and oriented active appearance models</article-title>. <source>IEEE Trans. Image Process.</source> <volume>21</volume>, <fpage>2035</fpage>&#x2013;<lpage>2046</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TIP.2012.2186306</pub-id>, PMID: <pub-id pub-id-type="pmid">22311862</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>C.</given-names></name> <name><surname>Song</surname> <given-names>Y.</given-names></name> <name><surname>Makris</surname> <given-names>N.</given-names></name> <name><surname>Rathi</surname> <given-names>Y.</given-names></name> <name><surname>Cai</surname> <given-names>W.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Deep fiber clustering: anatomically informed unsupervised deep learning for fast and effective white matter parcellation</article-title>. <conf-name>International Conference on Medical Image Computing and Computer-Assisted Intervention</conf-name></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>C.</given-names></name> <name><surname>Xue</surname> <given-names>T.</given-names></name> <name><surname>Song</surname> <given-names>Y.</given-names></name> <name><surname>Makris</surname> <given-names>N.</given-names></name> <name><surname>Rathi</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Deep fiber clustering: anatomically informed fiber clustering with self-supervised deep learning for fast and effective tractography parcellation</article-title>. <source>NeuroImage</source> <volume>273</volume>:<fpage>120086</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2023.120086</pub-id>, PMID: <pub-id pub-id-type="pmid">37019346</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Clayden</surname> <given-names>J. D.</given-names></name> <name><surname>Storkey</surname> <given-names>A. J.</given-names></name> <name><surname>Bastin</surname> <given-names>M. E.</given-names></name></person-group> (<year>2007</year>). <article-title>A probabilistic model-based approach to consistent white matter tract segmentation</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>26</volume>, <fpage>1555</fpage>&#x2013;<lpage>1561</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2007.905826</pub-id>, PMID: <pub-id pub-id-type="pmid">18041270</pub-id></citation></ref>
<ref id="ref23"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Cook</surname> <given-names>P. A.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Avants</surname> <given-names>B. B.</given-names></name> <name><surname>Yushkevich</surname> <given-names>P.</given-names></name> <name><surname>Alexander</surname> <given-names>D. C.</given-names></name> <name><surname>Gee</surname> <given-names>J. C.</given-names></name> <etal/></person-group>.. (<year>2005</year>). <article-title>An automated approach to connectivity-based partitioning of brain structures. Medical image computing and computer-assisted intervention&#x2013;MICCAI 2005</article-title>: <conf-name>8th International Conference, Palm Springs, CA, USA, October 26&#x2013;29, 2005, Proceedings, Part I 8</conf-name></citation></ref>
<ref id="ref24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Davies</surname> <given-names>D. L.</given-names></name> <name><surname>Bouldin</surname> <given-names>D. W.</given-names></name></person-group> (<year>1979</year>). <article-title>A cluster separation measure</article-title>. <source>IEEE Transac. Pattern Analysis Machine PAMI-1</source>, <fpage>224</fpage>&#x2013;<lpage>227</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TPAMI.1979.4766909</pub-id></citation></ref>
<ref id="ref25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>De Belder</surname> <given-names>F. E.</given-names></name> <name><surname>Oot</surname> <given-names>A. R.</given-names></name> <name><surname>Van Hecke</surname> <given-names>W.</given-names></name> <name><surname>Venstermans</surname> <given-names>C.</given-names></name> <name><surname>Menovsky</surname> <given-names>T.</given-names></name> <name><surname>Van Marck</surname> <given-names>V.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Diffusion tensor imaging provides an insight into the microstructure of meningiomas, high-grade gliomas, and peritumoral edema</article-title>. <source>J. Comput. Assist. Tomogr.</source> <volume>36</volume>, <fpage>577</fpage>&#x2013;<lpage>582</lpage>. doi: <pub-id pub-id-type="doi">10.1097/RCT.0b013e318261e913</pub-id>, PMID: <pub-id pub-id-type="pmid">22992609</pub-id></citation></ref>
<ref id="ref26"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Delmarcelle</surname> <given-names>T.</given-names></name> <name><surname>Hesselink</surname> <given-names>L.</given-names></name></person-group> (<year>1992</year>). <article-title>Visualization of second order tensor fields and matrix data</article-title>. In <conf-name>Proceedings Visualization&#x2019;92</conf-name>. (pp. <fpage>316</fpage>&#x2013;<lpage>317</lpage>). <publisher-name>IEEE Computer Society</publisher-name>.</citation></ref>
<ref id="ref27"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Delmonte</surname> <given-names>A.</given-names></name> <name><surname>Mercier</surname> <given-names>C.</given-names></name> <name><surname>Pallud</surname> <given-names>J.</given-names></name> <name><surname>Bloch</surname> <given-names>I.</given-names></name> <name><surname>Gori</surname> <given-names>P.</given-names></name></person-group> (<year>2019</year>). <article-title>White matter multi-resolution segmentation using fuzzy set theory</article-title>. <conf-name>2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)</conf-name></citation></ref>
<ref id="ref28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dempster</surname> <given-names>A. P.</given-names></name> <name><surname>Laird</surname> <given-names>N. M.</given-names></name> <name><surname>Rubin</surname> <given-names>D. B.</given-names></name></person-group> (<year>1977</year>). <article-title>Maximum likelihood from incomplete data via the EM algorithm</article-title>. <source>J. Royal Statistic. Soc.</source> <volume>39</volume>, <fpage>1</fpage>&#x2013;<lpage>22</lpage>.</citation></ref>
<ref id="ref29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Di Martino</surname> <given-names>A.</given-names></name> <name><surname>O&#x2019;connor</surname> <given-names>D.</given-names></name> <name><surname>Chen</surname> <given-names>B.</given-names></name> <name><surname>Alaerts</surname> <given-names>K.</given-names></name> <name><surname>Anderson</surname> <given-names>J. S.</given-names></name> <name><surname>Assaf</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Enhancing studies of the connectome in autism using the autism brain imaging data exchange II</article-title>. <source>Scientific Data</source> <volume>4</volume>, <fpage>1</fpage>&#x2013;<lpage>15</lpage>. doi: <pub-id pub-id-type="doi">10.1038/sdata.2017.10</pub-id></citation></ref>
<ref id="ref30"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Dong</surname> <given-names>X.</given-names></name> <name><surname>Peng</surname> <given-names>J.</given-names></name> <name><surname>Yang</surname> <given-names>Z.</given-names></name> <name><surname>Wu</surname> <given-names>X.</given-names></name></person-group> (<year>2019</year>). <source>Multimodality white matter tract segmentation using CNN</source>.</citation></ref>
<ref id="ref31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dosovitskiy</surname> <given-names>A.</given-names></name> <name><surname>Beyer</surname> <given-names>L.</given-names></name> <name><surname>Kolesnikov</surname> <given-names>A.</given-names></name> <name><surname>Weissenborn</surname> <given-names>D.</given-names></name> <name><surname>Zhai</surname> <given-names>X.</given-names></name> <name><surname>Unterthiner</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>An image is worth 16x16 words: Transformers for image recognition at scale</article-title>. <source>arXiv</source></citation></ref>
<ref id="ref32"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Duclap</surname> <given-names>D.</given-names></name> <name><surname>Lebois</surname> <given-names>A.</given-names></name> <name><surname>Schmitt</surname> <given-names>B.</given-names></name> <name><surname>Riff</surname> <given-names>O.</given-names></name> <name><surname>Guevara</surname> <given-names>P.</given-names></name> <name><surname>Marrakchi-Kacem</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Connectomist-2.0: a novel diffusion analysis toolbox for BrainVISA</article-title>. In <conf-name>Proceedings of the 29th ESMRMB meeting</conf-name></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dumais</surname> <given-names>F.</given-names></name> <name><surname>Legarreta</surname> <given-names>J. H.</given-names></name> <name><surname>Lemaire</surname> <given-names>C.</given-names></name> <name><surname>Poulin</surname> <given-names>P.</given-names></name> <name><surname>Rheault</surname> <given-names>F.</given-names></name> <name><surname>Petit</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>FIESTA: autoencoders for accurate fiber segmentation in tractography</article-title>. <source>NeuroImage</source> <volume>279</volume>:<fpage>120288</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2023.120288</pub-id>, PMID: <pub-id pub-id-type="pmid">37495198</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>El Kouby</surname> <given-names>V.</given-names></name> <name><surname>Cointepas</surname> <given-names>Y.</given-names></name> <name><surname>Poupon</surname> <given-names>C.</given-names></name> <name><surname>Riviere</surname> <given-names>D.</given-names></name> <name><surname>Golestani</surname> <given-names>N.</given-names></name> <name><surname>Poline</surname> <given-names>J. B.</given-names></name> <etal/></person-group>. (<year>2005</year>). <article-title>MR diffusion-based inference of a fiber bundle model from a population of subjects</article-title>. <source>Med. Image Comput. Comput. Assist. Interv.</source> <volume>8</volume>, <fpage>196</fpage>&#x2013;<lpage>204</lpage>. doi: <pub-id pub-id-type="doi">10.1007/11566465_25</pub-id>, PMID: <pub-id pub-id-type="pmid">16685846</pub-id></citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Essayed</surname> <given-names>W. I.</given-names></name> <name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Unadkat</surname> <given-names>P.</given-names></name> <name><surname>Cosgrove</surname> <given-names>G. R.</given-names></name> <name><surname>Golby</surname> <given-names>A. J.</given-names></name> <name><surname>O'Donnell</surname> <given-names>L. J.</given-names></name></person-group> (<year>2017</year>). <article-title>White matter tractography for neurosurgical planning: a topography-based review of the current state of the art</article-title>. <source>Neuroimage Clin.</source> <volume>15</volume>, <fpage>659</fpage>&#x2013;<lpage>672</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.nicl.2017.06.011</pub-id>, PMID: <pub-id pub-id-type="pmid">28664037</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Fischer</surname> <given-names>B.</given-names></name> <name><surname>Modersitzki</surname> <given-names>J.</given-names></name></person-group> (<year>2003</year>). <source>FLIRT: A flexible image registration toolbox. International workshop on biomedical image registration</source>.</citation></ref>
<ref id="ref37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Frey</surname> <given-names>B. J.</given-names></name> <name><surname>Dueck</surname> <given-names>D.</given-names></name></person-group> (<year>2007</year>). <article-title>Clustering by passing messages between data points</article-title>. <source>Science</source> <volume>315</volume>, <fpage>972</fpage>&#x2013;<lpage>976</lpage>. doi: <pub-id pub-id-type="doi">10.1126/science.1136800</pub-id>, PMID: <pub-id pub-id-type="pmid">17218491</pub-id></citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Froeling</surname> <given-names>M.</given-names></name> <name><surname>Tax</surname> <given-names>C. M.</given-names></name> <name><surname>Vos</surname> <given-names>S. B.</given-names></name> <name><surname>Luijten</surname> <given-names>P. R.</given-names></name> <name><surname>Leemans</surname> <given-names>A.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x201C;MASSIVE&#x201D; brain dataset: multiple acquisitions for standardization of structural imaging validation and evaluation</article-title>. <source>Magn. Reson. Med.</source> <volume>77</volume>, <fpage>1797</fpage>&#x2013;<lpage>1809</lpage>. doi: <pub-id pub-id-type="doi">10.1002/mrm.26259</pub-id>, PMID: <pub-id pub-id-type="pmid">27173617</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fu</surname> <given-names>Y.</given-names></name> <name><surname>Lei</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Curran</surname> <given-names>W. J.</given-names></name> <name><surname>Liu</surname> <given-names>T.</given-names></name> <name><surname>Yang</surname> <given-names>X.</given-names></name></person-group> (<year>2020</year>). <article-title>Deep learning in medical image registration: a review</article-title>. <source>Phys. Med. Biol.</source> <volume>65</volume>:<fpage>20TR01</fpage>. doi: <pub-id pub-id-type="doi">10.1088/1361-6560/ab843e</pub-id>, PMID: <pub-id pub-id-type="pmid">32217829</pub-id></citation></ref>
<ref id="ref40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Garyfallidis</surname> <given-names>E.</given-names></name> <name><surname>Brett</surname> <given-names>M.</given-names></name> <name><surname>Correia</surname> <given-names>M. M.</given-names></name> <name><surname>Williams</surname> <given-names>G. B.</given-names></name> <name><surname>Nimmo-Smith</surname> <given-names>I.</given-names></name></person-group> (<year>2012</year>). <article-title>QuickBundles, a method for tractography simplification</article-title>. <source>Front. Neurosci.</source> <volume>6</volume>:<fpage>175</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2012.00175</pub-id>, PMID: <pub-id pub-id-type="pmid">23248578</pub-id></citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Garyfallidis</surname> <given-names>E.</given-names></name> <name><surname>C&#x00F4;t&#x00E9;</surname> <given-names>M.-A.</given-names></name> <name><surname>Rheault</surname> <given-names>F.</given-names></name> <name><surname>Sidhu</surname> <given-names>J.</given-names></name> <name><surname>Hau</surname> <given-names>J.</given-names></name> <name><surname>Petit</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Recognition of white matter bundles using local and global streamline-based registration and clustering</article-title>. <source>NeuroImage</source> <volume>170</volume>, <fpage>283</fpage>&#x2013;<lpage>295</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2017.07.015</pub-id>, PMID: <pub-id pub-id-type="pmid">28712994</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ghazi</surname> <given-names>N.</given-names></name> <name><surname>Aarabi</surname> <given-names>M. H.</given-names></name> <name><surname>Soltanian-Zadeh</surname> <given-names>H.</given-names></name></person-group> (<year>2023</year>). <article-title>Deep learning methods for identification of white matter fiber tracts: review of state-of-the-art and future prospective</article-title>. <source>Neuroinformatics</source> <volume>21</volume>, <fpage>517</fpage>&#x2013;<lpage>548</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12021-023-09636-4</pub-id></citation></ref>
<ref id="ref43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Glasser</surname> <given-names>M. F.</given-names></name> <name><surname>Smith</surname> <given-names>S. M.</given-names></name> <name><surname>Marcus</surname> <given-names>D. S.</given-names></name> <name><surname>Andersson</surname> <given-names>J. L.</given-names></name> <name><surname>Auerbach</surname> <given-names>E. J.</given-names></name> <name><surname>Behrens</surname> <given-names>T. E.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>The human connectome project's neuroimaging approach</article-title>. <source>Nat. Neurosci.</source> <volume>19</volume>, <fpage>1175</fpage>&#x2013;<lpage>1187</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.4361</pub-id>, PMID: <pub-id pub-id-type="pmid">27571196</pub-id></citation></ref>
<ref id="ref44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guevara</surname> <given-names>M.</given-names></name> <name><surname>Guevara</surname> <given-names>P.</given-names></name> <name><surname>Roman</surname> <given-names>C.</given-names></name> <name><surname>Mangin</surname> <given-names>J. F.</given-names></name></person-group> (<year>2020</year>). <article-title>Superficial white matter: a review on the dMRI analysis methods and applications</article-title>. <source>NeuroImage</source> <volume>212</volume>:<fpage>116673</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.116673</pub-id>, PMID: <pub-id pub-id-type="pmid">32114152</pub-id></citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guevara</surname> <given-names>M.</given-names></name> <name><surname>Roman</surname> <given-names>C.</given-names></name> <name><surname>Houenou</surname> <given-names>J.</given-names></name> <name><surname>Duclap</surname> <given-names>D.</given-names></name> <name><surname>Poupon</surname> <given-names>C.</given-names></name> <name><surname>Mangin</surname> <given-names>J. F.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Reproducibility of superficial white matter tracts using diffusion-weighted imaging tractography</article-title>. <source>NeuroImage</source> <volume>147</volume>, <fpage>703</fpage>&#x2013;<lpage>725</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2016.11.066</pub-id>, PMID: <pub-id pub-id-type="pmid">28034765</pub-id></citation></ref>
<ref id="ref46"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Gupta</surname> <given-names>V.</given-names></name> <name><surname>Thomopoulos</surname> <given-names>S. I.</given-names></name> <name><surname>Corbin</surname> <given-names>C. K.</given-names></name> <name><surname>Rashid</surname> <given-names>F.</given-names></name> <name><surname>Thompson</surname> <given-names>P. M.</given-names></name></person-group> (<year>2018</year>). <article-title>Fibernet 2.0: an automatic neural network based tool for clustering white matter fibers in the brain</article-title>. <conf-name>2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)</conf-name></citation></ref>
<ref id="ref47"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Gupta</surname> <given-names>V.</given-names></name> <name><surname>Thomopoulos</surname> <given-names>S. I.</given-names></name> <name><surname>Rashid</surname> <given-names>F. M.</given-names></name> <name><surname>Thompson</surname> <given-names>P. M</given-names></name></person-group>. (<year>2017</year>). <article-title>FiberNET: an ensemble deep learning framework for clustering white matter fibers. Medical image computing and computer assisted intervention&#x2013;MICCAI 2017</article-title>: <conf-name>20th International Conference, Quebec City, QC, Canada, September 11&#x2013;13, 2017, Proceedings, Part I 20</conf-name></citation></ref>
<ref id="ref48"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Hatamizadeh</surname> <given-names>A.</given-names></name> <name><surname>Nath</surname> <given-names>V.</given-names></name> <name><surname>Tang</surname> <given-names>Y.</given-names></name> <name><surname>Yang</surname> <given-names>D.</given-names></name> <name><surname>Roth</surname> <given-names>H. R.</given-names></name> <name><surname>Xu</surname> <given-names>D.</given-names></name></person-group> (<year>2021</year>). <source>Swin UNETR: Swin transformers for semantic segmentation of brain tumors in MRI images</source> (pp. <fpage>272</fpage>&#x2013;<lpage>284</lpage>). <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>.</citation></ref>
<ref id="ref49"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Heker</surname> <given-names>M.</given-names></name> <name><surname>Amer</surname> <given-names>R.</given-names></name> <name><surname>Alexandroni</surname> <given-names>G.</given-names></name> <name><surname>Greenspan</surname> <given-names>H.</given-names></name></person-group> (<year>2016</year>). <article-title>Automated supervised segmentation of anatomical fiber tracts using an AdaBoost framework</article-title>. <conf-name>2016 IEEE International Conference on the Science of Electrical Engineering (ICSEE)</conf-name></citation></ref>
<ref id="ref50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hofman</surname> <given-names>A.</given-names></name> <name><surname>Brusselle</surname> <given-names>G. G.</given-names></name> <name><surname>Murad</surname> <given-names>S. D.</given-names></name> <name><surname>van Duijn</surname> <given-names>C. M.</given-names></name> <name><surname>Franco</surname> <given-names>O. H.</given-names></name> <name><surname>Goedegebure</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>The Rotterdam study: 2016 objectives and design update</article-title>. <source>Eur. J. Epidemiol.</source> <volume>30</volume>, <fpage>661</fpage>&#x2013;<lpage>708</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10654-015-0082-x</pub-id>, PMID: <pub-id pub-id-type="pmid">26386597</pub-id></citation></ref>
<ref id="ref51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>Z.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Wei</surname> <given-names>Y.</given-names></name> <name><surname>Huang</surname> <given-names>L.</given-names></name> <name><surname>Shi</surname> <given-names>H.</given-names></name> <name><surname>Liu</surname> <given-names>W.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>CCNet: Criss-cross attention for semantic segmentation</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell.</source> <volume>45</volume>, <fpage>6896</fpage>&#x2013;<lpage>6908</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TPAMI.2020.3007032</pub-id>, PMID: <pub-id pub-id-type="pmid">32750802</pub-id></citation></ref>
<ref id="ref52"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>H.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Wakana</surname> <given-names>S.</given-names></name> <name><surname>Zhang</surname> <given-names>W.</given-names></name> <name><surname>Ren</surname> <given-names>T.</given-names></name> <name><surname>Richards</surname> <given-names>L. J.</given-names></name> <etal/></person-group>. (<year>2006</year>). <article-title>White and gray matter development in human fetal, newborn and pediatric brains</article-title>. <source>NeuroImage</source> <volume>33</volume>, <fpage>27</fpage>&#x2013;<lpage>38</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2006.06.009</pub-id>, PMID: <pub-id pub-id-type="pmid">16905335</pub-id></citation></ref>
<ref id="ref53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ille</surname> <given-names>S.</given-names></name> <name><surname>Ohlerth</surname> <given-names>A.-K.</given-names></name> <name><surname>Colle</surname> <given-names>D.</given-names></name> <name><surname>Colle</surname> <given-names>H.</given-names></name> <name><surname>Dragoy</surname> <given-names>O.</given-names></name> <name><surname>Goodden</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Augmented reality for the virtual dissection of white matter pathways</article-title>. <source>Acta Neurochir.</source> <volume>163</volume>, <fpage>895</fpage>&#x2013;<lpage>903</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00701-020-04545-w</pub-id>, PMID: <pub-id pub-id-type="pmid">33026532</pub-id></citation></ref>
<ref id="ref54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Isensee</surname> <given-names>F.</given-names></name> <name><surname>Jaeger</surname> <given-names>P. F.</given-names></name> <name><surname>Kohl</surname> <given-names>S. A. A.</given-names></name> <name><surname>Petersen</surname> <given-names>J.</given-names></name> <name><surname>Maier-Hein</surname> <given-names>K. H.</given-names></name></person-group> (<year>2021</year>). <article-title>nnU-net: a self-configuring method for deep learning-based biomedical image segmentation</article-title>. <source>Nat. Methods</source> <volume>18</volume>, <fpage>203</fpage>&#x2013;<lpage>211</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41592-020-01008-z</pub-id>, PMID: <pub-id pub-id-type="pmid">33288961</pub-id></citation></ref>
<ref id="ref55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jeurissen</surname> <given-names>B.</given-names></name> <name><surname>Descoteaux</surname> <given-names>M.</given-names></name> <name><surname>Mori</surname> <given-names>S.</given-names></name> <name><surname>Leemans</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>Diffusion MRI fiber tractography of the brain</article-title>. <source>NMR Biomed.</source> <volume>32</volume>:<fpage>e3785</fpage>. doi: <pub-id pub-id-type="doi">10.1002/nbm.3785</pub-id></citation></ref>
<ref id="ref56"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Jin</surname> <given-names>Y.</given-names></name> <name><surname>Ceting&#x00FC;l</surname> <given-names>H. E.</given-names></name></person-group> (<year>2015</year>). <article-title>Tractography-embedded white matter stream clustering</article-title>. <conf-name>2015 IEEE 12th International Symposium on Biomedical Imaging (ISBI)</conf-name></citation></ref>
<ref id="ref57"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Jin</surname> <given-names>Y.</given-names></name> <name><surname>Shi</surname> <given-names>Y.</given-names></name> <name><surname>Zhan</surname> <given-names>L.</given-names></name> <name><surname>De Zubicaray</surname> <given-names>G. I.</given-names></name> <name><surname>McMahon</surname> <given-names>K. L.</given-names></name> <name><surname>Martin</surname> <given-names>N. G.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Labeling white matter tracts in HARDI by fusing multiple tract atlases with applications to genetics</article-title>. <conf-name>2013 IEEE 10th International Symposium on Biomedical Imaging</conf-name></citation></ref>
<ref id="ref58"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jordan</surname> <given-names>K. M.</given-names></name> <name><surname>Lauricella</surname> <given-names>M.</given-names></name> <name><surname>Licata</surname> <given-names>A. E.</given-names></name> <name><surname>Sacco</surname> <given-names>S.</given-names></name> <name><surname>Asteggiano</surname> <given-names>C.</given-names></name> <name><surname>Wang</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Cortically constrained shape recognition: automated white matter tract segmentation validated in the pediatric brain</article-title>. <source>J. Neuroimaging</source> <volume>31</volume>, <fpage>758</fpage>&#x2013;<lpage>772</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jon.12854</pub-id>, PMID: <pub-id pub-id-type="pmid">33878229</pub-id></citation></ref>
<ref id="ref59"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kamali</surname> <given-names>T.</given-names></name> <name><surname>Stashuk</surname> <given-names>D.</given-names></name></person-group> (<year>2016</year>). <article-title>Automated segmentation of white matter fiber bundles using diffusion tensor imaging data and a new density based clustering algorithm</article-title>. <source>Artif. Intell. Med.</source> <volume>73</volume>, <fpage>14</fpage>&#x2013;<lpage>22</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.artmed.2016.09.003</pub-id>, PMID: <pub-id pub-id-type="pmid">27926378</pub-id></citation></ref>
<ref id="ref60"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kamnitsas</surname> <given-names>K.</given-names></name> <name><surname>Ledig</surname> <given-names>C.</given-names></name> <name><surname>Newcombe</surname> <given-names>V. F.</given-names></name> <name><surname>Simpson</surname> <given-names>J. P.</given-names></name> <name><surname>Kane</surname> <given-names>A. D.</given-names></name> <name><surname>Menon</surname> <given-names>D. K.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Efficient multi-scale 3D CNN with fully connected CRF for accurate brain lesion segmentation</article-title>. <source>Med. Image Anal.</source> <volume>36</volume>, <fpage>61</fpage>&#x2013;<lpage>78</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2016.10.004</pub-id>, PMID: <pub-id pub-id-type="pmid">27865153</pub-id></citation></ref>
<ref id="ref61"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Kumar</surname> <given-names>K.</given-names></name> <name><surname>Desrosiers</surname> <given-names>C.</given-names></name></person-group> (<year>2016</year>). <article-title>A sparse coding approach for the efficient representation and segmentation of white matter fibers</article-title>. <conf-name>2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI)</conf-name></citation></ref>
<ref id="ref62"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Labra</surname> <given-names>N.</given-names></name> <name><surname>Guevara</surname> <given-names>P.</given-names></name> <name><surname>Duclap</surname> <given-names>D.</given-names></name> <name><surname>Houenou</surname> <given-names>J.</given-names></name> <name><surname>Poupon</surname> <given-names>C.</given-names></name> <name><surname>Mangin</surname> <given-names>J.-F.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Fast automatic segmentation of white matter streamlines based on a multi-subject bundle atlas</article-title>. <source>Neuroinformatics</source> <volume>15</volume>, <fpage>71</fpage>&#x2013;<lpage>86</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12021-016-9316-7</pub-id>, PMID: <pub-id pub-id-type="pmid">27722821</pub-id></citation></ref>
<ref id="ref63"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Lacante</surname> <given-names>M.</given-names></name> <name><surname>Van Esbroeck</surname> <given-names>R.</given-names></name> <name><surname>De Vos</surname> <given-names>A.</given-names></name></person-group> (<year>2008</year>). <source>Met een dynamische keuzebegeleiding naar een effectieve keuzebekwaamheid: Eindrapport OBPWO projecten 04.01 &#x0026; 02.02 en Ministerieel Initiatief</source>. <publisher-loc>Brussel/Leuven</publisher-loc>: <publisher-name>Vrije Universiteit Brussel/Katholieke Universiteit Leuven</publisher-name>.</citation></ref>
<ref id="ref64"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lazar</surname> <given-names>M.</given-names></name> <name><surname>Alexander</surname> <given-names>A.</given-names></name> <name><surname>Thottakara</surname> <given-names>P.</given-names></name> <name><surname>Badie</surname> <given-names>B.</given-names></name> <name><surname>Field</surname> <given-names>A.</given-names></name></person-group> (<year>2006</year>). <article-title>White matter reorganization after surgical resection of brain tumors and vascular malformations</article-title>. <source>Am. J. Neuroradiol.</source> <volume>27</volume>, <fpage>1258</fpage>&#x2013;<lpage>1271</lpage>. PMID: <pub-id pub-id-type="pmid">16775277</pub-id></citation></ref>
<ref id="ref65"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Le Bihan</surname> <given-names>D.</given-names></name> <name><surname>Johansen-Berg</surname> <given-names>H.</given-names></name></person-group> (<year>2012</year>). <article-title>Diffusion MRI at 25: exploring brain tissue structure and function</article-title>. <source>NeuroImage</source> <volume>61</volume>, <fpage>324</fpage>&#x2013;<lpage>341</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.11.006</pub-id>, PMID: <pub-id pub-id-type="pmid">22120012</pub-id></citation></ref>
<ref id="ref66"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>S.</given-names></name> <name><surname>Chen</surname> <given-names>Z.</given-names></name> <name><surname>Guo</surname> <given-names>W.</given-names></name> <name><surname>Zeng</surname> <given-names>Q.</given-names></name> <name><surname>Feng</surname> <given-names>Y.</given-names></name></person-group> (<year>2021</year>) <source>Two parallel stages deep learning network for anterior visual pathway segmentation</source>. <publisher-name>Computational Diffusion MRI, International MICCAI Workshop</publisher-name>: <publisher-loc>Lima, Peru</publisher-loc></citation></ref>
<ref id="ref67"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>B.</given-names></name> <name><surname>De Groot</surname> <given-names>M.</given-names></name> <name><surname>Steketee</surname> <given-names>R. M.</given-names></name> <name><surname>Meijboom</surname> <given-names>R.</given-names></name> <name><surname>Smits</surname> <given-names>M.</given-names></name> <name><surname>Vernooij</surname> <given-names>M. W.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Neuro4Neuro: a neural network approach for neural tract segmentation using large-scale population-based diffusion imaging</article-title>. <source>NeuroImage</source> <volume>218</volume>:<fpage>116993</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.116993</pub-id>, PMID: <pub-id pub-id-type="pmid">32492510</pub-id></citation></ref>
<ref id="ref68"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Feng</surname> <given-names>J.</given-names></name> <name><surname>Chen</surname> <given-names>G.</given-names></name> <name><surname>Wu</surname> <given-names>Y.</given-names></name> <name><surname>Hong</surname> <given-names>Y.</given-names></name> <name><surname>Yap</surname> <given-names>P. T.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>DeepBundle: fiber bundle parcellation with graph convolution neural networks</article-title>. <source>Graph. Learn. Med. Imaging</source> <volume>11849</volume>, <fpage>88</fpage>&#x2013;<lpage>95</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-35817-4_11</pub-id>, PMID: <pub-id pub-id-type="pmid">34485996</pub-id></citation></ref>
<ref id="ref69"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>W.</given-names></name> <name><surname>Lu</surname> <given-names>Q.</given-names></name> <name><surname>Zhuo</surname> <given-names>Z.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Duan</surname> <given-names>Y.</given-names></name> <name><surname>Yu</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Volumetric segmentation of white matter tracts with label embedding</article-title>. <source>NeuroImage</source> <volume>250</volume>:<fpage>118934</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2022.118934</pub-id>, PMID: <pub-id pub-id-type="pmid">35091078</pub-id></citation></ref>
<ref id="ref70"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>W.</given-names></name> <name><surname>Zhuo</surname> <given-names>Z.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Ye</surname> <given-names>C.</given-names></name></person-group> (<year>2023</year>). <article-title>One-shot segmentation of novel white matter tracts via extensive data augmentation and adaptive knowledge transfer</article-title>. <source>Med. Image Anal.</source> <volume>90</volume>:<fpage>102968</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2023.102968</pub-id>, PMID: <pub-id pub-id-type="pmid">37729793</pub-id></citation></ref>
<ref id="ref71"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Logiraj</surname> <given-names>K.</given-names></name> <name><surname>Sotheeswaran</surname> <given-names>S.</given-names></name> <name><surname>Jeyasuthan</surname> <given-names>M.</given-names></name> <name><surname>Ratnarajah</surname> <given-names>N.</given-names></name></person-group> (<year>2021a</year>). <article-title>Clustering of major white matter bundles using tract-specific geometric curve features</article-title>. <conf-name>2021 10th International Conference on Information and Automation for Sustainability (ICIAfS)</conf-name></citation></ref>
<ref id="ref72"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Logiraj</surname> <given-names>K.</given-names></name> <name><surname>Thanikasalam</surname> <given-names>K.</given-names></name> <name><surname>Sotheeswaran</surname> <given-names>S.</given-names></name> <name><surname>Ratnarajah</surname> <given-names>N.</given-names></name></person-group> (<year>2021b</year>). <article-title>TractNet: a deep learning approach on 3D curves for segmenting white matter fibre bundles</article-title>. <conf-name>2021 21st International Conference on Advances in ICT for Emerging Regions (ICter)</conf-name></citation></ref>
<ref id="ref73"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>Q.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Ye</surname> <given-names>C</given-names></name></person-group>. (<year>2020</year>). <article-title>White matter tract segmentation with self-supervised learning. Medical image computing and computer assisted intervention&#x2013;MICCAI 2020</article-title>: <conf-name>23rd International Conference, Lima, Peru, October 4&#x2013;8, 2020, Proceedings, Part VII 23</conf-name></citation></ref>
<ref id="ref74"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>Q.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Ye</surname> <given-names>C.</given-names></name></person-group> (<year>2021</year>). <article-title>Volumetric white matter tract segmentation with nested self-supervised learning using sequential pretext tasks</article-title>. <source>Med. Image Anal.</source> <volume>72</volume>:<fpage>102094</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2021.102094</pub-id>, PMID: <pub-id pub-id-type="pmid">34004493</pub-id></citation></ref>
<ref id="ref75"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>Q.</given-names></name> <name><surname>Liu</surname> <given-names>W.</given-names></name> <name><surname>Zhuo</surname> <given-names>Z.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Duan</surname> <given-names>Y.</given-names></name> <name><surname>Yu</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>A transfer learning approach to few-shot segmentation of novel white matter tracts</article-title>. <source>Med. Image Anal.</source> <volume>79</volume>:<fpage>102454</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2022.102454</pub-id>, PMID: <pub-id pub-id-type="pmid">35468555</pub-id></citation></ref>
<ref id="ref76"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lucena</surname> <given-names>O.</given-names></name> <name><surname>Borges</surname> <given-names>P.</given-names></name> <name><surname>Cardoso</surname> <given-names>J.</given-names></name> <name><surname>Ashkan</surname> <given-names>K.</given-names></name> <name><surname>Sparks</surname> <given-names>R.</given-names></name> <name><surname>Ourselin</surname> <given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>Informative and reliable tract segmentation for preoperative planning</article-title>. <source>Front. Radiol.</source> <volume>2</volume>:<fpage>866974</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fradi.2022.866974</pub-id>, PMID: <pub-id pub-id-type="pmid">37492653</pub-id></citation></ref>
<ref id="ref77"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Makropoulos</surname> <given-names>A.</given-names></name> <name><surname>Robinson</surname> <given-names>E. C.</given-names></name> <name><surname>Schuh</surname> <given-names>A.</given-names></name> <name><surname>Wright</surname> <given-names>R.</given-names></name> <name><surname>Fitzgibbon</surname> <given-names>S.</given-names></name> <name><surname>Bozek</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>The developing human connectome project: a minimal processing pipeline for neonatal cortical surface reconstruction</article-title>. <source>NeuroImage</source> <volume>173</volume>, <fpage>88</fpage>&#x2013;<lpage>112</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2018.01.054</pub-id>, PMID: <pub-id pub-id-type="pmid">29409960</pub-id></citation></ref>
<ref id="ref78"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mancini</surname> <given-names>M.</given-names></name> <name><surname>Vos</surname> <given-names>S. B.</given-names></name> <name><surname>Vakharia</surname> <given-names>V. N.</given-names></name> <name><surname>O'Keeffe</surname> <given-names>A. G.</given-names></name> <name><surname>Trimmel</surname> <given-names>K.</given-names></name> <name><surname>Barkhof</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Automated fiber tract reconstruction for surgery planning: extensive validation in language-related white matter tracts</article-title>. <source>Neuroimage Clin.</source> <volume>23</volume>:<fpage>101883</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.nicl.2019.101883</pub-id>, PMID: <pub-id pub-id-type="pmid">31163386</pub-id></citation></ref>
<ref id="ref79"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Marek</surname> <given-names>K.</given-names></name> <name><surname>Jennings</surname> <given-names>D.</given-names></name> <name><surname>Lasch</surname> <given-names>S.</given-names></name> <name><surname>Siderowf</surname> <given-names>A.</given-names></name> <name><surname>Tanner</surname> <given-names>C.</given-names></name> <name><surname>Simuni</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>The Parkinson progression marker initiative (PPMI)</article-title>. <source>Prog. Neurobiol.</source> <volume>95</volume>, <fpage>629</fpage>&#x2013;<lpage>635</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.pneurobio.2011.09.005</pub-id>, PMID: <pub-id pub-id-type="pmid">21930184</pub-id></citation></ref>
<ref id="ref80"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mazoyer</surname> <given-names>B.</given-names></name> <name><surname>Mellet</surname> <given-names>E.</given-names></name> <name><surname>Perchey</surname> <given-names>G.</given-names></name> <name><surname>Zago</surname> <given-names>L.</given-names></name> <name><surname>Crivello</surname> <given-names>F.</given-names></name> <name><surname>Jobard</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>BIL&#x0026;GIN: a neuroimaging, cognitive, behavioral, and genetic database for the study of human brain lateralization</article-title>. <source>NeuroImage</source> <volume>124</volume>, <fpage>1225</fpage>&#x2013;<lpage>1231</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.02.071</pub-id></citation></ref>
<ref id="ref81"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Milletari</surname> <given-names>F.</given-names></name> <name><surname>Navab</surname> <given-names>N.</given-names></name> <name><surname>Ahmadi</surname> <given-names>S.-A.</given-names></name></person-group> (<year>2016</year>). <article-title>V-net: fully convolutional neural networks for volumetric medical image segmentation</article-title>. <conf-name>2016 fourth international conference on 3D vision (3DV)</conf-name></citation></ref>
<ref id="ref82"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mori</surname> <given-names>S.</given-names></name> <name><surname>Crain</surname> <given-names>B. J.</given-names></name> <name><surname>Chacko</surname> <given-names>V. P.</given-names></name> <name><surname>Van Zijl</surname> <given-names>P. C.</given-names></name></person-group> (<year>1999</year>). <article-title>Three-dimensional tracking of axonal projections in the brain by magnetic resonance imaging</article-title>. <source>Ann. Neurol.</source> <volume>45</volume>, <fpage>265</fpage>&#x2013;<lpage>269</lpage>. doi: <pub-id pub-id-type="doi">10.1002/1531-8249(199902)45:2&#x003C;265::AID-ANA21&#x003E;3.0.CO;2-3</pub-id>, PMID: <pub-id pub-id-type="pmid">9989633</pub-id></citation></ref>
<ref id="ref83"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mori</surname> <given-names>S.</given-names></name> <name><surname>van Zijl</surname> <given-names>P.</given-names></name></person-group> (<year>2007</year>). <article-title>Human white matter atlas</article-title>. <source>Am. J. Psychiatry</source> <volume>164</volume>:<fpage>1005</fpage>. doi: <pub-id pub-id-type="doi">10.1176/ajp.2007.164.7.1005</pub-id></citation></ref>
<ref id="ref84"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Moulavi</surname> <given-names>D.</given-names></name> <name><surname>Jaskowiak</surname> <given-names>P. A.</given-names></name> <name><surname>Campello</surname> <given-names>R. J.</given-names></name> <name><surname>Zimek</surname> <given-names>A.</given-names></name> <name><surname>Sander</surname> <given-names>J.</given-names></name></person-group> (<year>2014</year>). <article-title>Density-based clustering validation</article-title>. <conf-name>Proceedings of the 2014 SIAM international conference on data mining</conf-name></citation></ref>
<ref id="ref85"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Nelkenbaum</surname> <given-names>I.</given-names></name> <name><surname>Tsarfaty</surname> <given-names>G.</given-names></name> <name><surname>Kiryati</surname> <given-names>N.</given-names></name> <name><surname>Konen</surname> <given-names>E.</given-names></name> <name><surname>Mayer</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Automatic segmentation of white matter tracts using multiple brain MRI sequences</article-title>. <conf-name>2020 IEEE 17th International Symposium on Biomedical Imaging (ISBI)</conf-name></citation></ref>
<ref id="ref86"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ngattai Lam</surname> <given-names>P. D.</given-names></name> <name><surname>Belhomme</surname> <given-names>G.</given-names></name> <name><surname>Ferrall</surname> <given-names>J.</given-names></name> <name><surname>Patterson</surname> <given-names>B.</given-names></name> <name><surname>Styner</surname> <given-names>M.</given-names></name> <name><surname>Prieto</surname> <given-names>J. C.</given-names></name></person-group> (<year>2018</year>). <article-title>TRAFIC: Fiber tract classification using deep learning</article-title>. <source>Proc. SPIE Int. Soc. Opt. Eng.</source> <volume>10574</volume>:<fpage>1057412</fpage>. doi: <pub-id pub-id-type="doi">10.1117/12.2293931</pub-id>, PMID: <pub-id pub-id-type="pmid">29780197</pub-id></citation></ref>
<ref id="ref87"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>O&#x2019;Donnell</surname> <given-names>L. J.</given-names></name> <name><surname>Suter</surname> <given-names>Y.</given-names></name> <name><surname>Rigolo</surname> <given-names>L.</given-names></name> <name><surname>Kahali</surname> <given-names>P.</given-names></name> <name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Norton</surname> <given-names>I.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Automated white matter fiber tract identification in patients with brain tumors</article-title>. <source>Neuroimage Clin.</source> <volume>13</volume>, <fpage>138</fpage>&#x2013;<lpage>153</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.nicl.2016.11.023</pub-id>, PMID: <pub-id pub-id-type="pmid">27981029</pub-id></citation></ref>
<ref id="ref88"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Ocegueda</surname> <given-names>O.</given-names></name> <name><surname>Rivera</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Multi-tensor field spectral segmentation for white matter fiber bundle classification</article-title>. <conf-name>2013 IEEE 10th International Symposium on Biomedical Imaging</conf-name></citation></ref>
<ref id="ref89"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>O'Donnell</surname> <given-names>L. J.</given-names></name> <name><surname>Westin</surname> <given-names>C.-F.</given-names></name></person-group> (<year>2007</year>). <article-title>Automatic tractography segmentation using a high-dimensional white matter atlas</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>26</volume>, <fpage>1562</fpage>&#x2013;<lpage>1575</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2007.906785</pub-id>, PMID: <pub-id pub-id-type="pmid">18041271</pub-id></citation></ref>
<ref id="ref90"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Oishi</surname> <given-names>K.</given-names></name> <name><surname>Faria</surname> <given-names>A.</given-names></name> <name><surname>Jiang</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Akhter</surname> <given-names>K.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>Atlas-based whole brain white matter analysis using large deformation diffeomorphic metric mapping: application to normal elderly and Alzheimer's disease participants</article-title>. <source>NeuroImage</source> <volume>46</volume>, <fpage>486</fpage>&#x2013;<lpage>499</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2009.01.002</pub-id>, PMID: <pub-id pub-id-type="pmid">19385016</pub-id></citation></ref>
<ref id="ref91"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Oishi</surname> <given-names>K.</given-names></name> <name><surname>Mori</surname> <given-names>S.</given-names></name> <name><surname>Donohue</surname> <given-names>P. K.</given-names></name> <name><surname>Ernst</surname> <given-names>T.</given-names></name> <name><surname>Anderson</surname> <given-names>L.</given-names></name> <name><surname>Buchthal</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Multi-contrast human neonatal brain atlas: application to normal neonate development analysis</article-title>. <source>NeuroImage</source> <volume>56</volume>, <fpage>8</fpage>&#x2013;<lpage>20</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.01.051</pub-id>, PMID: <pub-id pub-id-type="pmid">21276861</pub-id></citation></ref>
<ref id="ref92"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Oliveira</surname> <given-names>F. P.</given-names></name> <name><surname>Tavares</surname> <given-names>J. M. R.</given-names></name></person-group> (<year>2014</year>). <article-title>Medical image registration: a review</article-title>. <source>Comput. Methods Biomech. Biomed. Engin.</source> <volume>17</volume>, <fpage>73</fpage>&#x2013;<lpage>93</lpage>. doi: <pub-id pub-id-type="doi">10.1080/10255842.2012.670855</pub-id></citation></ref>
<ref id="ref93"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Peretzke</surname> <given-names>R.</given-names></name> <name><surname>Maier-Hein</surname> <given-names>K. H.</given-names></name> <name><surname>Bohn</surname> <given-names>J.</given-names></name> <name><surname>Kirchhoff</surname> <given-names>Y.</given-names></name> <name><surname>Roy</surname> <given-names>S.</given-names></name> <name><surname>Oberli-Palma</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>atTRACTive: semi-automatic white matter tract segmentation using active learning</article-title>. <conf-name>International Conference on Medical Image Computing and Computer-Assisted Intervention</conf-name></citation></ref>
<ref id="ref94"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Poldrack</surname> <given-names>R. A.</given-names></name> <name><surname>Congdon</surname> <given-names>E.</given-names></name> <name><surname>Triplett</surname> <given-names>W.</given-names></name> <name><surname>Gorgolewski</surname> <given-names>K.</given-names></name> <name><surname>Karlsgodt</surname> <given-names>K.</given-names></name> <name><surname>Mumford</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>A phenome-wide examination of neural and cognitive function</article-title>. <source>Scientific Data</source> <volume>3</volume>, <fpage>1</fpage>&#x2013;<lpage>12</lpage>. doi: <pub-id pub-id-type="doi">10.1038/sdata.2016.110</pub-id></citation></ref>
<ref id="ref95"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Pomiecko</surname> <given-names>K.</given-names></name> <name><surname>Sestili</surname> <given-names>C.</given-names></name> <name><surname>Fissell</surname> <given-names>K.</given-names></name> <name><surname>Pathak</surname> <given-names>S.</given-names></name> <name><surname>Okonkwo</surname> <given-names>D.</given-names></name> <name><surname>Schneider</surname> <given-names>W.</given-names></name></person-group> (<year>2019</year>). <article-title>3D convolutional neural network segmentation of white matter tract masks from MR diffusion anisotropy maps</article-title>. <conf-name>2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)</conf-name></citation></ref>
<ref id="ref96"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Poulin</surname> <given-names>P.</given-names></name> <name><surname>Jorgens</surname> <given-names>D.</given-names></name> <name><surname>Jodoin</surname> <given-names>P. M.</given-names></name> <name><surname>Descoteaux</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Tractography and machine learning: current state and open challenges</article-title>. <source>Magn. Reson. Imaging</source> <volume>64</volume>, <fpage>37</fpage>&#x2013;<lpage>48</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.mri.2019.04.013</pub-id>, PMID: <pub-id pub-id-type="pmid">31078615</pub-id></citation></ref>
<ref id="ref97"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Poulin</surname> <given-names>P.</given-names></name> <name><surname>Theaud</surname> <given-names>G.</given-names></name> <name><surname>Rheault</surname> <given-names>F.</given-names></name> <name><surname>St-Onge</surname> <given-names>E.</given-names></name> <name><surname>Bore</surname> <given-names>A.</given-names></name> <name><surname>Renauld</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>TractoInferno-A large-scale, open-source, multi-site database for machine learning dMRI tractography</article-title>. <source>Scientific Data</source> <volume>9</volume>:<fpage>725</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41597-022-01833-1</pub-id>, PMID: <pub-id pub-id-type="pmid">36433966</pub-id></citation></ref>
<ref id="ref98"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Poupon</surname> <given-names>C.</given-names></name> <name><surname>Rieul</surname> <given-names>B.</given-names></name> <name><surname>Kezele</surname> <given-names>I.</given-names></name> <name><surname>Perrin</surname> <given-names>M.</given-names></name> <name><surname>Poupon</surname> <given-names>F.</given-names></name> <name><surname>Mangin</surname> <given-names>J. F.</given-names></name></person-group> (<year>2008</year>). <article-title>New diffusion phantoms dedicated to the study and validation of high-angular-resolution diffusion imaging (HARDI) models</article-title>. <source>Magn. Reson. Med.</source> <volume>60</volume>, <fpage>1276</fpage>&#x2013;<lpage>1283</lpage>. doi: <pub-id pub-id-type="doi">10.1002/mrm.21789</pub-id>, PMID: <pub-id pub-id-type="pmid">19030160</pub-id></citation></ref>
<ref id="ref99"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pujol</surname> <given-names>S.</given-names></name> <name><surname>Wells</surname> <given-names>W.</given-names></name> <name><surname>Pierpaoli</surname> <given-names>C.</given-names></name> <name><surname>Brun</surname> <given-names>C.</given-names></name> <name><surname>Gee</surname> <given-names>J.</given-names></name> <name><surname>Cheng</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>The DTI challenge: toward standardized evaluation of diffusion tensor imaging tractography for neurosurgery</article-title>. <source>J. Neuroimaging</source> <volume>25</volume>, <fpage>875</fpage>&#x2013;<lpage>882</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jon.12283</pub-id>, PMID: <pub-id pub-id-type="pmid">26259925</pub-id></citation></ref>
<ref id="ref100"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Qi</surname> <given-names>C. R.</given-names></name> <name><surname>Su</surname> <given-names>H.</given-names></name> <name><surname>Mo</surname> <given-names>K.</given-names></name> <name><surname>Guibas</surname> <given-names>L. J.</given-names></name></person-group> (<year>2017</year>). <article-title>Pointnet: deep learning on point sets for 3d classification and segmentation</article-title>. <conf-name>Proceedings of the IEEE conference on computer vision and pattern recognition</conf-name></citation></ref>
<ref id="ref101"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Radwan</surname> <given-names>A. M.</given-names></name> <name><surname>Sunaert</surname> <given-names>S.</given-names></name> <name><surname>Schilling</surname> <given-names>K.</given-names></name> <name><surname>Descoteaux</surname> <given-names>M.</given-names></name> <name><surname>Landman</surname> <given-names>B. A.</given-names></name> <name><surname>Vandenbulcke</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>An atlas of white matter anatomy, its variability, and reproducibility based on constrained spherical deconvolution of diffusion MRI</article-title>. <source>NeuroImage</source> <volume>254</volume>:<fpage>119029</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2022.119029</pub-id>, PMID: <pub-id pub-id-type="pmid">35231632</pub-id></citation></ref>
<ref id="ref102"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ratnarajah</surname> <given-names>N.</given-names></name> <name><surname>Qiu</surname> <given-names>A.</given-names></name></person-group> (<year>2014</year>). <article-title>Multi-label segmentation of white matter structures: application to neonatal brains</article-title>. <source>NeuroImage</source> <volume>102</volume>, <fpage>913</fpage>&#x2013;<lpage>922</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2014.08.001</pub-id>, PMID: <pub-id pub-id-type="pmid">25111473</pub-id></citation></ref>
<ref id="ref103"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Reynolds</surname> <given-names>D. A.</given-names></name> <name><surname>Quatieri</surname> <given-names>T. F.</given-names></name> <name><surname>Dunn</surname> <given-names>R. B.</given-names></name></person-group> (<year>2000</year>). <article-title>Speaker verification using adapted Gaussian mixture models</article-title>. <source>Digit. Signal Process.</source> <volume>10</volume>, <fpage>19</fpage>&#x2013;<lpage>41</lpage>. doi: <pub-id pub-id-type="doi">10.1006/dspr.1999.0361</pub-id></citation></ref>
<ref id="ref104"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rheault</surname> <given-names>F.</given-names></name> <name><surname>De Benedictis</surname> <given-names>A.</given-names></name> <name><surname>Daducci</surname> <given-names>A.</given-names></name> <name><surname>Maffei</surname> <given-names>C.</given-names></name> <name><surname>Tax</surname> <given-names>C. M.</given-names></name> <name><surname>Romascano</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Tractostorm: the what, why, and how of tractography dissection reproducibility</article-title>. <source>Hum. Brain Mapp.</source> <volume>41</volume>, <fpage>1859</fpage>&#x2013;<lpage>1874</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.24917</pub-id>, PMID: <pub-id pub-id-type="pmid">31925871</pub-id></citation></ref>
<ref id="ref105"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rheault</surname> <given-names>F.</given-names></name> <name><surname>Schilling</surname> <given-names>K. G.</given-names></name> <name><surname>Obaid</surname> <given-names>S.</given-names></name> <name><surname>Begnoche</surname> <given-names>J. P.</given-names></name> <name><surname>Cutting</surname> <given-names>L. E.</given-names></name> <name><surname>Descoteaux</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2022a</year>). <article-title>The influence of regions of interest on tractography virtual dissection protocols: general principles to learn and to follow</article-title>. <source>Brain Struct. Funct.</source> <volume>227</volume>, <fpage>2191</fpage>&#x2013;<lpage>2207</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00429-022-02518-6</pub-id></citation></ref>
<ref id="ref106"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rheault</surname> <given-names>F.</given-names></name> <name><surname>Schilling</surname> <given-names>K. G.</given-names></name> <name><surname>Valcourt-Caron</surname> <given-names>A.</given-names></name> <name><surname>Th&#x00E9;berge</surname> <given-names>A.</given-names></name> <name><surname>Poirier</surname> <given-names>C.</given-names></name> <name><surname>Grenier</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2022b</year>). <article-title>Tractostorm 2: optimizing tractography dissection reproducibility with segmentation protocol dissemination</article-title>. <source>Hum. Brain Mapp.</source> <volume>43</volume>, <fpage>2134</fpage>&#x2013;<lpage>2147</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.25777</pub-id>, PMID: <pub-id pub-id-type="pmid">35141980</pub-id></citation></ref>
<ref id="ref107"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Roman</surname> <given-names>C.</given-names></name> <name><surname>Guevara</surname> <given-names>M.</given-names></name> <name><surname>Valenzuela</surname> <given-names>R.</given-names></name> <name><surname>Figueroa</surname> <given-names>M.</given-names></name> <name><surname>Houenou</surname> <given-names>J.</given-names></name> <name><surname>Duclap</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Clustering of whole-brain white matter short association bundles using HARDI data</article-title>. <source>Front. Neuroinform.</source> <volume>11</volume>:<fpage>73</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fninf.2017.00073</pub-id>, PMID: <pub-id pub-id-type="pmid">29311886</pub-id></citation></ref>
<ref id="ref108"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Ronneberger</surname> <given-names>O.</given-names></name> <name><surname>Fischer</surname> <given-names>P.</given-names></name> <name><surname>Brox</surname> <given-names>T</given-names></name></person-group>. (<year>2015</year>). <article-title>U-net: convolutional networks for biomedical image segmentation. Medical image computing and computer-assisted intervention&#x2013;MICCAI 2015</article-title>. <conf-name>18th International Conference, Munich, Germany, October 5&#x2013;9, 2015, Proceedings, Part III 18</conf-name></citation></ref>
<ref id="ref109"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sadeghi</surname> <given-names>N.</given-names></name> <name><surname>Prastawa</surname> <given-names>M.</given-names></name> <name><surname>Fletcher</surname> <given-names>P. T.</given-names></name> <name><surname>Wolff</surname> <given-names>J.</given-names></name> <name><surname>Gilmore</surname> <given-names>J. H.</given-names></name> <name><surname>Gerig</surname> <given-names>G.</given-names></name></person-group> (<year>2013</year>). <article-title>Regional characterization of longitudinal DT-MRI to study white matter maturation of the early developing brain</article-title>. <source>NeuroImage</source> <volume>68</volume>, <fpage>236</fpage>&#x2013;<lpage>247</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.11.040</pub-id>, PMID: <pub-id pub-id-type="pmid">23235270</pub-id></citation></ref>
<ref id="ref110"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schilling</surname> <given-names>K. G.</given-names></name> <name><surname>Rheault</surname> <given-names>F.</given-names></name> <name><surname>Petit</surname> <given-names>L.</given-names></name> <name><surname>Hansen</surname> <given-names>C. B.</given-names></name> <name><surname>Nath</surname> <given-names>V.</given-names></name> <name><surname>Yeh</surname> <given-names>F.-C.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Tractography dissection variability: what happens when 42 groups dissect 14 white matter bundles on the same dataset?</article-title> <source>NeuroImage</source> <volume>243</volume>:<fpage>118502</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2021.118502</pub-id>, PMID: <pub-id pub-id-type="pmid">34433094</pub-id></citation></ref>
<ref id="ref111"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schmitt</surname> <given-names>B.</given-names></name> <name><surname>Lebois</surname> <given-names>A.</given-names></name> <name><surname>Duclap</surname> <given-names>D.</given-names></name> <name><surname>Guevara</surname> <given-names>P.</given-names></name> <name><surname>Poupon</surname> <given-names>F.</given-names></name> <name><surname>Rivi&#x00E8;re</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>CONNECT/ARCHI: an open database to infer atlases of the human brain connectivity</article-title>. <source>ESMRMB</source> <volume>272</volume>:<fpage>2012</fpage>.</citation></ref>
<ref id="ref112"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sharmin</surname> <given-names>N.</given-names></name> <name><surname>Olivetti</surname> <given-names>E.</given-names></name> <name><surname>Avesani</surname> <given-names>P.</given-names></name></person-group> (<year>2018</year>). <article-title>White matter tract segmentation as multiple linear assignment problems</article-title>. <source>Front. Neurosci.</source> <volume>11</volume>:<fpage>754</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2017.00754</pub-id>, PMID: <pub-id pub-id-type="pmid">29467600</pub-id></citation></ref>
<ref id="ref113"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shi</surname> <given-names>J.</given-names></name> <name><surname>Malik</surname> <given-names>J.</given-names></name></person-group> (<year>2000</year>). <article-title>Normalized cuts and image segmentation</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell.</source> <volume>22</volume>, <fpage>888</fpage>&#x2013;<lpage>905</lpage>. doi: <pub-id pub-id-type="doi">10.1109/34.868688</pub-id></citation></ref>
<ref id="ref114"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Siless</surname> <given-names>V.</given-names></name> <name><surname>Chang</surname> <given-names>K.</given-names></name> <name><surname>Fischl</surname> <given-names>B.</given-names></name> <name><surname>Yendiki</surname> <given-names>A.</given-names></name></person-group> (<year>2018</year>). <article-title>AnatomiCuts: hierarchical clustering of tractography streamlines based on anatomical similarity</article-title>. <source>NeuroImage</source> <volume>166</volume>, <fpage>32</fpage>&#x2013;<lpage>45</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2017.10.058</pub-id>, PMID: <pub-id pub-id-type="pmid">29100937</pub-id></citation></ref>
<ref id="ref115"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Soh</surname> <given-names>S.-E.</given-names></name> <name><surname>Tint</surname> <given-names>M. T.</given-names></name> <name><surname>Gluckman</surname> <given-names>P. D.</given-names></name> <name><surname>Godfrey</surname> <given-names>K. M.</given-names></name> <name><surname>Rifkin-Graboi</surname> <given-names>A.</given-names></name> <name><surname>Chan</surname> <given-names>Y. H.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Cohort profile: growing up in Singapore towards healthy outcomes (GUSTO) birth cohort study</article-title>. <source>Int. J. Epidemiol.</source> <volume>43</volume>, <fpage>1401</fpage>&#x2013;<lpage>1409</lpage>. doi: <pub-id pub-id-type="doi">10.1093/ije/dyt125</pub-id>, PMID: <pub-id pub-id-type="pmid">23912809</pub-id></citation></ref>
<ref id="ref116"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Steketee</surname> <given-names>R. M.</given-names></name> <name><surname>Bron</surname> <given-names>E. E.</given-names></name> <name><surname>Meijboom</surname> <given-names>R.</given-names></name> <name><surname>Houston</surname> <given-names>G. C.</given-names></name> <name><surname>Klein</surname> <given-names>S.</given-names></name> <name><surname>Mutsaerts</surname> <given-names>H. J.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Early-stage differentiation between presenile Alzheimer&#x2019;s disease and frontotemporal dementia using arterial spin labeling MRI</article-title>. <source>Eur. Radiol.</source> <volume>26</volume>, <fpage>244</fpage>&#x2013;<lpage>253</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00330-015-3789-x</pub-id>, PMID: <pub-id pub-id-type="pmid">26024845</pub-id></citation></ref>
<ref id="ref117"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Teeuw</surname> <given-names>J.</given-names></name> <name><surname>Caan</surname> <given-names>M. W.</given-names></name> <name><surname>Olabarriaga</surname> <given-names>S. D</given-names></name></person-group>. (<year>2015</year>). <article-title>Robust automated white matter pathway reconstruction for large studies. Medical image computing and computer-assisted intervention--MICCAI 2015</article-title>, <conf-name>18th International Conference, Munich, Germany, October 5&#x2013;9, 2015, Proceedings, Part I 18</conf-name></citation></ref>
<ref id="ref118"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Toga</surname> <given-names>A. W.</given-names></name> <name><surname>Mazziotta</surname> <given-names>J. C.</given-names></name></person-group> (<year>2002</year>). <source>Brain mapping: the methods</source>, vol. <volume>1</volume> <publisher-name>Academic Press</publisher-name>.</citation></ref>
<ref id="ref119"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tunc</surname> <given-names>B.</given-names></name> <name><surname>Parker</surname> <given-names>W. A.</given-names></name> <name><surname>Ingalhalikar</surname> <given-names>M.</given-names></name> <name><surname>Verma</surname> <given-names>R.</given-names></name></person-group> (<year>2014</year>). <article-title>Automated tract extraction via atlas based adaptive clustering</article-title>. <source>NeuroImage</source> <volume>102</volume>, <fpage>596</fpage>&#x2013;<lpage>607</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2014.08.021</pub-id>, PMID: <pub-id pub-id-type="pmid">25134977</pub-id></citation></ref>
<ref id="ref120"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ugurlu</surname> <given-names>D.</given-names></name> <name><surname>Firat</surname> <given-names>Z.</given-names></name> <name><surname>Ture</surname> <given-names>U.</given-names></name> <name><surname>Unal</surname> <given-names>G.</given-names></name></person-group> (<year>2019</year>). <article-title>Supervised classification of white matter fibers based on neighborhood fiber orientation distributions using an ensemble of neural networks</article-title>. <source>Comput. Diffusion MRI</source> <volume>2018</volume>:<fpage>22</fpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-05831-9_12</pub-id></citation></ref>
<ref id="ref121"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van Essen</surname> <given-names>D. C.</given-names></name> <name><surname>Smith</surname> <given-names>S. M.</given-names></name> <name><surname>Barch</surname> <given-names>D. M.</given-names></name> <name><surname>Behrens</surname> <given-names>T. E.</given-names></name> <name><surname>Yacoub</surname> <given-names>E.</given-names></name> <name><surname>Ugurbil</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>The WU-Minn human connectome project: an overview</article-title>. <source>NeuroImage</source> <volume>80</volume>, <fpage>62</fpage>&#x2013;<lpage>79</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.05.041</pub-id></citation></ref>
<ref id="ref122"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van Essen</surname> <given-names>D. C.</given-names></name> <name><surname>Ugurbil</surname> <given-names>K.</given-names></name> <name><surname>Auerbach</surname> <given-names>E.</given-names></name> <name><surname>Barch</surname> <given-names>D.</given-names></name> <name><surname>Behrens</surname> <given-names>T. E.</given-names></name> <name><surname>Bucholz</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>The human connectome project: a data acquisition perspective</article-title>. <source>NeuroImage</source> <volume>62</volume>, <fpage>2222</fpage>&#x2013;<lpage>2231</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.02.018</pub-id>, PMID: <pub-id pub-id-type="pmid">22366334</pub-id></citation></ref>
<ref id="ref123"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vanderweyen</surname> <given-names>D. C.</given-names></name> <name><surname>Theaud</surname> <given-names>G.</given-names></name> <name><surname>Sidhu</surname> <given-names>J.</given-names></name> <name><surname>Rheault</surname> <given-names>F.</given-names></name> <name><surname>Sarubbo</surname> <given-names>S.</given-names></name> <name><surname>Descoteaux</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>The role of diffusion tractography in refining glial tumor resection</article-title>. <source>Brain Struct. Funct.</source> <volume>225</volume>, <fpage>1413</fpage>&#x2013;<lpage>1436</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00429-020-02056-z</pub-id>, PMID: <pub-id pub-id-type="pmid">32180019</pub-id></citation></ref>
<ref id="ref124"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vaswani</surname> <given-names>A.</given-names></name> <name><surname>Shazeer</surname> <given-names>N.</given-names></name> <name><surname>Parmar</surname> <given-names>N.</given-names></name> <name><surname>Uszkoreit</surname> <given-names>J.</given-names></name> <name><surname>Jones</surname> <given-names>L.</given-names></name> <name><surname>Gomez</surname> <given-names>A. N.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Attention is all you need</article-title>. <source>Adv. Neural Inf. Proces. Syst.</source> <volume>30</volume></citation></ref>
<ref id="ref125"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>V&#x00E1;zquez</surname> <given-names>A.</given-names></name> <name><surname>L&#x00F3;pez-L&#x00F3;pez</surname> <given-names>N.</given-names></name> <name><surname>Labra</surname> <given-names>N.</given-names></name> <name><surname>Figueroa</surname> <given-names>M.</given-names></name> <name><surname>Poupon</surname> <given-names>C.</given-names></name> <name><surname>Mangin</surname> <given-names>J.-F.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Parallel optimization of fiber bundle segmentation for massive tractography datasets</article-title>. <conf-name>2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)</conf-name></citation></ref>
<ref id="ref126"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>V&#x00E1;zquez</surname> <given-names>A.</given-names></name> <name><surname>L&#x00F3;pez-L&#x00F3;pez</surname> <given-names>N.</given-names></name> <name><surname>S&#x00E1;nchez</surname> <given-names>A.</given-names></name> <name><surname>Houenou</surname> <given-names>J.</given-names></name> <name><surname>Poupon</surname> <given-names>C.</given-names></name> <name><surname>Mangin</surname> <given-names>J.-F.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>FFClust: fast fiber clustering for large tractography datasets for a detailed study of brain connectivity</article-title>. <source>NeuroImage</source> <volume>220</volume>:<fpage>117070</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.117070</pub-id>, PMID: <pub-id pub-id-type="pmid">32599269</pub-id></citation></ref>
<ref id="ref127"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Viola</surname> <given-names>P.</given-names></name> <name><surname>Jones</surname> <given-names>M.</given-names></name></person-group>. (<year>2001</year>). <article-title>Rapid object detection using a boosted cascade of simple features</article-title>. In <conf-name>Proceedings of the 2001 IEEE computer society conference on computer vision and pattern recognition. CVPR 2001</conf-name></citation></ref>
<ref id="ref128"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Volkow</surname> <given-names>N. D.</given-names></name> <name><surname>Koob</surname> <given-names>G. F.</given-names></name> <name><surname>Croyle</surname> <given-names>R. T.</given-names></name> <name><surname>Bianchi</surname> <given-names>D. W.</given-names></name> <name><surname>Gordon</surname> <given-names>J. A.</given-names></name> <name><surname>Koroshetz</surname> <given-names>W. J.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>The conception of the ABCD study: from substance use to a broad NIH collaboration</article-title>. <source>Dev. Cogn. Neurosci.</source> <volume>32</volume>, <fpage>4</fpage>&#x2013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.dcn.2017.10.002</pub-id>, PMID: <pub-id pub-id-type="pmid">29051027</pub-id></citation></ref>
<ref id="ref129"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vu</surname> <given-names>T. H.</given-names></name> <name><surname>Monga</surname> <given-names>V.</given-names></name></person-group> (<year>2017</year>). <article-title>Fast low-rank shared dictionary learning for image classification</article-title>. <source>IEEE Trans. Image Process.</source> <volume>26</volume>, <fpage>5160</fpage>&#x2013;<lpage>5175</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TIP.2017.2729885</pub-id>, PMID: <pub-id pub-id-type="pmid">28742035</pub-id></citation></ref>
<ref id="ref130"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wakana</surname> <given-names>S.</given-names></name> <name><surname>Caprihan</surname> <given-names>A.</given-names></name> <name><surname>Panzenboeck</surname> <given-names>M. M.</given-names></name> <name><surname>Fallon</surname> <given-names>J. H.</given-names></name> <name><surname>Perry</surname> <given-names>M.</given-names></name> <name><surname>Gollub</surname> <given-names>R. L.</given-names></name> <etal/></person-group>. (<year>2007</year>). <article-title>Reproducibility of quantitative tractography methods applied to cerebral white matter</article-title>. <source>NeuroImage</source> <volume>36</volume>, <fpage>630</fpage>&#x2013;<lpage>644</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2007.02.049</pub-id>, PMID: <pub-id pub-id-type="pmid">17481925</pub-id></citation></ref>
<ref id="ref131"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Lv</surname> <given-names>Y.</given-names></name> <name><surname>He</surname> <given-names>M.</given-names></name> <name><surname>Ge</surname> <given-names>E.</given-names></name> <name><surname>Qiang</surname> <given-names>N.</given-names></name> <name><surname>Ge</surname> <given-names>B.</given-names></name></person-group> (<year>2022</year>). <article-title>Accurate corresponding Fiber tract segmentation via FiberGeoMap learner</article-title>. <conf-name>International Conference on Medical Image Computing and Computer-Assisted Intervention</conf-name></citation></ref>
<ref id="ref132"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Warrington</surname> <given-names>S.</given-names></name> <name><surname>Bryant</surname> <given-names>K. L.</given-names></name> <name><surname>Khrapitchev</surname> <given-names>A. A.</given-names></name> <name><surname>Sallet</surname> <given-names>J.</given-names></name> <name><surname>Charquero-Ballester</surname> <given-names>M.</given-names></name> <name><surname>Douaud</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>XTRACT-standardised protocols for automated tractography in the human and macaque brain</article-title>. <source>NeuroImage</source> <volume>217</volume>:<fpage>116923</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.116923</pub-id>, PMID: <pub-id pub-id-type="pmid">32407993</pub-id></citation></ref>
<ref id="ref133"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wassermann</surname> <given-names>D.</given-names></name> <name><surname>Makris</surname> <given-names>N.</given-names></name> <name><surname>Rathi</surname> <given-names>Y.</given-names></name> <name><surname>Shenton</surname> <given-names>M.</given-names></name> <name><surname>Kikinis</surname> <given-names>R.</given-names></name> <name><surname>Kubicki</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>On describing human white matter anatomy: the white matter query language</article-title>. <source>Med. Image Comput. Comput. Assist. Interv.</source> <volume>16</volume>, <fpage>647</fpage>&#x2013;<lpage>654</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-642-40811-3_81</pub-id>, PMID: <pub-id pub-id-type="pmid">24505722</pub-id></citation></ref>
<ref id="ref134"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wassermann</surname> <given-names>D.</given-names></name> <name><surname>Makris</surname> <given-names>N.</given-names></name> <name><surname>Rathi</surname> <given-names>Y.</given-names></name> <name><surname>Shenton</surname> <given-names>M.</given-names></name> <name><surname>Kikinis</surname> <given-names>R.</given-names></name> <name><surname>Kubicki</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>The white matter query language: a novel approach for describing human white matter anatomy</article-title>. <source>Brain Struct. Funct.</source> <volume>221</volume>, <fpage>4705</fpage>&#x2013;<lpage>4721</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00429-015-1179-4</pub-id></citation></ref>
<ref id="ref135"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wasserthal</surname> <given-names>J.</given-names></name> <name><surname>Neher</surname> <given-names>P. F.</given-names></name> <name><surname>Hirjak</surname> <given-names>D.</given-names></name> <name><surname>Maier-Hein</surname> <given-names>K. H.</given-names></name></person-group> (<year>2019</year>). <article-title>Combined tract segmentation and orientation mapping for bundle-specific tractography</article-title>. <source>Med. Image Anal.</source> <volume>58</volume>:<fpage>101559</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2019.101559</pub-id>, PMID: <pub-id pub-id-type="pmid">31542711</pub-id></citation></ref>
<ref id="ref136"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wasserthal</surname> <given-names>J.</given-names></name> <name><surname>Neher</surname> <given-names>P.</given-names></name> <name><surname>Maier-Hein</surname> <given-names>K. H.</given-names></name></person-group> (<year>2018</year>). <article-title>TractSeg - fast and accurate white matter tract segmentation</article-title>. <source>NeuroImage</source> <volume>183</volume>, <fpage>239</fpage>&#x2013;<lpage>253</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2018.07.070</pub-id>, PMID: <pub-id pub-id-type="pmid">30086412</pub-id></citation></ref>
<ref id="ref137"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>Y.</given-names></name> <name><surname>Hong</surname> <given-names>Y.</given-names></name> <name><surname>Ahmad</surname> <given-names>S.</given-names></name> <name><surname>Lin</surname> <given-names>W.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name> <name><surname>Yap</surname> <given-names>P. T.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Tract dictionary learning for fast and robust recognition of Fiber bundles</article-title>. <source>Med. Image Comput. Comput. Assist. Interv.</source> <volume>12267</volume>, <fpage>251</fpage>&#x2013;<lpage>259</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-59728-3_25</pub-id>, PMID: <pub-id pub-id-type="pmid">34195699</pub-id></citation></ref>
<ref id="ref138"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xiao</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Liu</surname> <given-names>Q.</given-names></name> <name><surname>Zhu</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>Q.</given-names></name></person-group> (<year>2023</year>). <article-title>Transformers in medical image segmentation: a review</article-title>. <source>Biomed. Signal Process. Control</source> <volume>84</volume>:<fpage>104791</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2023.104791</pub-id></citation></ref>
<ref id="ref139"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>C.</given-names></name> <name><surname>Sun</surname> <given-names>G.</given-names></name> <name><surname>Liang</surname> <given-names>R.</given-names></name> <name><surname>Xu</surname> <given-names>X.</given-names></name></person-group> (<year>2021</year>). <article-title>Vector field streamline clustering framework for brain fiber tract segmentation</article-title>. <source>IEEE Transac. Cognit. Develop. Syst.</source> <volume>14</volume>, <fpage>1066</fpage>&#x2013;<lpage>1081</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TCDS.2021.3094555</pub-id></citation></ref>
<ref id="ref140"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>H.</given-names></name> <name><surname>Xue</surname> <given-names>T.</given-names></name> <name><surname>Liu</surname> <given-names>D.</given-names></name> <name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Westin</surname> <given-names>C.-F.</given-names></name> <name><surname>Kikinis</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>A registration- and uncertainty-based framework for white matter tract segmentation with only one annotated subject</article-title>. <conf-name>2023 IEEE 20th International Symposium on Biomedical Imaging (ISBI)</conf-name></citation></ref>
<ref id="ref141"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xue</surname> <given-names>T.</given-names></name> <name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Zhang</surname> <given-names>C.</given-names></name> <name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Song</surname> <given-names>Y.</given-names></name> <name><surname>Golby</surname> <given-names>A. J.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Superficial white matter analysis: an efficient point-cloud-based deep learning framework with supervised contrastive learning for consistent tractography parcellation across populations and dMRI acquisitions</article-title>. <source>Med. Image Anal.</source> <volume>85</volume>:<fpage>102759</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2023.102759</pub-id>, PMID: <pub-id pub-id-type="pmid">36706638</pub-id></citation></ref>
<ref id="ref142"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yamada</surname> <given-names>K.</given-names></name> <name><surname>Sakai</surname> <given-names>K.</given-names></name> <name><surname>Akazawa</surname> <given-names>K.</given-names></name> <name><surname>Yuen</surname> <given-names>S.</given-names></name> <name><surname>Nishimura</surname> <given-names>T.</given-names></name></person-group> (<year>2009</year>). <article-title>MR tractography: a review of its clinical applications</article-title>. <source>Magn. Reson. Med. Sci.</source> <volume>8</volume>, <fpage>165</fpage>&#x2013;<lpage>174</lpage>. doi: <pub-id pub-id-type="doi">10.2463/mrms.8.165</pub-id>, PMID: <pub-id pub-id-type="pmid">20035125</pub-id></citation></ref>
<ref id="ref143"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Z.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Zhou</surname> <given-names>J.</given-names></name> <name><surname>Wu</surname> <given-names>X.</given-names></name> <name><surname>Ding</surname> <given-names>Z.</given-names></name></person-group> (<year>2020</year>). <article-title>Functional clustering of whole brain white matter fibers</article-title>. <source>J. Neurosci. Methods</source> <volume>335</volume>:<fpage>108626</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jneumeth.2020.108626</pub-id>, PMID: <pub-id pub-id-type="pmid">32032716</pub-id></citation></ref>
<ref id="ref144"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yeatman</surname> <given-names>J. D.</given-names></name> <name><surname>Dougherty</surname> <given-names>R. F.</given-names></name> <name><surname>Myall</surname> <given-names>N. J.</given-names></name> <name><surname>Wandell</surname> <given-names>B. A.</given-names></name> <name><surname>Feldman</surname> <given-names>H. M.</given-names></name></person-group> (<year>2012</year>). <article-title>Tract profiles of white matter properties: automating fiber-tract quantification</article-title>. <source>PLoS One</source> <volume>7</volume>:<fpage>e49790</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0049790</pub-id>, PMID: <pub-id pub-id-type="pmid">23166771</pub-id></citation></ref>
<ref id="ref145"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yendiki</surname> <given-names>A.</given-names></name> <name><surname>Panneck</surname> <given-names>P.</given-names></name> <name><surname>Srinivasan</surname> <given-names>P.</given-names></name> <name><surname>Stevens</surname> <given-names>A.</given-names></name> <name><surname>Zollei</surname> <given-names>L.</given-names></name> <name><surname>Augustinack</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Automated probabilistic reconstruction of white-matter pathways in health and disease using an atlas of the underlying anatomy</article-title>. <source>Front. Neuroinform.</source> <volume>5</volume>:<fpage>23</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fninf.2011.00023</pub-id>, PMID: <pub-id pub-id-type="pmid">22016733</pub-id></citation></ref>
<ref id="ref146"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Yin</surname> <given-names>H.</given-names></name> <name><surname>Xu</surname> <given-names>P.</given-names></name> <name><surname>Cui</surname> <given-names>H.</given-names></name> <name><surname>Chen</surname> <given-names>G.</given-names></name> <name><surname>Ma</surname> <given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>DC<sup>2</sup>U-net: tract segmentation in brain white matter using dense Criss-cross U-net</article-title>. <conf-name>International Workshop on Computational Diffusion MRI</conf-name></citation></ref>
<ref id="ref147"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yoo</surname> <given-names>S. W.</given-names></name> <name><surname>Guevara</surname> <given-names>P.</given-names></name> <name><surname>Jeong</surname> <given-names>Y.</given-names></name> <name><surname>Yoo</surname> <given-names>K.</given-names></name> <name><surname>Shin</surname> <given-names>J. S.</given-names></name> <name><surname>Mangin</surname> <given-names>J.-F.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>An example-based multi-atlas approach to automatic labeling of white matter tracts</article-title>. <source>PLoS One</source> <volume>10</volume>:<fpage>e0133337</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0133337</pub-id>, PMID: <pub-id pub-id-type="pmid">26225419</pub-id></citation></ref>
<ref id="ref148"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Yun</surname> <given-names>S.</given-names></name> <name><surname>Han</surname> <given-names>D.</given-names></name> <name><surname>Oh</surname> <given-names>S. J.</given-names></name> <name><surname>Chun</surname> <given-names>S.</given-names></name> <name><surname>Choe</surname> <given-names>J.</given-names></name> <name><surname>Yoo</surname> <given-names>Y.</given-names></name></person-group> (<year>2019</year>). <article-title>Cutmix: regularization strategy to train strong classifiers with localizable features</article-title>. <conf-name>Proceedings of the IEEE/CVF international conference on computer vision</conf-name></citation></ref>
<ref id="ref149"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yushkevich</surname> <given-names>P. A.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Simon</surname> <given-names>T. J.</given-names></name> <name><surname>Gee</surname> <given-names>J. C.</given-names></name></person-group> (<year>2008</year>). <article-title>Structure-specific statistical mapping of white matter tracts</article-title>. <source>NeuroImage</source> <volume>41</volume>, <fpage>448</fpage>&#x2013;<lpage>461</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2008.01.013</pub-id></citation></ref>
<ref id="ref150"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Cisse</surname> <given-names>M.</given-names></name> <name><surname>Dauphin</surname> <given-names>Y. N.</given-names></name> <name><surname>Lopez-Paz</surname> <given-names>D.</given-names></name></person-group> (<year>2017</year>). <article-title>mixup: Beyond empirical risk minimization</article-title>. <source>arXiv</source> <comment>preprint arXiv:1710.09412</comment></citation></ref>
<ref id="ref151"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Daducci</surname> <given-names>A.</given-names></name> <name><surname>He</surname> <given-names>Y.</given-names></name> <name><surname>Schiavi</surname> <given-names>S.</given-names></name> <name><surname>Seguin</surname> <given-names>C.</given-names></name> <name><surname>Smith</surname> <given-names>R. E.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Quantitative mapping of the brain&#x2019;s structural connectivity using diffusion MRI tractography: a review</article-title>. <source>NeuroImage</source> <volume>249</volume>:<fpage>118870</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2021.118870</pub-id>, PMID: <pub-id pub-id-type="pmid">34979249</pub-id></citation></ref>
<ref id="ref152"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Hoffmann</surname> <given-names>N.</given-names></name> <name><surname>Karayumak</surname> <given-names>S. C.</given-names></name> <name><surname>Rathi</surname> <given-names>Y.</given-names></name> <name><surname>Golby</surname> <given-names>A. J.</given-names></name> <name><surname>O&#x2019;Donnell</surname> <given-names>L. J.</given-names></name></person-group> (<year>2019</year>). <article-title>Deep white matter analysis: fast, consistent tractography segmentation across populations and dMRI acquisitions</article-title>. <source>Int. Conference Med. Image Comput. Comput. Assist. Intervent.</source> doi: <pub-id pub-id-type="doi">10.1007/978-3-030-32248-9_67</pub-id></citation></ref>
<ref id="ref153"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Karayumak</surname> <given-names>S. C.</given-names></name> <name><surname>Hoffmann</surname> <given-names>N.</given-names></name> <name><surname>Rathi</surname> <given-names>Y.</given-names></name> <name><surname>Golby</surname> <given-names>A. J.</given-names></name> <name><surname>O&#x2019;Donnell</surname> <given-names>L. J.</given-names></name></person-group> (<year>2020</year>). <article-title>Deep white matter analysis (DeepWMA): fast and consistent tractography segmentation</article-title>. <source>Med. Image Anal.</source> <volume>65</volume>:<fpage>101761</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2020.101761</pub-id>, PMID: <pub-id pub-id-type="pmid">32622304</pub-id></citation></ref>
<ref id="ref154"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Wells</surname> <given-names>W. M.</given-names></name> <name><surname>O&#x2019;Donnell</surname> <given-names>L. J.</given-names></name></person-group> (<year>2021</year>). <article-title>Deep diffusion MRI registration (DDMReg): a deep learning method for diffusion MRI registration</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>41</volume>, <fpage>1454</fpage>&#x2013;<lpage>1467</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2021.3139507</pub-id></citation></ref>
<ref id="ref155"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Wu</surname> <given-names>Y.</given-names></name> <name><surname>Norton</surname> <given-names>I.</given-names></name> <name><surname>Rigolo</surname> <given-names>L.</given-names></name> <name><surname>Rathi</surname> <given-names>Y.</given-names></name> <name><surname>Makris</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>An anatomically curated fiber clustering white matter atlas for consistent white matter tract parcellation across the lifespan</article-title>. <source>NeuroImage</source> <volume>179</volume>, <fpage>429</fpage>&#x2013;<lpage>447</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2018.06.027</pub-id>, PMID: <pub-id pub-id-type="pmid">29920375</pub-id></citation></ref>
<ref id="ref156"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Oishi</surname> <given-names>K.</given-names></name> <name><surname>Faria</surname> <given-names>A. V.</given-names></name> <name><surname>Jiang</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>Atlas-guided tract reconstruction for automated and comprehensive examination of the white matter anatomy</article-title>. <source>NeuroImage</source> <volume>52</volume>, <fpage>1289</fpage>&#x2013;<lpage>1301</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2010.05.049</pub-id>, PMID: <pub-id pub-id-type="pmid">20570617</pub-id></citation></ref>
<ref id="ref157"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>Y.</given-names></name> <name><surname>Su</surname> <given-names>J.</given-names></name> <name><surname>Yang</surname> <given-names>Z.</given-names></name> <name><surname>Ding</surname> <given-names>Z.</given-names></name></person-group> (<year>2022</year>). <article-title>A Riemannian framework for functional clustering of whole brain white matter fibers</article-title>. <conf-name>2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI)</conf-name></citation></ref>
</ref-list>
</back>
</article>