<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Oncol.</journal-id>
<journal-title>Frontiers in Oncology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Oncol.</abbrev-journal-title>
<issn pub-type="epub">2234-943X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fonc.2023.1273013</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Oncology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Development of RLK-Unet: a clinically favorable deep learning algorithm for brain metastasis detection and treatment response assessment</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Son</surname>
<given-names>Seungyeon</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2408297"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Joo</surname>
<given-names>Bio</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1985966"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Park</surname>
<given-names>Mina</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Suh</surname>
<given-names>Sang Hyun</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/560660"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Oh</surname>
<given-names>Hee Sang</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Jun Won</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/727704"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Lee</surname>
<given-names>Seoyoung</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes" corresp="yes">
<name>
<surname>Ahn</surname>
<given-names>Sung Jun</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<xref ref-type="author-notes" rid="fn003">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1005297"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes" corresp="yes">
<name>
<surname>Lee</surname>
<given-names>Jong-Min</given-names>
</name>
<xref ref-type="aff" rid="aff5">
<sup>5</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<xref ref-type="author-notes" rid="fn003">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/193559"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Department of Artificial Intelligence, Hanyang University</institution>, <addr-line>Seoul</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Radiology, Gangnam Severance Hospital, Yonsei University, College of Medicine</institution>, <addr-line>Seoul</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Department of Radiation Oncology, Gangnam Severance Hospital, Yonsei University, College of Medicine</institution>, <addr-line>Seoul</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Division of Medical Oncology, Department of Internal Medicine, Gangnam Severance Hospital, College of Medicine, Yonsei University</institution>, <addr-line>Seoul</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff5">
<sup>5</sup>
<institution>Department of Biomedical Engineering, Hanyang University</institution>, <addr-line>Seoul</addr-line>, <country>Republic of Korea</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Lizhi Liu, Sun Yat-sen University Cancer Center (SYSUCC), China</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Hongbo Chen, Guilin University of Electronic Technology, China</p>
<p>Ling Ma, Nankai University, China</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Sung Jun Ahn, <email xlink:href="mailto:aahng77@yuhs.ac">aahng77@yuhs.ac</email>; Jong-Min Lee, <email xlink:href="mailto:ljm@hanyang.ac.kr">ljm@hanyang.ac.kr</email>
</p>
</fn>
<fn fn-type="equal" id="fn003">
<p>&#x2020;These authors have contributed equally to this work</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>15</day>
<month>01</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>13</volume>
<elocation-id>1273013</elocation-id>
<history>
<date date-type="received">
<day>07</day>
<month>08</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>27</day>
<month>12</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Son, Joo, Park, Suh, Oh, Kim, Lee, Ahn and Lee</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Son, Joo, Park, Suh, Oh, Kim, Lee, Ahn and Lee</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Purpose/objective(s)</title>
<p>Previous deep learning (DL) algorithms for brain metastasis (BM) detection and segmentation have not been commonly used in clinics because they produce false-positive findings, require multiple sequences, and do not reflect physiological properties such as necrosis. The aim of this study was to develop a more clinically favorable DL algorithm (RLK-Unet) using a single sequence reflecting necrosis and apply it to automated treatment response assessment.</p>
</sec>
<sec>
<title>Methods and materials</title>
<p>A total of 128 patients with 1339 BMs, who underwent BM magnetic resonance imaging using the contrast-enhanced 3D T1 weighted (T1WI) turbo spin-echo black blood sequence, were included in the development of the DL algorithm. Fifty-eight patients with 629 BMs were assessed for treatment response. The detection sensitivity, precision, Dice similarity coefficient (DSC), and agreement of treatment response assessments between neuroradiologists and RLK-Unet were assessed.</p>
</sec>
<sec>
<title>Results</title>
<p>RLK-Unet demonstrated a sensitivity of 86.9% and a precision of 79.6% for BMs and had a DSC of 0.663. Segmentation performance was better in the subgroup with larger BMs (DSC, 0.843). The agreement in the response assessment for BMs between the radiologists and RLK-Unet was excellent (intraclass correlation, 0.84).</p>
</sec>
<sec>
<title>Conclusion</title>
<p>RLK-Unet yielded accurate detection and segmentation of BM and could assist clinicians in treatment response assessment.</p>
</sec>
</abstract>
<kwd-group>
<kwd>deep learning algorithm</kwd>
<kwd>brain metastasis</kwd>
<kwd>detection</kwd>
<kwd>segmentation</kwd>
<kwd>treatment response</kwd>
</kwd-group>
<counts>
<fig-count count="6"/>
<table-count count="5"/>
<equation-count count="3"/>
<ref-count count="41"/>
<page-count count="11"/>
<word-count count="5123"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Cancer Imaging and Image-directed Interventions</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Lung cancer is the most frequent source of brain metastases (BMs), and 30%&#x2013;50% of patients with lung cancer develop BMs during the course of the disease (<xref ref-type="bibr" rid="B1">1</xref>). As a result, brain magnetic resonance imaging (MRI) has become an important part of staging and treatment planning for lung cancer. Many guidelines recommend brain MRI for the screening and follow-up of BMs in advanced non-small cell lung cancer or small cell lung cancer (<xref ref-type="bibr" rid="B2">2</xref>, <xref ref-type="bibr" rid="B3">3</xref>). However, the detection of small BMs and an accurate assessment of treatment response require tedious effort by radiologists. In addition, stereotactic radiosurgery has become popular in the treatment of BMs; therefore, manual segmentation of BMs has significantly increased the workload of radiosurgeons (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B5">5</xref>).</p>
<p>In this context, recent studies (<xref ref-type="bibr" rid="B6">6</xref>&#x2013;<xref ref-type="bibr" rid="B8">8</xref>) have implemented deep learning models, particularly deep convolutional neural networks (CNNs), for the automatic detection and segmentation of BMs, and have reported promising results with sensitivities of up to 90% and Dice coefficients of up to 0.8. However, the studies often report a substantial number of false-positive (FP) results and low sensitivity in detecting small BMs. Moreover, their segmentation methods were based on multiparametric scans such as the T1-weighted image (T1WI), T2-weighted image (T2WI), contrast-enhanced T1WI, and fluid-attenuated inversion recovery (FLAIR). However, these methods are not always favorable because additional sequences may increase the scan time and are often acquired with a larger thickness and lower resolution, which may add uncertainty to the segmentation. A few studies (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B10">10</xref>) have used a single modality&#x2014;in particular, the contrast-enhanced 3D gradient echo (GRE) T1WI sequence. However, recent studies (<xref ref-type="bibr" rid="B11">11</xref>, <xref ref-type="bibr" rid="B12">12</xref>) have demonstrated that the three-dimensional (3D) black blood (BB) T1WI sequence is superior to the 3D GRE T1WI sequence in detecting small BMs by suppressing intraluminal blood signals. In a subsequent study, deep learning (DL)-based methods for BM detection and segmentation, utilizing the 3D BB T1WI sequence, demonstrated a performance advantage over methods employing the 3D GRE T1WI sequence (<xref ref-type="bibr" rid="B8">8</xref>). In that study, the sensitivity for detecting BMs on 3D BB T1WI was higher at 92.6% compared to the sensitivity on 3D GRE T1WI, which stood at 76.8%.</p>
<p>Another limitation of previous studies is that internal necrosis was included in the BM segmentation. BM necrosis may represent a by-product of chemotherapy or radiation therapy (<xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B14">14</xref>). The Response Assessment in Neuro-Oncology Brain Metastases (RANO-BM) criteria also recommend that these necrotic or cystic cavities should not be measured for determining a response (<xref ref-type="bibr" rid="B15">15</xref>). Thus, previous BM segmentation algorithms that included solid components and necrosis may lead to inappropriate treatment assessment.</p>
<p>The aims of our study were two-fold: (1) to assess whether a DL algorithm using a single modality, 3D BB T1WI, has promising performance for the detection and segmentation of BMs and (2) to investigate whether the volumetric assessment using our developed DL algorithm, excluding necrosis, is comparable to the conventional assessment based on the RANO-BM criteria.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Participants</title>
<p>This retrospective study was approved by our institutional review board, which waived the requirement for informed consent. We retrospectively searched the electronic medical records to identify patients with lung cancer who underwent brain MRI to evaluate BMs diagnosed between April 2017 and December 2021. For the segmentation of BMs between April 2017 and October 2020, 128 consecutive patients with 1339 newly developed BMs were included (Dataset 1). For the assessment of the treatment response between November 2020 and December 2021, 58 consecutive patients with 629 BMs were included (Dataset 2). The detailed inclusion and exclusion criteria are described in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary  Material S1</bold>
</xref>. Histopathological diagnoses of lung cancer were determined by using bronchoscopic, percutaneous needle-guided, or surgical biopsies in all patients.</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>MRI protocol</title>
<p>Routine MRIs for the evaluation of the BMs were acquired using the Siemens 3T Vida scanner (Siemens Healthineers, Erlangen, Germany) or the GE 3T Discovery MR750 scanner (GE Healthcare, Milwaukee, WI, USA). Our BM MRI protocol consisted of T1-weighted image (T1WI), T2-weighted image (T2WI), FLAIR, contrast-enhanced T1WI, and BB T1WI. Contrast-enhanced images were acquired after administering gadobutrol (0.2 mmol/kg; Gadovist, Bayer Schering Pharma; Berlin, Germany). Detailed MR parameters are provided in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary  Material S2</bold>
</xref>.</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>BM segmentation</title>
<p>The ground truths (GTs) in all BMs were carefully drawn by a radiologist with 8 years of clinical experience, while avoiding cystic or necrotic areas on contrast-enhanced BB T1WIs and referring to T1WIs, T2WIs, and contrast-enhanced T1WIs, by using the open-source software ITK-Snap, version 3.8.0 (available at <ext-link ext-link-type="uri" xlink:href="http://www.itksnap.org">www.itksnap.org</ext-link>) (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>) (<xref ref-type="bibr" rid="B16">16</xref>). Another neuroradiologist with 14 years of clinical experience confirmed the segmented BMs or modified ambiguous cases.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>
<bold>(A)</bold> A large necrotic brain metastasis (BM) is noted in the left parietal lobe on the contrast-enhanced 3D turbo spin-echo (TSE) black blood (BB) sequence. <bold>(B)</bold> The radiologist manually segmented the BM, avoiding the necrosis (red). <bold>(C)</bold> The RLK-Unet algorithm predicted the segmentation of the BM. The Dice similarity coefficient (DSC) was 0.894.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1273013-g001.tif"/>
</fig>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Treatment response</title>
<p>The treatment response, based on the RANO-BM criteria, was independently assessed and classified into three categories by two radiologists (HSO and SJA, who had 4 years and 14 years of clinical experience, respectively) (<xref ref-type="bibr" rid="B15">15</xref>): complete response (CR), partial response/stable disease (PR/SD), and progressive disease (PD). Inconsistent cases were determined by a consensus between the two radiologists. The treatment response of the DL algorithm was based on the volumetric response by using the modified RANO-BM criteria (<xref ref-type="bibr" rid="B17">17</xref>). While the RANO-BM guidelines emphasize the significance of volumetric analysis, they do not provide specific criteria. Therefore, we took inspiration from the fundamental principles of the RANO-BM guidelines and defined volumetric criteria based on the established unidimensional recommendations, using spherical geometry. In this context, PD was defined as a volume increase of &#x2265; 72.8% in the present study compared to the baseline. This corresponds to a &#x2265; 20% increase in the diameter of a perfect sphere, aligning with the unidimensional RANO-BM criteria for progression.</p>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Deep learning algorithm</title>
<p>The U-Net architecture is a powerful and flexible tool for image segmentation tasks, and its success has led to the development of many variations and extensions of the original architecture (<xref ref-type="bibr" rid="B18">18</xref>&#x2013;<xref ref-type="bibr" rid="B20">20</xref>). In the current study, we propose a modified DL-based 3D U-Net architecture, named RLK-Unet, which incorporates re-parameterizing and multiscale highlighting foregrounds (MHFs), along with postprocessing (<xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>). The training data for RLK-Unet consisted of contrast-enhanced 3D BB T1WIs as the input and the GT as the reference mask. The experiments were conducted by splitting Dataset 1 into five folds. In each round of the five-fold cross-validation procedure, four data folds were employed as the training cases, and the remaining fold was used for testing. Ten percent of the training samples were randomly selected for validation. In particular, the stratified K-fold method was used to ensure an even distribution of small and large BMs in both the training and test sets (<xref ref-type="bibr" rid="B21">21</xref>). Details of the network configuration are provided in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material S3</bold>
</xref>.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>The workflow of the RLK-Unet algorithm, which includes a reparameterized large kernel and multiscale highlighting foregrounds. RLK-Unet has four layers in the encoder and the decoder, respectively. To capture information from a large region, a large kernel (13&#xd7;13&#xd7;13) was applied in the encoder and multiscale highlighting foregrounds were introduced in the decoder to improve the detection of brain metastases. Conv, convolution; DW-Conv, depth-wise convolution; GELU, Gaussian error linear unit.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1273013-g002.tif"/>
</fig>
<p>RLK-Unet comprises an encoder that extracts the low-level features of the input data and a decoder that reconstructs the corresponding label map. Feature maps of the encoder are passed to the decoder by using skip connections, which concatenate the feature maps from the corresponding encoder layer to preserve the spatial information lost in the encoding stage (<xref ref-type="bibr" rid="B22">22</xref>). To improve the segmentation performance, we applied the guidelines proposed by Ding et&#xa0;al. (<xref ref-type="bibr" rid="B23">23</xref>) to the encoder of our architecture, which allowed us to build a large receptive field by using a large kernel. Thus, we used a large kernel structure (13 &#xd7; 13 &#xd7; 13) instead of a small kernel (3 &#xd7; 3 &#xd7; 3) that is typically used in U-Net models to extract feature maps through large receptive fields. Furthermore, we introduced MHFs into the U-Net architecture, highlighting foreground regions at multiple scales, which allowed the network to better differentiate between BMs and healthy brain tissue, even in situations in which lesions are small or have a low contrast (<xref ref-type="bibr" rid="B24">24</xref>). Additional postprocessing was conducted to eliminate blood vessels that were incompletely suppressed in the BB images and choroid plexuses, which may mimic BMs and increase the number of FPs. We implemented the surface and choroid plexus mask to effectively reduce the number of FPs in the output. This process is conducted solely on the foreground channels of the output features of the trained model. Detailed postprocessing is described in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material S4</bold>
</xref>.</p>
</sec>
<sec id="s2_6">
<label>2.6</label>
<title>Statistical methods</title>
<p>Lesion-based sensitivity and precision were calculated to assess the detection performance of the DL algorithm. Sensitivity and precision were defined by using true-positive (TP), false-negative (FN), and FP metrics, as follows:</p>
<disp-formula>
<mml:math display="block" id="M1">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>y</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<mml:math display="block" id="M2">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Sensitivities were also evaluated with respect to the size of BMs (i.e., &#x2264;10 mm or &gt;10&#xa0;mm). The automatic segmentation results were compared with the GT, using the Dice similarity coefficient (DSC) to investigate the segmentation performance of the DL algorithm. The DSC computes the overlap of the GT segmentation (V<sub>g</sub>) and automatic segmentation (V<sub>s</sub>), as follows:</p>
<disp-formula>
<mml:math display="block" id="M3">
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>C</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>*</mml:mo>
<mml:mrow>
<mml:mo>|</mml:mo>
<mml:mrow>
<mml:mi>V</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#x2229;</mml:mo>
<mml:mi>V</mml:mi>
<mml:mi>g</mml:mi>
</mml:mrow>
<mml:mo>|</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo>|</mml:mo>
<mml:mrow>
<mml:mi>V</mml:mi>
<mml:mi>s</mml:mi>
</mml:mrow>
<mml:mo>|</mml:mo>
</mml:mrow>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>+</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>|</mml:mo>
<mml:mrow>
<mml:mi>V</mml:mi>
<mml:mi>g</mml:mi>
</mml:mrow>
<mml:mo>|</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Pearson&#x2019;s correlation and Bland&#x2013;Altman analysis were conducted to compare volumetric measurements of the GT and automatic segmentations (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B26">26</xref>). Agreement between the neuroradiologist and the DL algorithm for treatment response was assessed by using the intra-class correlation coefficient (ICC) with a two-way random model of absolute agreement (<xref ref-type="bibr" rid="B27">27</xref>).</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="s3_1">
<label>3.1</label>
<title>Patient characteristics</title>
<p>In our study, a total of 186 patients diagnosed with lung cancer and brain metastases (BMs) were enrolled and categorized into two distinct groups. Dataset 1, designated for BM segmentation, comprised 128 patients with a mean age of 67.1 &#xb1; 9.9 years, consisting of 87 men and 41 women. Dataset 2, intended for response assessment, involved 58 pairs of sequential brain MRIs corresponding to 58 patients, with a mean age of 63.2 &#xb1; 9.5 years, including 35 men and 23 women. The average time interval between the baseline and follow-up MRI scans was 3.53 &#xb1; 1.32 months. Among the 58 lung cancer patients, the breakdown of treatments was as follows: 57% underwent whole-brain radiotherapy (WBRT), 19% received stereotactic radiosurgery (SRS) alone, 15% were on tyrosine kinase inhibitors alone, and 9% underwent a combination of SRS and WBRT. Comprehensive patient characteristics are detailed in <xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>. The distribution of small and large BMs in the training and test sets during 5-fold cross-validation is summarized in <xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>. A bar graph describes the distribution of BM sizes across all folds (<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Materials S4</bold>
</xref>, <xref ref-type="supplementary-material" rid="SM1">
<bold>S5</bold>
</xref>).</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Patients&#x2019; characteristics.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left">Variable</th>
<th valign="top" align="left">Dataset for BM <break/>segmentation<break/>(n=128)</th>
<th valign="top" align="left">Dataset for response assessment (n=58)</th>
<th valign="top" align="left">Total (n=186)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">&#x2003;Age (y, mean &#xb1; SD)</td>
<td valign="top" align="left">67.15 &#xb1; 9.86</td>
<td valign="top" align="left">63.21 &#xb1; 9.46</td>
<td valign="top" align="left">65.92 &#xb1; 9.88</td>
</tr>
<tr>
<th valign="top" colspan="4" align="left">Sex</th>
</tr>
<tr>
<td valign="top" align="left">Female</td>
<td valign="top" align="left">41 (32.0%)</td>
<td valign="top" align="left">23 (39.7%)</td>
<td valign="top" align="left">64 (34.4%)</td>
</tr>
<tr>
<td valign="top" align="left">Male</td>
<td valign="top" align="left">87 (68.0%)</td>
<td valign="top" align="left">35 (60.3%)</td>
<td valign="top" align="left">122 (65.6%)</td>
</tr>
<tr>
<th valign="top" colspan="4" align="left">Number of BMs</th>
</tr>
<tr>
<td valign="top" align="left">1</td>
<td valign="top" align="left">29 (22.7%)</td>
<td valign="top" align="left">10 (17.2%)</td>
<td valign="top" align="left">39 (21.0%)</td>
</tr>
<tr>
<td valign="top" align="left">2&#x2013;5</td>
<td valign="top" align="left">47 (36.7%)</td>
<td valign="top" align="left">24 (41.4%)</td>
<td valign="top" align="left">71 (38.2%)</td>
</tr>
<tr>
<td valign="top" align="left">6&#x2013;10</td>
<td valign="top" align="left">21 (16.4%)</td>
<td valign="top" align="left">11 (19.0%)</td>
<td valign="top" align="left">32 (17.2%)</td>
</tr>
<tr>
<td valign="top" align="left">&gt;10</td>
<td valign="top" align="left">31 (24.2%)</td>
<td valign="top" align="left">13 (22.4%)</td>
<td valign="top" align="left">44 (23.7%)</td>
</tr>
<tr>
<td valign="top" align="left">Volume of BM, mm<sup>3</sup> (mean &#xb1; SD)</td>
<td valign="top" align="left">694.13 &#xb1; 2057.07</td>
<td valign="top" align="left">419.01 &#xb1; 1192.27</td>
<td valign="top" align="left">608.34 &#xb1; 1832.81</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>BM, brain metastasis; SD, standard deviation.</p>
</fn>
<fn>
<p>Data are presented as the mean &#xb1; standard deviation or as numbers of patients (%).</p>
</fn>
</table-wrap-foot>
</table-wrap>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Distribution of small and large BMs in training and test sets during 5-fold cross validation.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left" rowspan="2"/>
<th valign="top" colspan="2" align="left">Training</th>
<th valign="top" colspan="2" align="left">Test</th>
</tr>
<tr>
<th valign="top" align="left">Small BM</th>
<th valign="top" align="left">Large BM</th>
<th valign="top" align="left">Small BM</th>
<th valign="top" align="left">Large BM</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Number of BMs at fold 1</td>
<td valign="top" align="left">994 (84.1%)</td>
<td valign="top" align="left">187 (15.9%)</td>
<td valign="top" align="left">120 (75.9%)</td>
<td valign="top" align="left">38 (24.1%)</td>
</tr>
<tr>
<td valign="top" align="left">Number of BMs at fold 2</td>
<td valign="top" align="left">881 (81.7%)</td>
<td valign="top" align="left">197 (18.3%)</td>
<td valign="top" align="left">233 (89.2%)</td>
<td valign="top" align="left">28 (10.8%)</td>
</tr>
<tr>
<td valign="top" align="left">Number of BMs at fold 3</td>
<td valign="top" align="left">665 (82.5%)</td>
<td valign="top" align="left">141 (17.5%)</td>
<td valign="top" align="left">449 (84.2%)</td>
<td valign="top" align="left">84 (15.8%)</td>
</tr>
<tr>
<td valign="top" align="left">Number of BMs at fold 4</td>
<td valign="top" align="left">999 (84.3%)</td>
<td valign="top" align="left">186 (15.7%)</td>
<td valign="top" align="left">115 (74.6%)</td>
<td valign="top" align="left">39 (25.4%)</td>
</tr>
<tr>
<td valign="top" align="left">Number of BMs at fold 5</td>
<td valign="top" align="left">917 (82.9%)</td>
<td valign="top" align="left">189 (17.1%)</td>
<td valign="top" align="left">197 (84.5%)</td>
<td valign="top" align="left">36 (15.5%)</td>
</tr>
<tr>
<td valign="top" align="left">Mean</td>
<td valign="top" align="left">891 (83.1%)</td>
<td valign="top" align="left">180 (16.9%)</td>
<td valign="top" align="left">222 (83.1%)</td>
<td valign="top" align="left">45 (16.9%)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Small BM: Brain metastasis (&#x2264;10 mm in diameter).</p>
</fn>
<fn>
<p>Large BM: Brain metastasis (&gt;10&#xa0;mm in diameter).</p>
</fn>
<fn>
<p>Data are presented as numbers of patients (%).</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Detection and segmentation performance of DL algorithms</title>
<p>The detection sensitivities and precisions of RLK-Unet are summarized in <xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>. RLK-Unet demonstrated a sensitivity of 86.9% and a precision of 79.6% for all BMs. False positive (FP) per scan was 1.76. In particular, we evaluated the predicted result from RLK-Unet, focusing on the assessment of segmentation performance that excludes necrosis and the detection of small BMs. The predicted results of RLK-Unet were analyzed by categorizing the BMs into two groups using a diameter threshold of 10&#xa0;mm. The sensitivity and precision for the detection of small BMs (&#x2264;10 mm) were 80.84% and 78.39%, respectively, whereas the sensitivity and precision for large BMs (&gt;10&#xa0;mm) were 98.66% and 91.10%, respectively. In addition, FP per scan for small BMs was relatively higher (1.6) than that for large BMs (0.15).</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Detection and segmentation performance of RLK-Unet.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left"/>
<th valign="top" align="left">Small BM</th>
<th valign="top" align="left">Large BM</th>
<th valign="top" align="left">All</th>
</tr>
</thead>
<tbody>
<tr>
<th valign="top" colspan="4" align="left">Detection</th>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Sensitivity (%)</td>
<td valign="top" align="left">80.84 &#xb1; 7.32</td>
<td valign="top" align="left">98.66 &#xb1; 1.26</td>
<td valign="top" align="left">86.90 &#xb1; 4.07</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Missed BM/patient (%)</td>
<td valign="top" align="left">1.67 &#xb1; 0.93</td>
<td valign="top" align="left">0.03 &#xb1; 0.05</td>
<td valign="top" align="left">1.71 &#xb1; 0.86</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;FP/scan</td>
<td valign="top" align="left">1.60 &#xb1; 0.19</td>
<td valign="top" align="left">0.15 &#xb1; 0.10</td>
<td valign="top" align="left">1.76 &#xb1; 0.22</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;Precision (%)</td>
<td valign="top" align="left">78.39 &#xb1; 8.27</td>
<td valign="top" align="left">91.10 &#xb1; 6.18</td>
<td valign="top" align="left">79.60 &#xb1; 6.46</td>
</tr>
<tr>
<th valign="top" colspan="4" align="left">Segmentation</th>
</tr>
<tr>
<td valign="top" align="left">&#x2003;DSC</td>
<td valign="top" align="left">0.54 &#xb1; 0.08</td>
<td valign="top" align="left">0.85 &#xb1; 0.03</td>
<td valign="top" align="left">0.66 &#xb1; 0.02</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>BM, brain metastasis; DSC, Dice similarity coefficient; FP, false-positive.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>The DSC for all BMs was 0.663, whereas the DSCs for the large and small BMs were 0.851 and 0.535, respectively (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>). <xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref> displays the volumetric correlation between the GT and the automated segmentation. The Pearson&#x2019;s correlation coefficient (<italic>r</italic>) was 0.96, which indicated a strong positive correlation between the two sets. Bland&#x2013;Altman analysis findings also demonstrated excellent agreement with a difference of 0.01 cm<sup>3</sup> between the two sets of results. These results confirmed the accuracy and reliability of the proposed algorithm.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Representative figures, voxel counts of ground truth, prediction, true positives and DSC scores of large BM <bold>(A&#x2013;C)</bold> and small BM <bold>(D&#x2013;F)</bold>. Red and green colors indicate false positives and false negatives, respectively, while the yellow color represents true positives.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1273013-g003.tif"/>
</fig>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Volumetric correlations between ground truth and automated segmentations of BMs. <bold>(A)</bold> Pearson correlation (<italic>r</italic>) at the lesion level. The shaded area indicates the 95% confidence interval (95% CI) of the fitted line (<italic>y</italic> = the regression equation). <bold>(B)</bold> Bland&#x2013;Altman analysis at the lesion level. The solid line indicates the mean difference between the two segmentations, whereas the dotted lines indicate the 95% limit of agreement. BM, brain metastasis; GT, ground truth.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1273013-g004.tif"/>
</fig>
<p>The detection and segmentation performance of each step of RLK-Unet are presented in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material S7</bold>
</xref>. The use of encoder blocks with a large kernel (13 &#xd7; 13 &#xd7; 13) in our 3D U-Net architecture improved the sensitivity for detecting BMs to 88.3%, compared to the sensitivity of 84.5% that was achieved using smaller kernel sizes. The application of MHFs increased the precision from 68.4% to 73.9%. After postprocessing, the precision further improved from 73.9% to 79.6%.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Agreements in the response assessment for BMs</title>
<p>The agreement in the response assessment of BMs between the radiologists and the DL algorithm was excellent [ICC = 0.84; 95% confidence interval (CI), 0.75-0.91]. Response assessment for BM in 87.9% (51/58) of patients was agreed on by the radiologist and the DL algorithm (<xref ref-type="table" rid="T4">
<bold>Table&#xa0;4</bold>
</xref>). The DL algorithm overestimated the response assessment in 6.8% (4/58) of patients (<xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5</bold>
</xref>) in which all PR/SD cases were misclassified as PD, and underestimated the response assessment in 5.1% (3/58) of patients (<xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6</bold>
</xref>), in which one PD case was misinterpreted as PR/SD and two PR/SD cases were misinterpreted as CR.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Response assessment by the radiologists and by the deep learning algorithm.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" rowspan="2" colspan="2" align="left"/>
<th valign="top" colspan="3" align="left">Response assessment by the deep learning algorithm</th>
</tr>
<tr>
<th valign="top" align="left">CR</th>
<th valign="top" align="left">PR/SD</th>
<th valign="top" align="left">PD</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" rowspan="3" align="left">Response assessment by radiologist</td>
<td valign="top" align="left">CR</td>
<td valign="top" align="left">3</td>
<td valign="top" align="left">0</td>
<td valign="top" align="left">0</td>
</tr>
<tr>
<td valign="top" align="left">PR/SD</td>
<td valign="top" align="left">1</td>
<td valign="top" align="left">24</td>
<td valign="top" align="left">1</td>
</tr>
<tr>
<td valign="top" align="left">PD</td>
<td valign="top" align="left">0</td>
<td valign="top" align="left">1</td>
<td valign="top" align="left">28</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>CR, complete response; PR/SD, partial response/stable disease; PD, progressive disease.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Examples of the overestimation of treatment response for brain metastasis by the deep learning (DL) algorithm. <bold>(A)</bold> The baseline contrast-enhanced three-dimensional (3D) turbo spin-echo (TSE) black blood (BB) T1-weighted image (T1WI) shows two metastases in both parietal cortices (green arrows). <bold>(B)</bold> Our DL algorithm predicted two corresponding metastases. <bold>(C)</bold> In the follow-up 3D TSE BB T1WI, the radiologist classified this case as stable. <bold>(D)</bold> The DL algorithm regarded the equivocal enhancement (red box) in right deep white matter as a new lesion and assessed this finding as progression.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1273013-g005.tif"/>
</fig>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Examples of the underestimation of treatment response for brain metastasis by the deep learning (DL) algorithm. <bold>(A)</bold> The baseline contrast-enhanced three-dimensional (3D) turbo spin-echo (TSE) black blood (BB) T1WI shows a metastasis in the right parieto-temporal lobe (red box). <bold>(B)</bold> Our DL algorithm predicted a corresponding metastasis. <bold>(C)</bold> On the follow up 3D TSE BB T1WI, the radiologist classified this case as a partial response/stable. <bold>(D)</bold> The DL algorithm missed a remaining tumor and assessed this case as complete remission.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1273013-g006.tif"/>
</fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>RLK-Unet for the detection and segmentation of BMs has two clinically favored features that previous models have rarely tried. First, RLK-Unet was based only on a single modality, 3D BB T1WI. Second, RLK-Unet segments the solid part of the tumor to avoid necrosis. Nevertheless, RLK-Unet exhibited promising performance for detection and segmentation. Moreover, the volumetric assessment by RLK-Unet strongly agreed with that of the response assessment by the radiologist, based on the RANO-BM criteria. Thus, our model is expected to facilitate clinical workflow and to potentially improve patient outcomes via a volumetric assessment of the treatment response.</p>
<p>While earlier studies demonstrated high sensitivity in the detection of BMs, surpassing 80%, they were accompanied by a significant number of FPs and, consequently, exhibited low precision, as indicated in <xref ref-type="table" rid="T5">
<bold>Table&#xa0;5</bold>
</xref> (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B29">29</xref>). Subsequent research, incorporating multiple modalities, showed improved performance with sensitivities ranging from 82% to 100% and reducing the FP rate to between 0.6 and 1.5 per scan (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B30">30</xref>). Notably, a recent study, utilizing a single modality, introduced a novel loss function and integrated temporal prior information, achieving exceptional results (sensitivity: 84%; precision: 99%; FP rate: 1) (<xref ref-type="bibr" rid="B31">31</xref>). Another extensive study also reported remarkable outcomes (sensitivity: 88.4%; precision: 90.1%; FP rate: 0.4) (<xref ref-type="bibr" rid="B32">32</xref>). However, it is essential to acknowledge that most of these studies did not address the critical issue of excluding internal necrosis, which is pivotal for accurate volumetric assessment of tumor burden. In contrast, our RLK-Unet successfully addressed the exclusion of necrotic regions within BMs, achieving outstanding performance (sensitivity: 86.9%; precision: 79.6%; FP rate: 1.8). To achieve this, we implemented several strategies within our DL algorithm, enabling us to maintain high sensitivity while concurrently reducing the FP rate.</p>
<table-wrap id="T5" position="float">
<label>Table&#xa0;5</label>
<caption>
<p>Comparison of published DL-based BMs detection and segmentation performance.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" rowspan="2" align="left">Author</th>
<th valign="middle" rowspan="2" align="left">Number of patients (train/test)</th>
<th valign="middle" rowspan="2" align="left">Number of BMs (train/test)</th>
<th valign="middle" colspan="2" align="left">BM size</th>
<th valign="middle" rowspan="2" align="left">Acquisition image</th>
<th valign="middle" rowspan="2" align="left">DL model</th>
<th valign="middle" colspan="4" align="left">Performance</th>
<th valign="middle" align="left" rowspan="2">Excluding necrosis</th>
</tr>
<tr>
<th valign="middle" align="left">Diameter (mm)</th>
<th valign="middle" align="left">Volume<break/>(cm<sup>3</sup>)</th>
<th valign="middle" align="left">Sensitivity (%)</th>
<th valign="middle" align="left">FP/scan</th>
<th valign="middle" align="left">Precision (%)</th>
<th valign="middle" align="left">DSC</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Charron et&#xa0;al., 2018 (<xref ref-type="bibr" rid="B28">28</xref>)</td>
<td valign="middle" align="left">164/18</td>
<td valign="middle" align="left">374/38</td>
<td valign="middle" align="left">8.1</td>
<td valign="middle" align="left">2.4</td>
<td valign="middle" align="left">Multisequence</td>
<td valign="middle" align="left">DeepMedic</td>
<td valign="middle" align="left">94</td>
<td valign="middle" align="left">12.7/patient</td>
<td valign="middle" align="left"/>
<td valign="middle" align="left">0.77</td>
<td valign="middle" align="left">Y</td>
</tr>
<tr>
<td valign="middle" align="left">Gr&#xf8;vik et&#xa0;al., 2020 (<xref ref-type="bibr" rid="B7">7</xref>)</td>
<td valign="middle" align="left">105/51</td>
<td valign="middle" align="left">-/856</td>
<td valign="middle" align="left">2-40</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">Multisequence</td>
<td valign="middle" align="left">GoogLeNet</td>
<td valign="middle" align="left">50:&lt;7 mm<break/>80:&lt;22 mm<break/>100: &#x2265; 22 mm</td>
<td valign="middle" align="left">8 &#xb1; 13</td>
<td valign="middle" align="left">79 &#xb1; 20</td>
<td valign="middle" align="left">0.79 &#xb1; 0.12</td>
<td valign="middle" align="left">N</td>
</tr>
<tr>
<td valign="middle" align="left">Xue et&#xa0;al., 2020 (<xref ref-type="bibr" rid="B10">10</xref>)</td>
<td valign="middle" align="left">1201</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">2-45</td>
<td valign="middle" align="left">0.1-23.8</td>
<td valign="middle" align="left">T1 CE<break/>(T1 GRE)</td>
<td valign="middle" align="left">BMDS Net</td>
<td valign="middle" align="left">96 (&#x2265;6 mm)</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">
<italic>-</italic>
</td>
<td valign="middle" align="left">0.85 &#xb1; 0.08 (&#x2265;6 mm)</td>
<td valign="middle" align="left">N</td>
</tr>
<tr>
<td valign="middle" align="left">Zhou et&#xa0;al., 2020 (<xref ref-type="bibr" rid="B29">29</xref>)</td>
<td valign="middle" align="left">212/54</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">10 &#xb1; 8</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">T1 CE<break/>(T1 GRE)</td>
<td valign="middle" align="left">Single-shot Detector</td>
<td valign="middle" align="left">15:&lt;3mm<break/>70: 3~6mm<break/>98: &#x2265; 6 mm</td>
<td valign="middle" align="left">3-4 (&#x2265; 6&#xa0;mm)</td>
<td valign="middle" align="left">100:&lt;3mm<break/>35: 3~6mm<break/>36: &#x2265; 6 mm</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">N</td>
</tr>
<tr>
<td valign="middle" align="left">J&#xfc;nger et&#xa0;al., 2021 (<xref ref-type="bibr" rid="B6">6</xref>)</td>
<td valign="middle" align="left">66/17</td>
<td valign="middle" align="left">248/67</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">1.0 &#xb1; 4.2</td>
<td valign="middle" align="left">Multisequence</td>
<td valign="middle" align="left">DeepMedic</td>
<td valign="middle" align="left">86 &#xb1; 4</td>
<td valign="middle" align="left">1.5</td>
<td valign="middle" align="left">68.7</td>
<td valign="middle" align="left">0.75 &#xb1; 0.02</td>
<td valign="middle" align="left">N</td>
</tr>
<tr>
<td valign="middle" align="left">Park et&#xa0;al., 2021 (<xref ref-type="bibr" rid="B8">8</xref>)</td>
<td valign="middle" align="left">188/94</td>
<td valign="middle" align="left">917/203</td>
<td valign="middle" align="left">9.9 &#xb1; 10.9</td>
<td valign="middle" align="left">1.6 &#xb1; 6.5</td>
<td valign="middle" align="left">Multisequence</td>
<td valign="middle" align="left">U-Net +&#xa0;Reconstruction</td>
<td valign="middle" align="left">82:&lt;3mm<break/>93: 3~10 mm<break/>100: &#x2265;10 mm</td>
<td valign="middle" align="left">0.6</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">0.82 &#xb1; 0.15</td>
<td valign="middle" align="left">&#x2013;</td>
</tr>
<tr>
<td valign="middle" align="left">Ottesen et&#xa0;al., 2022 (<xref ref-type="bibr" rid="B30">30</xref>)</td>
<td valign="middle" align="left">175/51</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">Multisequence</td>
<td valign="middle" align="left">HRNetV2<break/>nnUnet</td>
<td valign="middle" align="left">88</td>
<td valign="middle" align="left">1.0 &#xb1; 1.1</td>
<td valign="middle" align="left"/>
<td valign="middle" align="left">0.93 &#xb1;0.04</td>
<td valign="middle" align="left">N</td>
</tr>
<tr>
<td valign="middle" align="left">Huang et&#xa0;al., 2022 (<xref ref-type="bibr" rid="B31">31</xref>)</td>
<td valign="middle" align="left">135/32</td>
<td valign="middle" align="left">1503/278</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">T1 CE<break/>(T1 GRE)</td>
<td valign="middle" align="left">DeepMedic+</td>
<td valign="middle" align="left">93: (&#x3b1;=0.5)<break/>84: (&#x3b1;=0.99)</td>
<td valign="middle" align="left">158: (&#x3b1;=0.5)<break/>1: (&#x3b1;=0.99)</td>
<td valign="middle" align="left">62: (&#x3b1;=0.5)<break/>99: (&#x3b1;=0.99)</td>
<td valign="middle" align="left">0.80: (&#x3b1;=0.5)<break/>0.76: (&#x3b1;=0.99)</td>
<td valign="middle" align="left">&#x2013;</td>
</tr>
<tr>
<td valign="middle" align="left">Ziyaee et&#xa0;al., 2023 (<xref ref-type="bibr" rid="B32">32</xref>)</td>
<td valign="middle" align="left">845/206</td>
<td valign="middle" align="left">3482/930</td>
<td valign="middle" align="left">10.5 &#xb1; 7.8</td>
<td valign="middle" align="left">&#x2013;</td>
<td valign="middle" align="left">T1 CE<break/>(T1 GRE)</td>
<td valign="middle" align="left">nnUnet</td>
<td valign="middle" align="left">88.4</td>
<td valign="middle" align="left">0.4 &#xb1; 1.0</td>
<td valign="middle" align="left">90.1</td>
<td valign="middle" align="left">0.82 &#xb1;0.09</td>
<td valign="middle" align="left">&#x2013;</td>
</tr>
<tr>
<td valign="middle" align="left">Ours</td>
<td valign="middle" align="left">103/25</td>
<td valign="middle" align="left">1339</td>
<td valign="middle" align="left">7.3 &#xb1; 7.2</td>
<td valign="middle" align="left">0.7 &#xb1; 2.1</td>
<td valign="middle" align="left">T1 CE<break/>(BB T1WI)</td>
<td valign="middle" align="left">RLK-Unet</td>
<td valign="middle" align="left">81 &#xb1; 7:&#x2264;10 mm<break/>99 &#xb1; 1:&gt;10 mm</td>
<td valign="middle" align="left">1.8 &#xb1; 0.2</td>
<td valign="middle" align="left">80 &#xb1; 7</td>
<td valign="middle" align="left">0.54 &#xb1; 0.08:&#x2264;10mm 0.85 &#xb1; 0.03:&gt;10 mm</td>
<td valign="middle" align="left">Y</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>BM, brain metastasis; DL, deep learning; DSC, Dice similarity coefficient; FP, false-positive; T1 CE, contrast-enhanced 3D T1; T1 GRE, T1 gradient-echo.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>First, the DL model was based on a BB image. A previous meta-analysis (<xref ref-type="bibr" rid="B33">33</xref>) reported the superiority of BB images for the detection of small BMs (&lt;5&#xa0;mm) because these images suppress the blood signal and have a higher contrast-to-noise ratio, compared to GRE images. In accordance with this finding, RLK-Unet maintained a high sensitivity of 80.84 in detecting small BMs (&#x2264;10 mm), whereas previous models showed a relatively lower performance for small BMs (sensitivity: 15&#x2013;50) (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B29">29</xref>). Second, we used a few large kernels instead of a stack of small kernels in the CNN. This approach resulted in a larger effective receptive field more efficiently, thereby significantly increasing the sensitivity from 84.52 to 88.36 (<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material S2</bold>
</xref>) (<xref ref-type="bibr" rid="B23">23</xref>). However, because of the trade-off between sensitivity and precision, the precision of RLK-Unet was unfortunately decreased from 80.6 to 68.4. To compensate for this, we implemented MHFs, which maximize the contrast between BMs and normal brain tissue, thereby increasing precision. Lastly, the surface mask effectively decreased FPs by suppressing some blood vessels that were incompletely suppressed in BB images (<xref ref-type="bibr" rid="B34">34</xref>). The choroid plexus also frequently mimicked BMs in our model. It was successfully removed using the choroid plexus mask.</p>
<p>RLK-Unet demonstrated a DSC of 0.66 in segmenting BMs. This value is lower than that reported in previous studies (0.77&#x2013;0.85) (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B28">28</xref>). We suggest the following explanation for this result: the DSC cannot incorporate the size of the BMs within its score. Even small pixel differences between the GT and the prediction in small BMs may substantially decrease the score (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>) (<xref ref-type="bibr" rid="B35">35</xref>). In line with this suggestion, our results showed excellent segmentation performance in larger BMs (DSC of large BMs vs. small BMs: 0.85 vs. 0.54). We presume that small pixel differences in the segmentation of small BMs rarely affect the volumetric assessment. The excellent agreement in the volume measurement of the BM between the GT and the prediction in our results also supports our assumption.</p>
<p>Volumetric measurement may provide a more objective and sensitive quantification to evaluate tumor response to treatment than does linear measurement in the current RANO-BM criteria (<xref ref-type="bibr" rid="B36">36</xref>). However, it is not clinically feasible because the manual volumetric measurement is a labor-intensive, time-consuming, and complex task (<xref ref-type="bibr" rid="B37">37</xref>). The clinical significance of our work lies in the fact that our automated DL algorithm may alleviate these tedious and labor-intensive tasks while maintaining results similar to those of conventional tumor assessment by a radiologist. Cho et&#xa0;al. (<xref ref-type="bibr" rid="B38">38</xref>) recently showed the possibility of end-to-end automated treatment response evaluation of BM. However, the sensitivity of BM detection in their system was relatively low (58.0%&#x2013;80.0%). In addition, their BM segmentation method included internal necrosis, which should be avoided in volumetric measurements. Previous studies have reported that the presence of necrosis in BMs may be an indication of a response to chemotherapy or radiation therapy (<xref ref-type="bibr" rid="B14">14</xref>). Furthermore, various imaging characteristics can change during the course of treatment. For instance, patients receiving a combination of tyrosine kinase inhibitors and intracranial radiation therapy are more likely to experience hemorrhages within their BMs (<xref ref-type="bibr" rid="B39">39</xref>). Additionally, the values of the apparent diffusion coefficient show alterations before and after chemoradiation therapy (<xref ref-type="bibr" rid="B40">40</xref>). As a result, monitoring changes in these imaging characteristics is essential for assessing the treatment effects on BMs. Considering these aspects, our method may offer improved performance and better alignment with real-world clinical scenarios. 
</p>
<p>However, RLK-Unet also showed three disagreements with the conventional RANO-BM criteria for treatment assessment (5.1%; 3/58 patients). RLK-Unet may overestimate treatment responses because it records an equivocal enhancement as a true lesion and may underestimate treatment responses because it ignores subtle enhancement after treatment. The incorporation of dynamic information from longitudinal images into the DL algorithm may improve performance. With an in-depth comparison of pre- and posttreatment images, the DL algorithm may better detect subtle changes in tumor size and assess the treatment response more precisely (<xref ref-type="bibr" rid="B41">41</xref>).</p>
<p>Our study has some limitations. First, it was a retrospective single-center study, which is insufficient to address variability in scanning techniques and hardware implementation across hospitals. We used five-fold cross-validation for detection and segmentation and a temporally separated internal test set for the treatment response assessment; however, a multicenter study in the near future is required to improve the generalizability of our results. Second, RLK-Unet has some limitations in assessing leptomeningeal seeding, pachymeningeal seeding, and skull metastases because we excluded these factors from our cohort or removed the skull during preprocessing. Third, RLK-Unet was based on patients with lung cancer and may not be applicable to patients with other primary cancers. Finally, in this work, a contrast-enhanced BB T1WI (3D fast spin echo T1-weighted technique) was used for developing our algorithm because a previous study showed that the performance of an algorithm based on 3D BB T1WI was superior to that based on 3D GRE T1WI (sensitivity: 92.6 vs. 76.8) (<xref ref-type="bibr" rid="B8">8</xref>). Our study aligns with this result, with sensitivity, DSC, and precision for 3D BB T1WI and 3D GRE T1WI as follows: 86.9, 0.66, 79.6 vs. 53.7, 0.46, 68.7, as shown in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material S7</bold>
</xref>. Consequently, our algorithm may not be optimally applied to the 3D GRE T1WI sequence, which is more widely used for BM imaging. Lastly, the performance of our algorithm may not be directly compared with previous studies because of a different dataset. However, we ran publicly available algorithms such as 3D U-Net and nnU-Net, which were utilized in prior studies (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B32">32</xref>), for our dataset, and their performances are inferior to the results of our algorithm (<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material S7</bold>
</xref>). Consequently, we may conclude that RLK-Unet shows a comparative performance for BM detection and segmentation.</p>
</sec>
<sec id="s5" sec-type="conclusions">
<label>5</label>
<title>Conclusions</title>
<p>Our developed DL model for the treatment response assessment of BM had more favorable features in clinical practice than did models reported in previous studies. RLK-Unet uses a single modality but shows excellent performance for the detection and segmentation of BMs, even for small metastases. Moreover, our segmentation results predicted the GT very well, while avoiding cysts or necrosis, and accurately measured the volumetric tumor burden. The assessment of the treatment response showed good agreement with the decision of the radiologists. We believe that this research takes DL-based BM evaluation to the next level and may facilitate the clinical workflow for radiologists or neuro-oncologists.</p>
</sec>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material</bold>
</xref>. Further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec id="s7" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Gangnam Severance Hospital IRB. The studies were conducted in accordance with the local legislation and institutional requirements. The ethics committee/institutional review board waived the requirement of written informed consent for participation from the participants or the participants&#x2019; legal guardians/next of kin because it was a retrospective study. Written informed consent was not obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article because our institutional review board waived the requirement for informed consent, as this was a retrospective study.</p>
</sec>
<sec id="s8" sec-type="author-contributions">
<title>Author contributions</title>
<p>SS: Formal Analysis, Writing &#x2013; original draft. BJ: Data curation, Writing &#x2013; review &amp; editing. MP: Writing &#x2013; review &amp; editing. SS: Supervision, Writing &#x2013; review &amp; editing. HO: Data curation, Methodology, Writing &#x2013; review &amp; editing. JK: Methodology, Supervision, Writing &#x2013; review &amp; editing. SL: Project administration, Writing &#x2013; review &amp; editing. SA: Conceptualization, Funding acquisition, Supervision, Writing &#x2013; original draft, Writing &#x2013; review &amp; editing. J-ML: Conceptualization, Funding acquisition, Writing &#x2013; review &amp; editing.</p>
</sec>
</body>
<back>
<sec id="s9" sec-type="funding-information">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. This research was supported by the Neurological Disorder Research Program of the National Research Foundation (NRF) funded by the Korea government (MSIT) (No. 2020M3E5D9080788) to J-ML and by a National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (No. 2020R1F1A1056512) and a grant of the Korea Health Technology R&amp;D Project through the Korea Health Industry Development Institute (KHIDI), funded by the Ministry of Health &amp; Welfare, Republic of Korea (grant number: HI20C2125) to SA.</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>We thank all the patients who participated in this study, and all the health care professionals who treated these patients.</p>
</ack>
<sec id="s10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s12" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fonc.2023.1273013/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fonc.2023.1273013/full#supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet_1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Villano</surname> <given-names>JL</given-names>
</name>
<name>
<surname>Durbin</surname> <given-names>EB</given-names>
</name>
<name>
<surname>Normandeau</surname> <given-names>C</given-names>
</name>
<name>
<surname>Thakkar</surname> <given-names>JP</given-names>
</name>
<name>
<surname>Moirangthem</surname> <given-names>V</given-names>
</name>
<name>
<surname>Davis</surname> <given-names>FG</given-names>
</name>
</person-group>. <article-title>Incidence of brain metastasis at initial presentation of lung cancer</article-title>. <source>Neuro Oncol</source> (<year>2015</year>) <volume>17</volume>(<issue>1</issue>):<page-range>122&#x2013;8</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/neuonc/nou099</pub-id>
</citation>
</ref>
<ref id="B2">
<label>2</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Postmus</surname> <given-names>PE</given-names>
</name>
<name>
<surname>Kerr</surname> <given-names>KM</given-names>
</name>
<name>
<surname>Oudkerk</surname> <given-names>M</given-names>
</name>
<name>
<surname>Senan</surname> <given-names>S</given-names>
</name>
<name>
<surname>Waller</surname> <given-names>DA</given-names>
</name>
<name>
<surname>Vansteenkiste</surname> <given-names>J</given-names>
</name>
<etal/>
</person-group>. <article-title>Early and locally advanced non-small-cell lung cancer (NSCLC): ESMO Clinical Practice Guidelines for diagnosis, treatment and follow-up</article-title>. <source>Ann Oncol</source> (<year>2017</year>) <volume>28</volume>(<supplement>suppl_4</supplement>):<fpage>iv1</fpage>&#x2013;<lpage>iv21</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/annonc/mdx222</pub-id>
</citation>
</ref>
<ref id="B3">
<label>3</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Amin</surname> <given-names>MB</given-names>
</name>
<name>
<surname>Greene</surname> <given-names>FL</given-names>
</name>
<name>
<surname>Edge</surname> <given-names>SB</given-names>
</name>
<name>
<surname>Compton</surname> <given-names>CC</given-names>
</name>
<name>
<surname>Gershenwald</surname> <given-names>JE</given-names>
</name>
<name>
<surname>Brookland</surname> <given-names>RK</given-names>
</name>
<etal/>
</person-group>. <article-title>The Eighth Edition AJCC Cancer Staging Manual: Continuing to build a bridge from a population-based to a more "personalized" approach to cancer staging</article-title>. <source>Ca-a Cancer J Clin</source> (<year>2017</year>) <volume>67</volume>(<issue>2</issue>):<page-range>93&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.3322/caac.21388</pub-id>
</citation>
</ref>
<ref id="B4">
<label>4</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aoyama</surname> <given-names>H</given-names>
</name>
<name>
<surname>Shirato</surname> <given-names>H</given-names>
</name>
<name>
<surname>Tago</surname> <given-names>M</given-names>
</name>
<name>
<surname>Nakagawa</surname> <given-names>K</given-names>
</name>
<name>
<surname>Toyoda</surname> <given-names>T</given-names>
</name>
<name>
<surname>Hatano</surname> <given-names>K</given-names>
</name>
<etal/>
</person-group>. <article-title>Stereotactic radiosurgery plus whole-brain radiation therapy vs stereotactic radiosurgery alone for treatment of brain metastases: a randomized controlled trial</article-title>. <source>JAMA</source> (<year>2006</year>) <volume>295</volume>(<issue>21</issue>):<page-range>2483&#x2013;91</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1001/jama.295.21.2483</pub-id>
</citation>
</ref>
<ref id="B5">
<label>5</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pinkham</surname> <given-names>MB</given-names>
</name>
<name>
<surname>Whitfield</surname> <given-names>GA</given-names>
</name>
<name>
<surname>Brada</surname> <given-names>M</given-names>
</name>
</person-group>. <article-title>New developments in intracranial stereotactic radiotherapy for metastases</article-title>. <source>Clin Oncol (R Coll Radiol)</source> (<year>2015</year>) <volume>27</volume>(<issue>5</issue>):<page-range>316&#x2013;23</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.clon.2015.01.007</pub-id>
</citation>
</ref>
<ref id="B6">
<label>6</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Junger</surname> <given-names>ST</given-names>
</name>
<name>
<surname>Hoyer</surname> <given-names>UCI</given-names>
</name>
<name>
<surname>Schaufler</surname> <given-names>D</given-names>
</name>
<name>
<surname>Laukamp</surname> <given-names>KR</given-names>
</name>
<name>
<surname>Goertz</surname> <given-names>L</given-names>
</name>
<name>
<surname>Thiele</surname> <given-names>F</given-names>
</name>
<etal/>
</person-group>. <article-title>Fully automated MR detection and segmentation of brain metastases in non-small cell lung cancer using deep learning</article-title>. <source>J Magn Reson Imaging</source> (<year>2021</year>) <volume>54</volume>(<issue>5</issue>):<page-range>1608&#x2013;22</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/jmri.27741</pub-id>
</citation>
</ref>
<ref id="B7">
<label>7</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Grovik</surname> <given-names>E</given-names>
</name>
<name>
<surname>Yi</surname> <given-names>D</given-names>
</name>
<name>
<surname>Iv</surname> <given-names>M</given-names>
</name>
<name>
<surname>Tong</surname> <given-names>E</given-names>
</name>
<name>
<surname>Rubin</surname> <given-names>D</given-names>
</name>
<name>
<surname>Zaharchuk</surname> <given-names>G</given-names>
</name>
</person-group>. <article-title>Deep learning enables automatic detection and segmentation of brain metastases on multisequence MRI</article-title>. <source>J Magn Reson Imaging</source> (<year>2020</year>) <volume>51</volume>(<issue>1</issue>):<page-range>175&#x2013;82</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/jmri.26766</pub-id>
</citation>
</ref>
<ref id="B8">
<label>8</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Park</surname> <given-names>YW</given-names>
</name>
<name>
<surname>Jun</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Han</surname> <given-names>K</given-names>
</name>
<name>
<surname>An</surname> <given-names>C</given-names>
</name>
<name>
<surname>Ahn</surname> <given-names>SS</given-names>
</name>
<etal/>
</person-group>. <article-title>Robust performance of deep learning for automatic detection and segmentation of brain metastases using three-dimensional black-blood and three-dimensional gradient echo imaging</article-title>. <source>Eur Radiol</source> (<year>2021</year>) <volume>31</volume>(<issue>9</issue>):<page-range>6686&#x2013;95</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-021-07783-3</pub-id>
</citation>
</ref>
<ref id="B9">
<label>9</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Stojadinovic</surname> <given-names>S</given-names>
</name>
<name>
<surname>Hrycushko</surname> <given-names>B</given-names>
</name>
<name>
<surname>Wardak</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Lu</surname> <given-names>W</given-names>
</name>
<name>
<surname>Yan</surname> <given-names>Y</given-names>
</name>
<etal/>
</person-group>. <article-title>Automatic metastatic brain tumor segmentation for stereotactic radiosurgery applications</article-title>. <source>Phys Med Biol</source> (<year>2016</year>) <volume>61</volume>(<issue>24</issue>):<page-range>8440&#x2013;61</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1088/0031-9155/61/24/8440</pub-id>
</citation>
</ref>
<ref id="B10">
<label>10</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xue</surname> <given-names>J</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>B</given-names>
</name>
<name>
<surname>Ming</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>X</given-names>
</name>
<name>
<surname>Jiang</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>C</given-names>
</name>
<etal/>
</person-group>. <article-title>Deep learning-based detection and segmentation-assisted management of brain metastases</article-title>. <source>Neuro Oncol</source> (<year>2020</year>) <volume>22</volume>(<issue>4</issue>):<page-range>505&#x2013;14</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/neuonc/noz234</pub-id>
</citation>
</ref>
<ref id="B11">
<label>11</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Park</surname> <given-names>J</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>J</given-names>
</name>
<name>
<surname>Yoo</surname> <given-names>E</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>H</given-names>
</name>
<name>
<surname>Chang</surname> <given-names>JH</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>EY</given-names>
</name>
</person-group>. <article-title>Detection of small metastatic brain tumors: comparison of 3D contrast-enhanced whole-brain black-blood imaging and MP-RAGE imaging</article-title>. <source>Invest Radiol</source> (<year>2012</year>) <volume>47</volume>(<issue>2</issue>):<page-range>136&#x2013;41</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/RLI.0b013e3182319704</pub-id>
</citation>
</ref>
<ref id="B12">
<label>12</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kammer</surname> <given-names>NN</given-names>
</name>
<name>
<surname>Coppenrath</surname> <given-names>E</given-names>
</name>
<name>
<surname>Treitl</surname> <given-names>KM</given-names>
</name>
<name>
<surname>Kooijman</surname> <given-names>H</given-names>
</name>
<name>
<surname>Dietrich</surname> <given-names>O</given-names>
</name>
<name>
<surname>Saam</surname> <given-names>T</given-names>
</name>
</person-group>. <article-title>Comparison of contrast-enhanced modified T1-weighted 3D TSE black-blood and 3D MP-RAGE sequences for the detection of cerebral metastases and brain tumours</article-title>. <source>Eur Radiol</source> (<year>2016</year>) <volume>26</volume>(<issue>6</issue>):<page-range>1818&#x2013;25</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-015-3975-x</pub-id>
</citation>
</ref>
<ref id="B13">
<label>13</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yoo</surname> <given-names>J</given-names>
</name>
<name>
<surname>Cha</surname> <given-names>YJ</given-names>
</name>
<name>
<surname>Park</surname> <given-names>HH</given-names>
</name>
<name>
<surname>Park</surname> <given-names>M</given-names>
</name>
<name>
<surname>Joo</surname> <given-names>B</given-names>
</name>
<name>
<surname>Suh</surname> <given-names>SH</given-names>
</name>
<etal/>
</person-group>. <article-title>The extent of necrosis in brain metastases may predict subtypes of primary cancer and overall survival in patients receiving craniotomy</article-title>. <source>Cancers (Basel)</source> (<year>2022</year>) <volume>14</volume>(<issue>7</issue>). doi:&#xa0;<pub-id pub-id-type="doi">10.3390/cancers14071694</pub-id>
</citation>
</ref>
<ref id="B14">
<label>14</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pu</surname> <given-names>RT</given-names>
</name>
<name>
<surname>Schott</surname> <given-names>AF</given-names>
</name>
<name>
<surname>Sturtz</surname> <given-names>DE</given-names>
</name>
<name>
<surname>Griffith</surname> <given-names>KA</given-names>
</name>
<name>
<surname>Kleer</surname> <given-names>CG</given-names>
</name>
</person-group>. <article-title>Pathologic features of breast cancer associated with complete response to neoadjuvant chemotherapy: importance of tumor necrosis</article-title>. <source>Am J Surg Pathol</source> (<year>2005</year>) <volume>29</volume>(<issue>3</issue>):<page-range>354&#x2013;8</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/01.pas.0000152138.89395.fb</pub-id>
</citation>
</ref>
<ref id="B15">
<label>15</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lin</surname> <given-names>NU</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>EQ</given-names>
</name>
<name>
<surname>Aoyama</surname> <given-names>H</given-names>
</name>
<name>
<surname>Barani</surname> <given-names>IJ</given-names>
</name>
<name>
<surname>Barboriak</surname> <given-names>DP</given-names>
</name>
<name>
<surname>Baumert</surname> <given-names>BG</given-names>
</name>
<etal/>
</person-group>. <article-title>Response assessment criteria for brain metastases: proposal from the RANO group</article-title>. <source>Lancet Oncol</source> (<year>2015</year>) <volume>16</volume>(<issue>6</issue>):<page-range>e270&#x2013;8</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S1470-2045(15)70057-4</pub-id>
</citation>
</ref>
<ref id="B16">
<label>16</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yushkevich</surname> <given-names>PA</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>G</given-names>
</name>
<name>
<surname>Gerig</surname> <given-names>G</given-names>
</name>
</person-group>. <article-title>ITK-SNAP: An interactive tool for semi-automatic segmentation of multi-modality biomedical images</article-title>. <source>Annu Int Conf IEEE Eng Med Biol Soc</source> (<year>2016</year>) <volume>2016</volume>:<page-range>3342&#x2013;5</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/EMBC.2016.7591443</pub-id>
</citation>
</ref>
<ref id="B17">
<label>17</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Oft</surname> <given-names>D</given-names>
</name>
<name>
<surname>Schmidt</surname> <given-names>MA</given-names>
</name>
<name>
<surname>Weissmann</surname> <given-names>T</given-names>
</name>
<name>
<surname>Roesch</surname> <given-names>J</given-names>
</name>
<name>
<surname>Mengling</surname> <given-names>V</given-names>
</name>
<name>
<surname>Masitho</surname> <given-names>S</given-names>
</name>
<etal/>
</person-group>. <article-title>Volumetric regression in brain metastases after stereotactic radiotherapy: time course, predictors, and significance</article-title>. <source>Front Oncol</source> (<year>2020</year>) <volume>10</volume>:<elocation-id>590980</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fonc.2020.590980</pub-id>
</citation>
</ref>
<ref id="B18">
<label>18</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>X</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>H</given-names>
</name>
<name>
<surname>Qi</surname> <given-names>X</given-names>
</name>
<name>
<surname>Dou</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Fu</surname> <given-names>CW</given-names>
</name>
<name>
<surname>Heng</surname> <given-names>PA</given-names>
</name>
</person-group>. <article-title>H-denseUNet: hybrid densely connected UNet for liver and tumor segmentation from CT volumes</article-title>. <source>IEEE Trans Med Imaging</source> (<year>2018</year>) <volume>37</volume>(<issue>12</issue>):<page-range>2663&#x2013;74</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/TMI.2018.2845918</pub-id>
</citation>
</ref>
<ref id="B19">
<label>19</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ibtehaz</surname> <given-names>N</given-names>
</name>
<name>
<surname>Rahman</surname> <given-names>MS</given-names>
</name>
</person-group>. <article-title>MultiResUNet : Rethinking the U-Net architecture for multimodal biomedical image segmentation</article-title>. <source>Neural Netw</source> (<year>2020</year>) <volume>121</volume>:<fpage>74</fpage>&#x2013;<lpage>87</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.neunet.2019.08.025</pub-id>
</citation>
</ref>
<ref id="B20">
<label>20</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Weng</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>T</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Qiu</surname> <given-names>X</given-names>
</name>
</person-group>. <article-title>Nas-unet: Neural architecture search for medical image segmentation</article-title>. <source>IEEE Access</source> (<year>2019</year>) <volume>7</volume>:<page-range>44247&#x2013;57</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ACCESS.2019.2908991</pub-id>
</citation>
</ref>
<ref id="B21">
<label>21</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Prusty</surname> <given-names>S</given-names>
</name>
<name>
<surname>Patnaik</surname> <given-names>S</given-names>
</name>
<name>
<surname>Dash</surname> <given-names>SK</given-names>
</name>
</person-group>. <article-title>SKCV: Stratified K-fold cross-validation on ML classifiers for predicting cervical cancer</article-title>. <source>Front Nanotechnology</source> (<year>2022</year>) <volume>4</volume>:<elocation-id>972421</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fnano.2022.972421</pub-id>
</citation>
</ref>
<ref id="B22">
<label>22</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ronneberger</surname> <given-names>O</given-names>
</name>
<name>
<surname>Fischer</surname> <given-names>P</given-names>
</name>
<name>
<surname>Brox</surname> <given-names>T</given-names>
</name>
</person-group>. <article-title>U-net: Convolutional networks for biomedical image segmentation</article-title>. In: <source>Medical Image Computing and Computer-Assisted Intervention&#x2013;MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18</source>. <publisher-loc>Berlin/Heidelberg, Germany</publisher-loc>: <publisher-name>Springer</publisher-name> (<year>2015</year>).</citation>
</ref>
<ref id="B23">
<label>23</label>
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Ding</surname> <given-names>X</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X</given-names>
</name>
<name>
<surname>Han</surname> <given-names>J</given-names>
</name>
<name>
<surname>Ding</surname> <given-names>G</given-names>
</name>
</person-group>. (<year>2022</year>). <article-title>Scaling up your kernels to 31x31: Revisiting large kernel design in cnns</article-title>, in: <conf-name>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>.</citation>
</ref>
<ref id="B24">
<label>24</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Park</surname> <given-names>G</given-names>
</name>
<name>
<surname>Hong</surname> <given-names>J</given-names>
</name>
<name>
<surname>Duffy</surname> <given-names>BA</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>J-M</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>H</given-names>
</name>
</person-group>. <article-title>White matter hyperintensities segmentation using the ensemble U-Net with multi-scale highlighting foregrounds</article-title>. <source>Neuroimage</source> (<year>2021</year>) <volume>237</volume>:<fpage>118140</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.neuroimage.2021.118140</pub-id>
</citation>
</ref>
<ref id="B25">
<label>25</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schober</surname> <given-names>P</given-names>
</name>
<name>
<surname>Boer</surname> <given-names>C</given-names>
</name>
<name>
<surname>Schwarte</surname> <given-names>LA</given-names>
</name>
</person-group>. <article-title>Correlation coefficients: appropriate use and interpretation</article-title>. <source>Anesth Analg</source> (<year>2018</year>) <volume>126</volume>(<issue>5</issue>):<page-range>1763&#x2013;8</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1213/ANE.0000000000002864</pub-id>
</citation>
</ref>
<ref id="B26">
<label>26</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dogan</surname> <given-names>NO</given-names>
</name>
</person-group>. <article-title>Bland-Altman analysis: A paradigm to understand correlation and agreement</article-title>. <source>Turk J Emerg Med</source> (<year>2018</year>) <volume>18</volume>(<issue>4</issue>):<page-range>139&#x2013;41</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.tjem.2018.09.001</pub-id>
</citation>
</ref>
<ref id="B27">
<label>27</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Koo</surname> <given-names>TK</given-names>
</name>
<name>
<surname>Li</surname> <given-names>MY</given-names>
</name>
</person-group>. <article-title>A guideline of selecting and reporting intraclass correlation coefficients for reliability research</article-title>. <source>J Chiropr Med</source> (<year>2016</year>) <volume>15</volume>(<issue>2</issue>):<page-range>155&#x2013;63</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jcm.2016.02.012</pub-id>
</citation>
</ref>
<ref id="B28">
<label>28</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Charron</surname> <given-names>O</given-names>
</name>
<name>
<surname>Lallement</surname> <given-names>A</given-names>
</name>
<name>
<surname>Jarnet</surname> <given-names>D</given-names>
</name>
<name>
<surname>Noblet</surname> <given-names>V</given-names>
</name>
<name>
<surname>Clavier</surname> <given-names>JB</given-names>
</name>
<name>
<surname>Meyer</surname> <given-names>P</given-names>
</name>
</person-group>. <article-title>Automatic detection and segmentation of brain metastases on multimodal MR images with a deep convolutional neural network</article-title>. <source>Comput Biol Med</source> (<year>2018</year>) <volume>95</volume>:<fpage>43</fpage>&#x2013;<lpage>54</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compbiomed.2018.02.004</pub-id>
</citation>
</ref>
<ref id="B29">
<label>29</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Sanders</surname> <given-names>JW</given-names>
</name>
<name>
<surname>Johnson</surname> <given-names>JM</given-names>
</name>
<name>
<surname>Gule-Monroe</surname> <given-names>MK</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>MM</given-names>
</name>
<name>
<surname>Briere</surname> <given-names>TM</given-names>
</name>
<etal/>
</person-group>. <article-title>Computer-aided detection of brain metastases in T1-weighted MRI for stereotactic radiosurgery using deep learning single-shot detectors</article-title>. <source>Radiology</source> (<year>2020</year>) <volume>295</volume>(<issue>2</issue>):<page-range>407&#x2013;15</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.2020191479</pub-id>
</citation>
</ref>
<ref id="B30">
<label>30</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ottesen</surname> <given-names>JA</given-names>
</name>
<name>
<surname>Yi</surname> <given-names>D</given-names>
</name>
<name>
<surname>Tong</surname> <given-names>E</given-names>
</name>
<name>
<surname>Iv</surname> <given-names>M</given-names>
</name>
<name>
<surname>Latysheva</surname> <given-names>A</given-names>
</name>
<name>
<surname>Saxhaug</surname> <given-names>C</given-names>
</name>
<etal/>
</person-group>. <article-title>2.5D and 3D segmentation of brain metastases with deep learning on multinational MRI data</article-title>. <source>Front Neuroinform</source> (<year>2022</year>) <volume>16</volume>:<elocation-id>1056068</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fninf.2022.1056068</pub-id>
</citation>
</ref>
<ref id="B31">
<label>31</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Bert</surname> <given-names>C</given-names>
</name>
<name>
<surname>Sommer</surname> <given-names>P</given-names>
</name>
<name>
<surname>Frey</surname> <given-names>B</given-names>
</name>
<name>
<surname>Gaipl</surname> <given-names>U</given-names>
</name>
<name>
<surname>Distel</surname> <given-names>LV</given-names>
</name>
<etal/>
</person-group>. <article-title>Deep learning for brain metastasis detection and segmentation in longitudinal MRI data</article-title>. <source>Med Phys</source> (<year>2022</year>) <volume>49</volume>(<issue>9</issue>):<page-range>5773&#x2013;86</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/mp.15863</pub-id>
</citation>
</ref>
<ref id="B32">
<label>32</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ziyaee</surname> <given-names>H</given-names>
</name>
<name>
<surname>Cardenas</surname> <given-names>CE</given-names>
</name>
<name>
<surname>Yeboa</surname> <given-names>DN</given-names>
</name>
<name>
<surname>Li</surname> <given-names>J</given-names>
</name>
<name>
<surname>Ferguson</surname> <given-names>SD</given-names>
</name>
<name>
<surname>Johnson</surname> <given-names>J</given-names>
</name>
<etal/>
</person-group>. <article-title>Automated brain metastases segmentation with a deep dive into false-positive detection</article-title>. <source>Adv Radiat Oncol</source> (<year>2023</year>) <volume>8</volume>(<issue>1</issue>):<elocation-id>101085</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.adro.2022.101085</pub-id>
</citation>
</ref>
<ref id="B33">
<label>33</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Suh</surname> <given-names>CH</given-names>
</name>
<name>
<surname>Jung</surname> <given-names>SC</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>KW</given-names>
</name>
<name>
<surname>Pyo</surname> <given-names>J</given-names>
</name>
</person-group>. <article-title>The detectability of brain metastases using contrast-enhanced spin-echo or gradient-echo images: a systematic review and meta-analysis</article-title>. <source>J Neurooncol</source> (<year>2016</year>) <volume>129</volume>(<issue>2</issue>):<page-range>363&#x2013;71</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11060-016-2185-y</pub-id>
</citation>
</ref>
<ref id="B34">
<label>34</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nagao</surname> <given-names>E</given-names>
</name>
<name>
<surname>Yoshiura</surname> <given-names>T</given-names>
</name>
<name>
<surname>Hiwatashi</surname> <given-names>A</given-names>
</name>
<name>
<surname>Obara</surname> <given-names>M</given-names>
</name>
<name>
<surname>Yamashita</surname> <given-names>K</given-names>
</name>
<name>
<surname>Kamano</surname> <given-names>H</given-names>
</name>
<etal/>
</person-group>. <article-title>3D turbo spin-echo sequence with motion-sensitized driven-equilibrium preparation for detection of brain metastases on 3T MR imaging</article-title>. <source>AJNR Am J Neuroradiol</source> (<year>2011</year>) <volume>32</volume>(<issue>4</issue>):<page-range>664&#x2013;70</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.3174/ajnr.A2343</pub-id>
</citation>
</ref>
<ref id="B35">
<label>35</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carass</surname> <given-names>A</given-names>
</name>
<name>
<surname>Roy</surname> <given-names>S</given-names>
</name>
<name>
<surname>Gherman</surname> <given-names>A</given-names>
</name>
<name>
<surname>Reinhold</surname> <given-names>JC</given-names>
</name>
<name>
<surname>Jesson</surname> <given-names>A</given-names>
</name>
<name>
<surname>Arbel</surname> <given-names>T</given-names>
</name>
<etal/>
</person-group>. <article-title>Evaluating white matter lesion segmentations with refined sorensen-dice analysis</article-title>. <source>Sci Rep</source> (<year>2020</year>) <volume>10</volume>(<issue>1</issue>):<fpage>8242</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-020-64803-w</pub-id>
</citation>
</ref>
<ref id="B36">
<label>36</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kickingereder</surname> <given-names>P</given-names>
</name>
<name>
<surname>Isensee</surname> <given-names>F</given-names>
</name>
<name>
<surname>Tursunova</surname> <given-names>I</given-names>
</name>
<name>
<surname>Petersen</surname> <given-names>J</given-names>
</name>
<name>
<surname>Neuberger</surname> <given-names>U</given-names>
</name>
<name>
<surname>Bonekamp</surname> <given-names>D</given-names>
</name>
<etal/>
</person-group>. <article-title>Automated quantitative tumour response assessment of MRI in neuro-oncology with artificial neural networks: a multicentre, retrospective study</article-title>. <source>Lancet Oncol</source> (<year>2019</year>) <volume>20</volume>(<issue>5</issue>):<page-range>728&#x2013;40</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S1470-2045(19)30098-1</pub-id>
</citation>
</ref>
<ref id="B37">
<label>37</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wen</surname> <given-names>PY</given-names>
</name>
<name>
<surname>Chang</surname> <given-names>SM</given-names>
</name>
<name>
<surname>Van den Bent</surname> <given-names>MJ</given-names>
</name>
<name>
<surname>Vogelbaum</surname> <given-names>MA</given-names>
</name>
<name>
<surname>Macdonald</surname> <given-names>DR</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>EQ</given-names>
</name>
</person-group>. <article-title>Response assessment in neuro-oncology clinical trials</article-title>. <source>J Clin Oncol</source> (<year>2017</year>) <volume>35</volume>(<issue>21</issue>):<page-range>2439&#x2013;49</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1200/JCO.2017.72.7511</pub-id>
</citation>
</ref>
<ref id="B38">
<label>38</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cho</surname> <given-names>J</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>YJ</given-names>
</name>
<name>
<surname>Sunwoo</surname> <given-names>L</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>GP</given-names>
</name>
<name>
<surname>Nguyen</surname> <given-names>TQ</given-names>
</name>
<name>
<surname>Cho</surname> <given-names>SJ</given-names>
</name>
<etal/>
</person-group>. <article-title>Deep learning-based computer-aided detection system for automated treatment response assessment of brain metastases on 3D MRI</article-title>. <source>Front Oncol</source> (<year>2021</year>) <volume>11</volume>:<elocation-id>739639</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fonc.2021.739639</pub-id>
</citation>
</ref>
<ref id="B39">
<label>39</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname> <given-names>SS</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>S</given-names>
</name>
<name>
<surname>Park</surname> <given-names>M</given-names>
</name>
<name>
<surname>Joo</surname> <given-names>B</given-names>
</name>
<name>
<surname>Suh</surname> <given-names>SH</given-names>
</name>
<name>
<surname>Ahn</surname> <given-names>SJ</given-names>
</name>
</person-group>. <article-title>Associated factors of spontaneous hemorrhage in brain metastases in patients with lung adenocarcinoma</article-title>. <source>Cancers (Basel)</source> (<year>2023</year>) <volume>15</volume>(<issue>3</issue>):<elocation-id>619</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/cancers15030619</pub-id>
</citation>
</ref>
<ref id="B40">
<label>40</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>K</given-names>
</name>
<name>
<surname>Ma</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Feng</surname> <given-names>L</given-names>
</name>
</person-group>. <article-title>Apparent diffusion coefficient as an effective index for the therapeutic efficiency of brain chemoradiotherapy for brain metastases from lung cancer</article-title>. <source>BMC Med Imaging</source> (<year>2018</year>) <volume>18</volume>:<fpage>1</fpage>&#x2013;<lpage>7</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12880-018-0275-3</pub-id>
</citation>
</ref>
<ref id="B41">
<label>41</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jin</surname> <given-names>C</given-names>
</name>
<name>
<surname>Yu</surname> <given-names>H</given-names>
</name>
<name>
<surname>Ke</surname> <given-names>J</given-names>
</name>
<name>
<surname>Ding</surname> <given-names>P</given-names>
</name>
<name>
<surname>Yi</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Jiang</surname> <given-names>X</given-names>
</name>
<etal/>
</person-group>. <article-title>Predicting treatment response from longitudinal images using multi-task deep learning</article-title>. <source>Nat Commun</source> (<year>2021</year>) <volume>12</volume>(<issue>1</issue>):<fpage>1851</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41467-021-22188-y</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>