<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Radiol.</journal-id>
<journal-title>Frontiers in Radiology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Radiol.</abbrev-journal-title>
<issn pub-type="epub">2673-8740</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fradi.2024.1495181</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Radiology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Language task-based fMRI analysis using machine learning and deep learning</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author"><name><surname>Kuan</surname><given-names>Elaine</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2840559/overview"/><role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/><role content-type="https://credit.niso.org/contributor-roles/data-curation/"/><role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/><role content-type="https://credit.niso.org/contributor-roles/investigation/"/><role content-type="https://credit.niso.org/contributor-roles/methodology/"/><role content-type="https://credit.niso.org/contributor-roles/project-administration/"/><role content-type="https://credit.niso.org/contributor-roles/resources/"/><role content-type="https://credit.niso.org/contributor-roles/software/"/><role content-type="https://credit.niso.org/contributor-roles/validation/"/><role content-type="https://credit.niso.org/contributor-roles/visualization/"/><role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/><role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/></contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Vegh</surname><given-names>Viktor</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/941585/overview" /><role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/><role content-type="https://credit.niso.org/contributor-roles/investigation/"/><role content-type="https://credit.niso.org/contributor-roles/methodology/"/><role content-type="https://credit.niso.org/contributor-roles/supervision/"/><role content-type="https://credit.niso.org/contributor-roles/validation/"/><role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/></contrib>
<contrib contrib-type="author"><name><surname>Phamnguyen</surname><given-names>John</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref><role content-type="https://credit.niso.org/contributor-roles/data-curation/"/><role content-type="https://credit.niso.org/contributor-roles/resources/"/><role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/></contrib>
<contrib contrib-type="author"><name><surname>O&#x2019;Brien</surname><given-names>Kieran</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref><role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/><role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/><role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/></contrib>
<contrib contrib-type="author"><name><surname>Hammond</surname><given-names>Amanda</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref><role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/><role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/></contrib>
<contrib contrib-type="author"><name><surname>Reutens</surname><given-names>David</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/112868/overview" /><role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/><role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/><role content-type="https://credit.niso.org/contributor-roles/methodology/"/><role content-type="https://credit.niso.org/contributor-roles/resources/"/><role content-type="https://credit.niso.org/contributor-roles/supervision/"/><role content-type="https://credit.niso.org/contributor-roles/validation/"/><role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/></contrib>
</contrib-group>
<aff id="aff1"><label><sup>1</sup></label><institution>Centre for Advanced Imaging, The University of Queensland</institution>, <addr-line>Brisbane, QLD</addr-line>, <country>Australia</country></aff>
<aff id="aff2"><label><sup>2</sup></label><institution>ARC Training Centre for Innovation in Biomedical Imaging Technology, The University of Queensland</institution>, <addr-line>Brisbane, QLD</addr-line>, <country>Australia</country></aff>
<aff id="aff3"><label><sup>3</sup></label><institution>Australia Institute for Bioengineering and Nanotechnology, The University of Queensland</institution>, <addr-line>Brisbane, QLD</addr-line>, <country>Australia</country></aff>
<aff id="aff4"><label><sup>4</sup></label><institution>Neurology Department, Royal Brisbane and Women&#x2019;s Hospital</institution>, <addr-line>Brisbane, QLD</addr-line>, <country>Australia</country></aff>
<aff id="aff5"><label><sup>5</sup></label><institution>Siemens Healthineers, Siemens Healthcare Pty Ltd</institution>, <addr-line>Brisbane, QLD</addr-line>, <country>Australia</country></aff>
<author-notes>
<fn fn-type="edited-by"><p><bold>Edited by:</bold> Cheng Chen, The Chinese University of Hong Kong, China</p></fn>
<fn fn-type="edited-by"><p><bold>Reviewed by:</bold> Yanfu Zhang, University of Pittsburgh, United States</p>
<p>Ke Liu, Beijing Normal University, China</p></fn>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Viktor Vegh <email>v.vegh@uq.edu.au</email></corresp>
</author-notes>
<pub-date pub-type="epub"><day>27</day><month>11</month><year>2024</year></pub-date>
<pub-date pub-type="collection"><year>2024</year></pub-date>
<volume>4</volume><elocation-id>1495181</elocation-id>
<history>
<date date-type="received"><day>12</day><month>09</month><year>2024</year></date>
<date date-type="accepted"><day>12</day><month>11</month><year>2024</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2024 Kuan, Vegh, Phamnguyen, O&#x0027;Brien, Hammond and Reutens.</copyright-statement>
<copyright-year>2024</copyright-year><copyright-holder>Kuan, Vegh, Phamnguyen, O&#x0027;Brien, Hammond and Reutens</copyright-holder><license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract><sec><title>Introduction</title>
<p>Task-based language fMRI is a non-invasive method of identifying brain regions subserving language that is used to plan neurosurgical resections which potentially encroach on eloquent regions. The use of unstructured fMRI paradigms, such as naturalistic fMRI, to map language is of increasing interest. Their analysis necessitates the use of alternative methods such as machine learning (ML) and deep learning (DL) because task regressors may be difficult to define in these paradigms.</p>
</sec><sec><title>Methods</title>
<p>Using task-based language fMRI as a starting point, this study investigates the use of different categories of ML and DL algorithms to identify brain regions subserving language. Data comprising seven task-based language fMRI paradigms were collected from 26 individuals, and ML and DL models were trained to classify voxel-wise fMRI time series.</p>
</sec><sec><title>Results</title>
<p>The general machine learning and the interval-based methods were the most promising in identifying language areas using fMRI time series classification. The general machine learning method achieved a mean whole-brain Area Under the Receiver Operating Characteristic Curve (AUC) of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM1"><mml:mn>0.97</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.03</mml:mn></mml:math></inline-formula>, mean Dice coefficient of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM2"><mml:mn>0.6</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.34</mml:mn></mml:math></inline-formula> and mean Euclidean distance of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM3"><mml:mn>2.7</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>2.4</mml:mn></mml:math></inline-formula>&#x2009;mm between activation peaks across the evaluated regions of interest. The interval-based method achieved a mean whole-brain AUC of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM4"><mml:mn>0.96</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.03</mml:mn></mml:math></inline-formula>, mean Dice coefficient of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM5"><mml:mn>0.61</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.33</mml:mn></mml:math></inline-formula> and mean Euclidean distance of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM6"><mml:mn>3.3</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>2.7</mml:mn></mml:math></inline-formula>&#x2009;mm between activation peaks across the evaluated regions of interest.</p>
</sec><sec><title>Discussion</title>
<p>This study demonstrates the utility of different ML and DL methods in classifying task-based language fMRI time series. A potential application of these methods is the identification of language activation from unstructured paradigms.</p>
</sec>
</abstract>
<kwd-group>
<kwd>task-based fMRI</kwd>
<kwd>language</kwd>
<kwd>time series</kwd>
<kwd>brain activation</kwd>
<kwd>machine learning</kwd>
<kwd>deep learning</kwd>
</kwd-group><counts>
<fig-count count="6"/>
<table-count count="6"/><equation-count count="67"/><ref-count count="55"/><page-count count="15"/><word-count count="0"/></counts><custom-meta-wrap><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Artificial Intelligence in Radiology</meta-value></custom-meta></custom-meta-wrap>
</article-meta>
</front>
<body><sec id="s1" sec-type="intro"><label>1</label><title>Introduction</title>
<p>Individual variation in functional representation in the cerebral cortex (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>) and the potential for re-organisation in the setting of neurological disorders (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B4">4</xref>) make it crucial to accurately localise eloquent areas of the cortex when surgery that may impinge on these areas is contemplated. Accurate localization plays a key role in guiding the decision to proceed with surgery and in intra-operative surgical guidance aiming to minimize the risk of post-operative neurological deficits. Functional Magnetic Resonance Imaging (fMRI) is a non-invasive method of functional brain mapping that measures the blood oxygen level dependent (BOLD) signal changes in the brain due to changes in regional cerebral blood flow during brain activation (<xref ref-type="bibr" rid="B5">5</xref>, <xref ref-type="bibr" rid="B6">6</xref>). It is commonly used for non-invasive pre-surgical mapping across a range of functional domains, including the key domain of language (<xref ref-type="bibr" rid="B7">7</xref>&#x2013;<xref ref-type="bibr" rid="B11">11</xref>).</p>
<p>Task-based language fMRI is a conventional approach for presurgical language mapping. It achieves language lateralization and localization that is concordant with gold standard methods, such as direct cortical stimulation and the Wada test (<xref ref-type="bibr" rid="B12">12</xref>&#x2013;<xref ref-type="bibr" rid="B14">14</xref>). In task-based language fMRI, the subject completes a specific language task arranged in a paradigm comprising blocks with task performance interleaved with blocks during which a control or baseline task is performed (<xref ref-type="bibr" rid="B11">11</xref>, <xref ref-type="bibr" rid="B15">15</xref>). Such designs facilitate statistical analysis using the General Linear Model to identify brain areas activated during the performance of the language task (<xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B17">17</xref>). In these areas, the time course of the fMRI signal resembles the temporal profile predicted from the structure of the task (i.e., the task regressor). Clinical application of task-based fMRI requires the patient to understand and perform the task paradigm, potentially hampering its use in patient groups such as young children or those with deficits in comprehension, memory or attention that interfere with task performance.</p>
<p>This limitation of task-based paradigms can be overcome by using unstructured, continuous paradigms such as naturalistic fMRI (<xref ref-type="bibr" rid="B18">18</xref>&#x2013;<xref ref-type="bibr" rid="B22">22</xref>). Naturalistic paradigms are less demanding in terms of patient compliance and mimic everyday activities insofar as they may only involve passive viewing of a movie or video (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B20">20</xref>). However, unlike task-based fMRI, naturalistic fMRI has no obvious task regressor of interest. In previous studies, regressors have been defined by manually labelling the movie stimulus to identify features that are considered a priori to engage specific cognitive processes (<xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B24">24</xref>). The labelling procedure is subjective and likely to vary with the expertise of the reviewer. A more direct alternative approach would be to extract the regressor(s) of interest from the temporal profiles of already-defined functional systems. One way to investigate the feasibility of this approach is to validate extracted temporal profiles using task-based paradigms in which the temporal profiles are known.</p>
<p>Machine Learning (ML) and Deep Learning (DL) methods have already been applied to fMRI analysis [See (<xref ref-type="bibr" rid="B25">25</xref>) for a general review of the applications of ML and DL in fMRI data analysis]. ML and DL methods are data-driven and their potential use for the classification of fMRI time series is in keeping with their ability to classify time series in other application domains (<xref ref-type="bibr" rid="B26">26</xref>&#x2013;<xref ref-type="bibr" rid="B28">28</xref>). However, the application of ML/DL methods to voxel-wise time series fMRI data (i.e., 1D data) has not been considered to date. The best ML/DL approach for learning task regressors is also yet to be determined.</p>
<p>To answer these questions, we first investigated the ability of different ML and DL algorithms to detect language activation by task-based language fMRI paradigms in individuals. We evaluated different types of ML and DL algorithms using a range of clinically relevant performance metrics including Area Under the Receiver Operating Characteristic curve (AUC), the Dice coefficient and the Euclidean distances between corresponding activation peaks identified by the ML or DL methods and those identified by the gold standard, the General Linear Model. This approach enabled us to determine the ML/DL methods that identify areas of language activation corresponding to the gold standard. This result serves as the foundation for future work to extract the task regressors from naturalistic paradigms using ML/DL methods.</p>
</sec>
<sec id="s2" sec-type="methods"><label>2</label><title>Materials and methods</title>
<sec id="s2a"><label>2.1</label><title>Participants</title>
<p>The study was approved by the Royal Brisbane and Women&#x2019;s Hospital Human Research Ethics Committee. All participants provided written informed consent. The study comprised 26 individuals (20 healthy participants and 6 epilepsy patients; mean age 40, range 21&#x2013;71 years, 13 females). Head motion for each individual across each language paradigm was assessed using Framewise Displacement (FD) (<xref ref-type="bibr" rid="B29">29</xref>). Framewise Displacement for the 26 participants across all language paradigms was found to be within the acceptable range (Mean FD <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM7"><mml:mo>=</mml:mo><mml:mn>0.12</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.06</mml:mn></mml:math></inline-formula>&#x2009;mm, maximum FD less than 0.4&#x2009;mm). All participants&#x2019; primary language was English and handedness was assessed using the Edinburgh Handedness Inventory (EHI) Questionnaire.</p>
</sec>
<sec id="s2b"><label>2.2</label><title>Task design</title>
<p>All participants underwent seven task-based language fMRI scans in a single session. The paradigms utilized a block design with task blocks interleaved with control blocks. Participants were provided with training for each task before the scanning session and at the beginning of each task, instructions were presented on screen followed by 10&#x2009;s of dummy scans, during which a black screen was presented. Dummy scans were excluded from the analysis. At the end of each task, a black screen was also presented for 10&#x2009;s to allow for signal stabilisation. See <xref ref-type="table" rid="T1">Table&#x00A0;1</xref> for more information.</p>
<table-wrap id="T1" position="float"><label>Table 1</label>
<caption><p>Task paradigms.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Paradigm</th>
<th valign="top" align="center">Duration</th>
<th valign="top" align="center">No. blocks</th>
<th valign="top" align="center">Details of each block</th>
<th valign="top" align="center">Participant instructions</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Sentence Completion (SC)</td>
<td valign="top" align="left">4:20</td>
<td valign="top" align="left">6 task, 6 control</td>
<td valign="top" align="left">4 incomplete sentences or garbled sentences, presented for 5&#x2009;s each</td>
<td valign="top" align="left">Participants were instructed to think of the word that completes the sentence during task blocks</td>
</tr>
<tr>
<td valign="top" align="left">Silent Word Generation (SWG)</td>
<td valign="top" align="left">4:20</td>
<td valign="top" align="left">6 task, 6 control</td>
<td valign="top" align="left">2 alphabets or symbols, presented for 10&#x2009;s each.</td>
<td valign="top" align="left">Participants were instructed to silently think of as many words as possible beginning with the alphabet shown on the screen during task blocks</td>
</tr>
<tr>
<td valign="top" align="left">Rhyming (R)</td>
<td valign="top" align="left">4:20</td>
<td valign="top" align="left">6 task, 6 control</td>
<td valign="top" align="left">5 sets of two words or symbols, presented for 4&#x2009;s each.</td>
<td valign="top" align="left">Participants were instructed to press a button when the words or symbols presented to them rhyme or match.</td>
</tr>
<tr>
<td valign="top" align="left">Object Naming (ON)</td>
<td valign="top" align="left">4:20</td>
<td valign="top" align="left">6 task, 6 control</td>
<td valign="top" align="left">6 images or symbols, presented for 3.34&#x2009;s each</td>
<td valign="top" align="left">Participants were instructed to think of the name of the object presented on screen during the task blocks.</td>
</tr>
<tr>
<td valign="top" align="left">Antonym Generation (AG)</td>
<td valign="top" align="left">3:00</td>
<td valign="top" align="left">4 task, 4 control</td>
<td valign="top" align="left">10 words or fixation crosses presented for 2&#x2009;s each.</td>
<td valign="top" align="left">Participants were told to think of the antonym of the word during the task blocks.</td>
</tr>
<tr>
<td valign="top" align="left">Passive Story Listening (PSL)</td>
<td valign="top" align="left">4:20</td>
<td valign="top" align="left">6 task, 6 control</td>
<td valign="top" align="left">One segment of the story or garbled audio, played for 20&#x2009;s</td>
<td valign="top" align="left">Participants were instructed to close their eyes and pay attention to the audio story during the task blocks</td>
</tr>
<tr>
<td valign="top" align="left">Sentence Completion Listening (SCL)</td>
<td valign="top" align="left">4:20</td>
<td valign="top" align="left">6 task, 6 control</td>
<td valign="top" align="left">4 audio sentences (with the beep indicating the end of the sentence) or garbled sentences presented for 5&#x2009;s each.</td>
<td valign="top" align="left">Participants were asked to think of the word that completes the sentence at the end of the beep during the task blocks.</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>For each task, participants were instructed to respond covertly during the task blocks (i.e., to think of the responses and not to speak out loud) and to fixate on a fixation point presented on the screen during control blocks.</p>
<p>Task stimuli were presented with E-prime 3.0 software (Psychology Software Tools Inc., version 3.0) and participants were given MRI-safe active noise-cancelling headphones. Participants who required vision correction were encouraged to wear contact lenses or MRI-safe vision correction lenses were used.</p>
</sec>
<sec id="s2c"><label>2.3</label><title>Image acquisition</title>
<p>Data were collected using a Siemens Magnetom 3T Prisma scanner (Siemens Healthcare, Erlangen, Germany). BOLD Functional images were acquired using an Echo Planar (EPI) sequence and standard 64 channel head coil. (TR <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM8"><mml:mo>=</mml:mo></mml:math></inline-formula> 2,000&#x2009;ms, TE <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM9"><mml:mo>=</mml:mo></mml:math></inline-formula> 23&#x2009;ms, Flip angle <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM10"><mml:mo>=</mml:mo></mml:math></inline-formula> 90 Degrees, FoV <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM11"><mml:mo>=</mml:mo></mml:math></inline-formula> 210&#x2009;mm, Resolution <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM12"><mml:mo>=</mml:mo><mml:mn>70</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>70</mml:mn></mml:math></inline-formula>, 42 Axial Slices, Voxel size <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM13"><mml:mo>=</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn></mml:math></inline-formula>&#x2009;mm). 
Whole brain T1 MPRAGE structural images were also acquired (TR <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM14"><mml:mo>=</mml:mo></mml:math></inline-formula> 1,900&#x2009;ms, TE <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM15"><mml:mo>=</mml:mo></mml:math></inline-formula> 256&#x2009;mm, Flip Angle <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM16"><mml:mo>=</mml:mo></mml:math></inline-formula> 9 Degrees, FoV <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM17"><mml:mo>=</mml:mo></mml:math></inline-formula> 256&#x2009;mm, Resolution <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM18"><mml:mo>=</mml:mo><mml:mn>256</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula>, 192 Axial Slices, Voxel size <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM19"><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula>&#x2009;mm).</p>
</sec>
<sec id="s2d"><label>2.4</label><title>Pre-processing of fMRI data</title>
<p>Data were analysed using Statistical Parametric Mapping SPM12 software (<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B31">31</xref>). The steps included slice timing correction, realignment (realigning images to the mean functional image across all tasks for each participant), co-registration to structural T1 image, spatial normalization to Montreal Neurological Institute (MNI) space (re-sampled to <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM20"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn></mml:math></inline-formula>&#x2009;mm) and smoothing using a full-width half maximum (FWHM) Gaussian kernel of 6&#x2009;mm.</p>
</sec>
<sec id="s2e"><label>2.5</label><title>Task-based language fMRI activation</title>
<p>For each participant, first-level task-based language activation maps were derived for each of the seven tasks. Pre-processed voxel-wise fMRI time series data were modeled with the General Linear Model using SPM12 software. General linear modelling was performed with defined task regressors and covariates. The former were obtained by convolving the box-car stimulus function (contrasting task and control conditions) of each task with the canonical Haemodynamic Response Function (HRF). Covariates or nuisance regressors were the six motion parameters from the motion correction step during pre-processing. High pass filtering with a cut off of 128s and a first-level autoregressive model, i.e., AR(1) were employed.</p>
<p>A threshold of <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM21"><mml:mi>p</mml:mi><mml:mo>&#x003C;</mml:mo><mml:mn>0.001</mml:mn></mml:math></inline-formula>, uncorrected for multiple comparisons, was applied to the <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM22"><mml:mi>t</mml:mi></mml:math></inline-formula>-statistic images to generate language activation maps which were binarised (activated vs non-activated) for machine learning analysis. The process of deriving task-based language activation maps is illustrated in <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>.</p>
<fig id="F1" position="float"><label>Figure 1</label>
<caption><p>Illustrated are the steps to obtain task-based language activation maps. Task paradigm stimulus function was convolved with the hemodynamic response function to get task regressors. Raw fMRI images were preprocessed, and together with task and nuisance regressors activation maps are produced using General Linear modelling.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fradi-04-1495181-g001.tif"/>
</fig>
</sec>
<sec id="s2f"><label>2.6</label><title>Training and test pipeline</title>
<p>Data analysis with machine learning and deep learning comprises training and testing stages. A set of carefully curated training data is first provided to ML or DL algorithms, allowing patterns to be extracted from the data, resulting in a trained model. Test set(s) are then used to evaluate the performance of the ML/DL model.</p>
<sec id="s2f1"><label>2.6.1</label><title>Training set</title>
<p>To construct the training set, pre-processed fMRI voxel time series of 14 healthy participants across 6 tasks (SC, SWG, R, ON, PSL, SCL) were extracted and labelled as activated (label 1) or non-activated (label 0) to yield binarised activation maps for each paradigm. The classification problem to be solved by the ML/DL methods was therefore to classify whether a fMRI voxel time series was activated or non-activated. The process of extracting voxel-wise fMRI time series and labelling is illustrated in <xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref>. Data from the AG tasks were excluded from the training set because the length of the voxel time series differed from that of the other tasks; however, the AG task was included in the test set, see <xref ref-type="sec" rid="s2f2">Section 2.6.2</xref>).</p>
<fig id="F2" position="float"><label>Figure 2</label>
<caption><p>Illustrated are the steps to extract voxel time series data and corresponding binary labels. Task-based language activation maps were used to define the 0 and 1 labels.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fradi-04-1495181-g002.tif"/>
</fig>
<p>Reflecting individual variation in brain function, the number of activated and non-activated fMRI voxel time series varied across participants and tasks. To balance the training set so that the numbers of activated and non-activated time series was equal, and to equalize the contribution of samples from each participant and each task, the participant and task paradigm with the smallest number of activated voxels was identified, which corresponded to 29 fMRI time series samples. The number of activated fMRI voxel time series and non-activated voxel time series for the remaining participants and tasks was randomly sampled, without replacement, to match the smallest number of activated voxels (i.e., resulting in 58 samples from each task paradigm for each participant, with the number of activated voxel time series and non-activated voxel time series from each participant totaling up to 348).</p>
<p>The resulting training set had 4,872 samples, of which 2,436 samples were labelled 1 and 2,436 samples were labelled 0.</p>
</sec>
<sec id="s2f2"><label>2.6.2</label><title>Test set</title>
<p>For the test set, time series from all brain voxels in 6 healthy participants and 6 epilepsy patients for 6 tasks (SC, SWG, R, AG, ON, SCL) were extracted and labelled according to the binarised language activation maps. A combination of healthy participants and epilepsy patients was included in the test set to ensure diversity in the test data. The PSL task was excluded from analysis because there was little activation in most participants. To determine whether the trained models could be used to analyse unseen data with fMRI time series of different lengths, the time series for the AG task was padded with the final time point in the fMRI time series to match the length of fMRI time series of other tasks.</p>
<p>An average of 66,036 samples was extracted from each task in each participant. The voxel locations of each sample were saved and later used to reconstruct ML/DL activation maps. See <xref ref-type="table" rid="T6">Table&#x00A0;A1</xref> in Appendix (<xref ref-type="app" rid="app1a">Section A</xref>) for the number of samples that were extracted for each participant and each task.</p>
</sec>
<sec id="s2f3"><label>2.6.3</label><title>Choice of machine learning and deep learning algorithms</title>
<p>Several types of machine learning and deep learning algorithms have been proposed for time series classification. They can be categorised into general machine learning, dictionary-based, distance-based, feature-based, frequency-based, interval-based, kernel-based, shapelet-based, hybrid or ensemble-based and deep learning methods. The different ML/DL categories and methods can be found in the SKTIME library (version 13.4) (<xref ref-type="bibr" rid="B32">32</xref>, <xref ref-type="bibr" rid="B33">33</xref>). A separate category was defined, namely the general machine learning (GML) category which includes ML methods that were conventionally developed to solve non-time series problems. These methods can be found in the Scikit-Learn library and the Rotation Forest algorithm falls into this category (<xref ref-type="bibr" rid="B34">34</xref>, <xref ref-type="bibr" rid="B35">35</xref>).</p>
<p>A representative algorithm from each category was chosen based on the literature and evaluated to determine whether a particular category of classification algorithms was more suited to task-based language fMRI analysis. <xref ref-type="table" rid="T2">Table&#x00A0;2</xref> shows the algorithms from each category. Default hyper-parameters were used to evaluate each of the chosen algorithms. While hyper-parameter tuning is largely discussed in the machine learning community, Probst et al. (<xref ref-type="bibr" rid="B36">36</xref>) evaluated the impact of hyper-parameter tuning on six different ML algorithms and showed that AUC at most improves by 10&#x0025; by optimizing hyper-parameters. Default hyper-parameters are also useful starting points for ML algorithm evaluation and generally work well across different problems (<xref ref-type="bibr" rid="B35">35</xref>). The use of default parameters also ensures reproducibility of results.</p>
<table-wrap id="T2" position="float"><label>Table 2</label>
<caption><p>Algorithms evaluated from different categories.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Category</th>
<th valign="top" align="center">Algorithm</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">General Machine Learning (GML)</td>
<td valign="top" align="left">Rotation Forest (RotF) (<xref ref-type="bibr" rid="B35">35</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Dictionary-based</td>
<td valign="top" align="left">Word Extraction for Time Series Classification (WEASEL) (<xref ref-type="bibr" rid="B37">37</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Feature-based</td>
<td valign="top" align="left">Time Series Feature Extraction based on Scalable Hypothesis Tests (TSFresh) (<xref ref-type="bibr" rid="B38">38</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Frequency-based</td>
<td valign="top" align="left">Random Interval Spectral Ensemble (RISE) (<xref ref-type="bibr" rid="B39">39</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Interval-based</td>
<td valign="top" align="left">Supervised Time Series Forest (sTSF) (<xref ref-type="bibr" rid="B40">40</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Kernel-based</td>
<td valign="top" align="left">Ensemble of RandOm Convolutional KErnel Transform transformers (ARSENAL) (<xref ref-type="bibr" rid="B41">41</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Deep-Learning (DL)</td>
<td valign="top" align="left">Inception Time (Inception) (<xref ref-type="bibr" rid="B42">42</xref>)</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>To choose the algorithms to be tested, we first identified the most recent algorithms to be developed in each category. The literature was then reviewed to determine their performance in comparison with algorithms previously used to benchmark time series classification performance in terms of accuracy and computational efficiency (<xref ref-type="bibr" rid="B28">28</xref>). The algorithms chosen for comparison from each category were trained on high performance computing clusters with different memory requirements depending on the algorithm. Algorithms that required more than 24&#x2009;h or 128&#x2009;GB of memory on a single core for training were excluded from selection. Because long test times can be overcome by testing multiple batches of data in parallel, we did not consider this in algorithm selection. See Appendix (<xref ref-type="app" rid="app1a">Section A</xref>) for further details on the justification of machine learning algorithm choices.</p>
<p>This study focused on time series classification methods that build on traditional machine learning methods because of their proven success in other application domains involving time series datasets (See (<xref ref-type="bibr" rid="B28">28</xref>) for more). While deep learning methods have great success in many domains, DL-based methods often require more data to train and run the risk of over-fitting. Deep learning networks are also more complex, which often makes interpretation challenging.</p>
</sec>
<sec id="s2f4"><label>2.6.4</label><title>Performance evaluation</title>
<p>Measures used to assess algorithm performance were Area Under the Receiver Operating Characteristic Curve (AUC), Dice coefficient and Euclidean distance(s) between activation peaks identified with SPM and ML/DL. These were calculated for each participant according to language paradigm and ML/DL algorithm.</p>
<p>AUC was calculated for the whole brain and both the Dice coefficient and Euclidean distance were calculated using 12 language-related regions from both left and right hemispheres defined using parcellations from (<xref ref-type="bibr" rid="B43">43</xref>). Of the 12 language-related regions of interest, those for which at least one test participant showed <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM23"><mml:mo>&#x003E;</mml:mo><mml:mn>50</mml:mn></mml:math></inline-formula> percent overlap between the region and the area of task-based activation were selected, yielding 25 regions across 6 language paradigms.</p>
<p>AUC values were calculated from Receiver Operating Characteristic (ROC) curves which plot the True Positive Rate vs. False Positive Rate for each test participant and task at different probability thresholds. The Dice coefficient was calculated using: <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM24"><mml:mi>D</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mi>e</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mi>A</mml:mi><mml:mo>&#x2229;</mml:mo><mml:mi>B</mml:mi><mml:mo fence="false" stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mi>A</mml:mi><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mo>+</mml:mo><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mi>B</mml:mi><mml:mo fence="false" stretchy="false">|</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:math></inline-formula> , where A corresponds to activated voxels identified by SPM and B corresponds to ML/DL activated areas. AUC and Dice coefficient values range from 0 to 1, with 1 indicating that an ML/DL algorithm performs perfectly in classifying test samples (i.e., full overlap between ML/DL and task-based activation maps). We categorised a Dice coefficient of 0.00&#x2013;0.19 as low overlap, 0.20&#x2013;0.39 as low-moderate overlap, 0.40&#x2013;0.59 as moderate overlap, 0.60&#x2013;0.79 as moderate-high overlap and 0.80&#x2013;1.00 as high overlap, in keeping with categories defined by (<xref ref-type="bibr" rid="B44">44</xref>).</p>
<p>The Euclidean distance between peaks within language-related regions was also evaluated as peak location is of potential clinical importance in neurosurgical decision-making. In each selected language-related region, the distance between the highest SPM activation peak and every ML/DL activation peak was calculated and the shortest distance reported in millimeters (mm). We also assessed whether the peaks identified by ML/DL and by SPM were in the same or different gyri by co-registering the activation maps to each participant&#x2019;s T1 MPRAGE structural image.</p>
<p>A Kruskal-Wallis test with post hoc pairwise comparisons using the Dunn test (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM25"><mml:mi>p</mml:mi></mml:math></inline-formula>-value corrected for multiple comparisons using the Holm&#x2013;Bonferroni method) were performed to identify if there were significant differences between the mean AUC, Dice coefficient and Euclidean distance between peaks for different ML/DL methods. The threshold for statistical significance was set at <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM26"><mml:mi>p</mml:mi><mml:mo>&#x003C;</mml:mo><mml:mn>0.05</mml:mn></mml:math></inline-formula>.</p>
</sec>
</sec>
</sec>
<sec id="s3" sec-type="results"><label>3</label><title>Results</title>
<sec id="s3a"><label>3.1</label><title>Activation maps</title>
<p>To illustrate how well each method performs, <xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref> shows the overlap between activation areas found by SPM and each ML/DL algorithm studied for the Sentence Completion (SC) task in two test participants (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM27"><mml:mi>p</mml:mi><mml:mo>&#x003C;</mml:mo><mml:mn>0.001</mml:mn></mml:math></inline-formula> uncorrected). The black areas denote the overlap between SPM and ML/DL activation, yellow areas denote activated areas found only by SPM and red areas denote activated areas only found by each ML/DL method. Activated areas identified by SPM are shown in the upper left row of the figure. Reflecting the expressive and receptive language components of the SC task, activation can be seen in frontal and temporal lobes in both participants; bilateral activation was observed.</p>
<fig id="F3" position="float"><label>Figure 3</label>
<caption><p>This figure shows the overlap between SPM activation maps vs. evaluated ML/DL activation maps of two test participants (A healthy participant, LHS under each algorithm title and an epilepsy patient, RHS under each algorithm title) for a single language task - Sentence Completion (SC). Black - Overlap, yellow - SPM activation, Red - ML/DL activation.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fradi-04-1495181-g003.tif"/>
</fig>
<p>Activated voxels identified by the ML/DL algorithms occurred in clusters and a qualitatively good level of overlap (as indicated by the ratio of black areas compared to red and yellow areas) was observed for most methods except for the frequency-based and dictionary-based methods. The frequency-based method shows areas that are not found to be activated by SPM (red areas in the posterior brain, including occipital and parietal areas). A number of scattered small activated areas were identified by the dictionary-based method, although the main activation clusters were still identified.</p>
</sec>
<sec id="s3b"><label>3.2</label><title>Whole-brain AUC</title>
<p>The scatter plot in <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref> shows the mean whole-brain AUC values across test participants of different ML/DL categories and language paradigms. The violin plots show the distribution of AUC values across test participants for the best and worst performing ML/DL categories of each language paradigm. Blue violin plots show the distribution of AUC values for the best performing ML/DL method and orange violin plots show the distribution of AUC values for the worst performing ML/DL method. <xref ref-type="table" rid="T3">Table&#x00A0;3</xref> shows the values associated with the scatter plot in <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref>.</p>
<fig id="F4" position="float"><label>Figure 4</label>
<caption><p>Mean whole-brain AUC values across test participants of different ML/DL categories, by language paradigm (as a scatter plot with violin plots showing the distribution of AUC values for the best and worst performing ML/DL methods for each language paradigm. Blue - best, Orange - worst).</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fradi-04-1495181-g004.tif"/>
</fig>
<table-wrap id="T3" position="float"><label>Table 3</label>
<caption><p>Table of mean whole-brain AUC values across test participants by language paradigm, associated with scatter plot in <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref>.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left"/>
<th valign="top" align="center">SC</th>
<th valign="top" align="center">SWG</th>
<th valign="top" align="center">R</th>
<th valign="top" align="center">ON</th>
<th valign="top" align="center">AG</th>
<th valign="top" align="center">SCL</th>
<th valign="top" align="center">Mean&#x2009;<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM28"><mml:mo>&#x00B1;</mml:mo></mml:math></inline-formula>&#x2009;SD</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">General ML</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.94</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM29"><mml:mn>0.97</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.03</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Dictionary</td>
<td valign="top" align="center">0.94</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.92</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.92</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM30"><mml:mn>0.94</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.04</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Feature</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM31"><mml:mn>0.96</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.04</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Frequency</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">0.92</td>
<td valign="top" align="center">0.88</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM32"><mml:mn>0.89</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.06</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Interval</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM33"><mml:mn>0.96</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.03</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Kernel</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.94</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM34"><mml:mn>0.95</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.04</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Deep Learning</td>
<td valign="top" align="center">0.94</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.94</td>
<td valign="top" align="center">0.94</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM35"><mml:mn>0.94</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.04</mml:mn></mml:math></inline-formula></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Mean whole-brain AUC values for different ML/DL categories and language paradigms exceeded 0.8 with the GML method achieving the highest AUC values for 5 of 6 of the language paradigms (highlighted in blue in <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>). For the SWG paradigm, both the GML and kernel-based methods achieved the highest AUC value of 0.98. AUC values for the GML method ranged between 0.94 and 0.98, with values being highest for SWG and AG (0.98). The highest mean AUC value across language paradigms was achieved by the GML method (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM36"><mml:mn>0.97</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.03</mml:mn></mml:math></inline-formula>; <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>). The frequency-based method consistently ranked the lowest in whole-brain AUC values, with values ranging from 0.88 to 0.92 (Mean AUC: <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM37"><mml:mn>0.89</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.06</mml:mn></mml:math></inline-formula>; <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>).</p>
<p>The mean AUC differed significantly between ML categories (Kruskal-Wallis Test, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM38"><mml:mi>H</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mn>6</mml:mn><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mn>131.1</mml:mn></mml:math></inline-formula>, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM39"><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:mn>7.34</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:msup><mml:mn>10</mml:mn><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mn>26</mml:mn></mml:mrow></mml:msup></mml:math></inline-formula>). On post hoc testing, the mean AUC of the GML method was found to be significantly higher than that of the dictionary, frequency and DL-based methods. The mean AUC of the frequency-based method was found to be significantly lower than that of all other evaluated methods.</p>
<p>When evaluated across the healthy participant and epilepsy patient groups, the GML method consistently ranks in the top three across the different language paradigms. This occurs in 89&#x0025; of cases for the healthy participant group and 72&#x0025; for the epilepsy patient group. The GML method ranks in the top two in 75&#x0025; of cases for the healthy participant group and 64&#x0025; for the epilepsy patient group, and ranks the highest for 50&#x0025; of cases for the healthy participant group and 42&#x0025; for the epilepsy patient group. In contrast, the frequency-based method ranks lowest among both groups, with 83&#x0025; of cases for healthy participants and 86&#x0025; for epilepsy patients across the evaluated language paradigms. This suggests consistent performance of ML/DL methods between healthy participants and epilepsy patients.</p>
</sec>
<sec id="s3c"><label>3.3</label><title>Dice coefficients of language regions</title>
<p><xref ref-type="fig" rid="F5">Figure&#x00A0;5</xref> shows the mean Dice coefficients (across test participants) for each ML/DL category in different language-related regions. The violin plots show the distribution of Dice coefficients across test participants for the best and worst performing ML/DL categories of each language region of interest. Blue violin plots show the distribution of Dice coefficients for the best performing ML/DL method and orange violin plots show the distribution of Dice coefficient values for the worst performing ML/DL method. <xref ref-type="table" rid="T4">Table&#x00A0;4</xref> shows the values associated with <xref ref-type="fig" rid="F5">Figure&#x00A0;5</xref>.</p>
<fig id="F5" position="float"><label>Figure 5</label>
<caption><p>Mean Dice coefficient across test participants of different ML/DL categories, by language regions of interest (as a scatter plot with violin plots denoting the distribution of Dice coefficient values for the best and worst performing ML/DL methods for each language region of interest. Blue - best, Orange - worst).</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fradi-04-1495181-g005.tif"/>
</fig>
<table-wrap id="T4" position="float"><label>Table 4</label>
<caption><p>Table of mean Dice coefficient across test participants by language regions of interest, associated with <xref ref-type="fig" rid="F5">Figure&#x00A0;5</xref>.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left"/>
<th valign="top" align="center">SC-P1</th>
<th valign="top" align="center">SC-P2</th>
<th valign="top" align="center">SC-P3</th>
<th valign="top" align="center">SC-P5</th>
<th valign="top" align="center">SC-P7</th>
<th valign="top" align="center">SC-P9</th>
<th valign="top" align="center">SWG-P3</th>
<th valign="top" align="center">SWG-P9</th>
<th valign="top" align="center">R-P1</th>
<th valign="top" align="center">R-P2</th>
<th valign="top" align="center">R-P3</th>
<th valign="top" align="center">ON-P3</th>
<th valign="top" align="center">AG-P1</th>
<th valign="top" align="center">AG-P2</th>
<th valign="top" align="center">AG-P3</th>
<th valign="top" align="center">AG-P5</th>
<th valign="top" align="center">AG-P7</th>
<th valign="top" align="center">AG-P8</th>
<th valign="top" align="center">AG-P9</th>
<th valign="top" align="center">SCL-P1</th>
<th valign="top" align="center">SCL-P2</th>
<th valign="top" align="center">SCL-P3</th>
<th valign="top" align="center">SCL-P7</th>
<th valign="top" align="center">SCL-P8</th>
<th valign="top" align="center">SCL-P9</th>
<th valign="top" align="center">Mean&#x2009;<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM40"><mml:mo>&#x00B1;</mml:mo></mml:math></inline-formula>&#x2009;SD</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">General ML</td>
<td valign="top" align="center">0.77</td>
<td valign="top" align="center">0.8</td>
<td valign="top" align="center">0.81</td>
<td valign="top" align="center">0.72</td>
<td valign="top" align="center">0.6</td>
<td valign="top" align="center">0.42</td>
<td valign="top" align="center">0.62</td>
<td valign="top" align="center">0.6</td>
<td valign="top" align="center">0.63</td>
<td valign="top" align="center">0.71</td>
<td valign="top" align="center">0.7</td>
<td valign="top" align="center">0.28</td>
<td valign="top" align="center">0.71</td>
<td valign="top" align="center">0.75</td>
<td valign="top" align="center">0.81</td>
<td valign="top" align="center">0.68</td>
<td valign="top" align="center">0.42</td>
<td valign="top" align="center">0.5</td>
<td valign="top" align="center">0.67</td>
<td valign="top" align="center">0.55</td>
<td valign="top" align="center">0.53</td>
<td valign="top" align="center">0.61</td>
<td valign="top" align="center">0.33</td>
<td valign="top" align="center">0.39</td>
<td valign="top" align="center">0.48</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM41"><mml:mn>0.6</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.34</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Dictionary</td>
<td valign="top" align="center">0.71</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.77</td>
<td valign="top" align="center">0.69</td>
<td valign="top" align="center">0.51</td>
<td valign="top" align="center">0.39</td>
<td valign="top" align="center">0.61</td>
<td valign="top" align="center">0.48</td>
<td valign="top" align="center">0.56</td>
<td valign="top" align="center">0.66</td>
<td valign="top" align="center">0.54</td>
<td valign="top" align="center">0.2</td>
<td valign="top" align="center">0.59</td>
<td valign="top" align="center">0.66</td>
<td valign="top" align="center">0.74</td>
<td valign="top" align="center">0.59</td>
<td valign="top" align="center">0.39</td>
<td valign="top" align="center">0.44</td>
<td valign="top" align="center">0.58</td>
<td valign="top" align="center">0.49</td>
<td valign="top" align="center">0.51</td>
<td valign="top" align="center">0.61</td>
<td valign="top" align="center">0.33</td>
<td valign="top" align="center">0.3</td>
<td valign="top" align="center">0.43</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM42"><mml:mn>0.54</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.33</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Feature</td>
<td valign="top" align="center">0.71</td>
<td valign="top" align="center">0.72</td>
<td valign="top" align="center">0.77</td>
<td valign="top" align="center">0.72</td>
<td valign="top" align="center">0.59</td>
<td valign="top" align="center">0.43</td>
<td valign="top" align="center">0.62</td>
<td valign="top" align="center">0.62</td>
<td valign="top" align="center">0.58</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.64</td>
<td valign="top" align="center">0.35</td>
<td valign="top" align="center">0.65</td>
<td valign="top" align="center">0.75</td>
<td valign="top" align="center">0.79</td>
<td valign="top" align="center">0.66</td>
<td valign="top" align="center">0.43</td>
<td valign="top" align="center">0.49</td>
<td valign="top" align="center">0.66</td>
<td valign="top" align="center">0.59</td>
<td valign="top" align="center">0.6</td>
<td valign="top" align="center">0.63</td>
<td valign="top" align="center">0.41</td>
<td valign="top" align="center">0.41</td>
<td valign="top" align="center">0.53</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM43"><mml:mn>0.6</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.34</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Frequency</td>
<td valign="top" align="center">0.71</td>
<td valign="top" align="center">0.72</td>
<td valign="top" align="center">0.72</td>
<td valign="top" align="center">0.7</td>
<td valign="top" align="center">0.54</td>
<td valign="top" align="center">0.34</td>
<td valign="top" align="center">0.47</td>
<td valign="top" align="center">0.38</td>
<td valign="top" align="center">0.5</td>
<td valign="top" align="center">0.58</td>
<td valign="top" align="center">0.49</td>
<td valign="top" align="center">0.19</td>
<td valign="top" align="center">0.65</td>
<td valign="top" align="center">0.61</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.54</td>
<td valign="top" align="center">0.28</td>
<td valign="top" align="center">0.35</td>
<td valign="top" align="center">0.54</td>
<td valign="top" align="center">0.47</td>
<td valign="top" align="center">0.55</td>
<td valign="top" align="center">0.56</td>
<td valign="top" align="center">0.25</td>
<td valign="top" align="center">0.32</td>
<td valign="top" align="center">0.39</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM44"><mml:mn>0.5</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.35</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Interval</td>
<td valign="top" align="center">0.77</td>
<td valign="top" align="center">0.8</td>
<td valign="top" align="center">0.83</td>
<td valign="top" align="center">0.72</td>
<td valign="top" align="center">0.58</td>
<td valign="top" align="center">0.47</td>
<td valign="top" align="center">0.58</td>
<td valign="top" align="center">0.6</td>
<td valign="top" align="center">0.6</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.67</td>
<td valign="top" align="center">0.26</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.79</td>
<td valign="top" align="center">0.8</td>
<td valign="top" align="center">0.68</td>
<td valign="top" align="center">0.39</td>
<td valign="top" align="center">0.47</td>
<td valign="top" align="center">0.7</td>
<td valign="top" align="center">0.58</td>
<td valign="top" align="center">0.57</td>
<td valign="top" align="center">0.61</td>
<td valign="top" align="center">0.39</td>
<td valign="top" align="center">0.38</td>
<td valign="top" align="center">0.62</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM45"><mml:mn>0.61</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.33</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Kernel</td>
<td valign="top" align="center">0.67</td>
<td valign="top" align="center">0.68</td>
<td valign="top" align="center">0.72</td>
<td valign="top" align="center">0.63</td>
<td valign="top" align="center">0.52</td>
<td valign="top" align="center">0.43</td>
<td valign="top" align="center">0.56</td>
<td valign="top" align="center">0.53</td>
<td valign="top" align="center">0.56</td>
<td valign="top" align="center">0.67</td>
<td valign="top" align="center">0.69</td>
<td valign="top" align="center">0.28</td>
<td valign="top" align="center">0.65</td>
<td valign="top" align="center">0.73</td>
<td valign="top" align="center">0.79</td>
<td valign="top" align="center">0.69</td>
<td valign="top" align="center">0.42</td>
<td valign="top" align="center">0.49</td>
<td valign="top" align="center">0.67</td>
<td valign="top" align="center">0.48</td>
<td valign="top" align="center">0.5</td>
<td valign="top" align="center">0.52</td>
<td valign="top" align="center">0.29</td>
<td valign="top" align="center">0.36</td>
<td valign="top" align="center">0.36</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM46"><mml:mn>0.56</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.35</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Deep Learning</td>
<td valign="top" align="center">0.72</td>
<td valign="top" align="center">0.71</td>
<td valign="top" align="center">0.74</td>
<td valign="top" align="center">0.65</td>
<td valign="top" align="center">0.51</td>
<td valign="top" align="center">0.33</td>
<td valign="top" align="center">0.62</td>
<td valign="top" align="center">0.48</td>
<td valign="top" align="center">0.52</td>
<td valign="top" align="center">0.66</td>
<td valign="top" align="center">0.65</td>
<td valign="top" align="center">0.23</td>
<td valign="top" align="center">0.56</td>
<td valign="top" align="center">0.63</td>
<td valign="top" align="center">0.69</td>
<td valign="top" align="center">0.56</td>
<td valign="top" align="center">0.31</td>
<td valign="top" align="center">0.34</td>
<td valign="top" align="center">0.59</td>
<td valign="top" align="center">0.49</td>
<td valign="top" align="center">0.48</td>
<td valign="top" align="center">0.54</td>
<td valign="top" align="center">0.25</td>
<td valign="top" align="center">0.28</td>
<td valign="top" align="center">0.36</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM47"><mml:mn>0.52</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.34</mml:mn></mml:math></inline-formula></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The interval-based method has the highest mean Dice coefficient values (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM48"><mml:mn>0.61</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.33</mml:mn></mml:math></inline-formula>) across the evaluated language regions and ranks highest for 10 out of the 25 language-related regions (highlighted in blue in <xref ref-type="table" rid="T4">Table&#x00A0;4</xref>). Mean Dice coefficient values for the interval-based method range from 0.26 (ON-P3) to 0.83 (SC-P3), with mean values larger than or equal to 0.6 for most language-related regions except SC-P7, SC-P9, SWG-P3, ON-P3, AG-P7, AG-P8, SCL-P1, SCL-P2, SCL-P7, SCL-P8, indicating at least a moderate to high level of overlap between activated voxels identified by SPM and by the interval-based method. However, a high overlap (0.8-1.0) between activated voxels identified by SPM and by the interval-based method was only observed in 3 language-related regions (SC-P2, SC-P3, AG-P3). The frequency-based method had the lowest mean Dice coefficient (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM49"><mml:mn>0.5</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.35</mml:mn></mml:math></inline-formula>) across the evaluated language regions and ranked the lowest for 13 out of 25 language-related regions. The frequency-based method achieved a mean Dice coefficient larger than 0.6 in only 7 regions (SC-P1, SC-P2, SC-P3, SC-P5, AG-P1, AG-P2, AG-P3), with a high level of overlap not being observed in any of the language regions.</p>
<p>The mean Dice coefficient differed significantly between ML Categories (Kruskal-Wallis Test, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM50"><mml:mi>H</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mn>6</mml:mn><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mn>41.8</mml:mn></mml:math></inline-formula>, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM51"><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:mn>2.00</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:msup><mml:mn>10</mml:mn><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mn>7</mml:mn></mml:mrow></mml:msup></mml:math></inline-formula>). The post hoc Dunn test revealed that the mean Dice coefficient of the interval-based method was significantly larger than that of the dictionary, frequency and DL-based methods but not significantly different from that of the remaining methods. The mean Dice coefficient for the frequency-based method was significantly lower than that of the GML, feature and interval-based methods but not significantly different from that of the remaining methods.</p>
</sec>
<sec id="s3d"><label>3.4</label><title>Euclidean distance between peaks</title>
<p>The scatter plot in <xref ref-type="fig" rid="F6">Figure&#x00A0;6</xref> shows the mean Euclidean distance (across test participants) between peaks in different language-related regions identified by SPM vs. peaks identified by the ML/DL methods in millimeters (mm). The violin plots show the distribution of Euclidean distances between peaks across test participants and ML/DL methods in different language-related regions. <xref ref-type="table" rid="T5">Table&#x00A0;5</xref> shows the values associated with <xref ref-type="fig" rid="F6">Figure&#x00A0;6</xref>.</p>
<fig id="F6" position="float"><label>Figure 6</label>
<caption><p>Mean Euclidean distance between activation peaks across test participants of different ML/DL categories and SPM, by language regions of interest (as a scatter plot with violin plots denoting the distribution of Euclidean distances for the best and worse performing ML/DL methods for each language paradigm. Blue - best, Orange - worst).</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fradi-04-1495181-g006.tif"/>
</fig>
<table-wrap id="T5" position="float"><label>Table 5</label>
<caption><p>Table of mean Euclidean distance values across test participants by language regions of interest, associated with <xref ref-type="fig" rid="F6">Figure&#x00A0;6</xref>.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="center"/>
<th valign="top" align="center">SC-P1</th>
<th valign="top" align="center">SC-P2</th>
<th valign="top" align="center">SC-P3</th>
<th valign="top" align="center">SC-P5</th>
<th valign="top" align="center">SC-P7</th>
<th valign="top" align="center">SC-P9</th>
<th valign="top" align="center">SWG-P3</th>
<th valign="top" align="center">SWG-P9</th>
<th valign="top" align="center">R-P1</th>
<th valign="top" align="center">R-P2</th>
<th valign="top" align="center">R-P3</th>
<th valign="top" align="center">ON-P3</th>
<th valign="top" align="center">AG-P1</th>
<th valign="top" align="center">AG-P2</th>
<th valign="top" align="center">AG-P3</th>
<th valign="top" align="center">AG-P5</th>
<th valign="top" align="center">AG-P7</th>
<th valign="top" align="center">AG-P8</th>
<th valign="top" align="center">AG-P9</th>
<th valign="top" align="center">SCL-P1</th>
<th valign="top" align="center">SCL-P2</th>
<th valign="top" align="center">SCL-P3</th>
<th valign="top" align="center">SCL-P7</th>
<th valign="top" align="center">SCL-P8</th>
<th valign="top" align="center">SCL-P9</th>
<th valign="top" align="center">Mean&#x2009;<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM52"><mml:mo>&#x00B1;</mml:mo></mml:math></inline-formula>&#x2009;SD</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">General ML</td>
<td valign="top" align="center">2.5</td>
<td valign="top" align="center">1.5</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">2.5</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">4.2</td>
<td valign="top" align="center">2.2</td>
<td valign="top" align="center">2.5</td>
<td valign="top" align="center">1.7</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">2.4</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">2.1</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">2.4</td>
<td valign="top" align="center">3.1</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center">2.8</td>
<td valign="top" align="center">2.9</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM53"><mml:mn>2.7</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>2.4</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Dictionary</td>
<td valign="top" align="center">2.7</td>
<td valign="top" align="center">5.6</td>
<td valign="top" align="center">2.9</td>
<td valign="top" align="center">3.6</td>
<td valign="top" align="center">2.8</td>
<td valign="top" align="center">3.7</td>
<td valign="top" align="center">3.7</td>
<td valign="top" align="center">3.7</td>
<td valign="top" align="center">3.7</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">6.1</td>
<td valign="top" align="center">3.5</td>
<td valign="top" align="center">3.9</td>
<td valign="top" align="center">4.4</td>
<td valign="top" align="center">4.6</td>
<td valign="top" align="center">3.8</td>
<td valign="top" align="center">5.5</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">3.5</td>
<td valign="top" align="center">5.3</td>
<td valign="top" align="center">3.7</td>
<td valign="top" align="center">2.5</td>
<td valign="top" align="center">4.5</td>
<td valign="top" align="center">3.4</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM54"><mml:mn>3.9</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>2.9</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Feature</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">2.5</td>
<td valign="top" align="center">3.5</td>
<td valign="top" align="center">4.1</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">3.9</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">4.7</td>
<td valign="top" align="center">4.5</td>
<td valign="top" align="center">2.5</td>
<td valign="top" align="center">4.8</td>
<td valign="top" align="center">4.4</td>
<td valign="top" align="center">3.4</td>
<td valign="top" align="center">4.2</td>
<td valign="top" align="center">3.1</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">3.8</td>
<td valign="top" align="center">4.1</td>
<td valign="top" align="center">2.8</td>
<td valign="top" align="center">5.7</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">3.5</td>
<td valign="top" align="center">4.1</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM55"><mml:mn>3.7</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>3.0</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Frequency</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">2.3</td>
<td valign="top" align="center">2.8</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center">3.1</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">3.8</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">3.1</td>
<td valign="top" align="center">4.1</td>
<td valign="top" align="center">2.1</td>
<td valign="top" align="center">4.5</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">3.4</td>
<td valign="top" align="center">4.8</td>
<td valign="top" align="center">3.9</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">4.2</td>
<td valign="top" align="center">4.4</td>
<td valign="top" align="center">3.8</td>
<td valign="top" align="center">2.7</td>
<td valign="top" align="center">4.4</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">3.1</td>
<td valign="top" align="center">5.1</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM56"><mml:mn>3.5</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>3.0</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Interval</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">2.9</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center">2.4</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center">3.5</td>
<td valign="top" align="center">2.8</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center">4.9</td>
<td valign="top" align="center">1.3</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">3.8</td>
<td valign="top" align="center">3.4</td>
<td valign="top" align="center">3.3</td>
<td valign="top" align="center">3.4</td>
<td valign="top" align="center">3.1</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">2.8</td>
<td valign="top" align="center">3.4</td>
<td valign="top" align="center">4.5</td>
<td valign="top" align="center">3.9</td>
<td valign="top" align="center">4.2</td>
<td valign="top" align="center">4.1</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM57"><mml:mn>3.3</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>2.7</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Kernel</td>
<td valign="top" align="center">5.1</td>
<td valign="top" align="center">6.9</td>
<td valign="top" align="center">6.1</td>
<td valign="top" align="center">6.4</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">3.9</td>
<td valign="top" align="center">5.1</td>
<td valign="top" align="center">3.5</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">6.1</td>
<td valign="top" align="center">2.6</td>
<td valign="top" align="center">4.8</td>
<td valign="top" align="center">5.1</td>
<td valign="top" align="center">7.7</td>
<td valign="top" align="center">6.5</td>
<td valign="top" align="center">6</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">4.5</td>
<td valign="top" align="center">3.8</td>
<td valign="top" align="center">4.6</td>
<td valign="top" align="center">5.6</td>
<td valign="top" align="center">3.5</td>
<td valign="top" align="center">2.8</td>
<td valign="top" align="center">4.8</td>
<td valign="top" align="center">3.8</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM58"><mml:mn>4.7</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>3.1</mml:mn></mml:math></inline-formula></td>
</tr>
<tr>
<td valign="top" align="left">Deep Learning</td>
<td valign="top" align="center">4.8</td>
<td valign="top" align="center">5.6</td>
<td valign="top" align="center">5.4</td>
<td valign="top" align="center">6.1</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">4.1</td>
<td valign="top" align="center">5.4</td>
<td valign="top" align="center">3.6</td>
<td valign="top" align="center">4.6</td>
<td valign="top" align="center">5.4</td>
<td valign="top" align="center">3.6</td>
<td valign="top" align="center">3.9</td>
<td valign="top" align="center">5.2</td>
<td valign="top" align="center">5.6</td>
<td valign="top" align="center">5.4</td>
<td valign="top" align="center">4.6</td>
<td valign="top" align="center">4.8</td>
<td valign="top" align="center">5.8</td>
<td valign="top" align="center">3.9</td>
<td valign="top" align="center">5.1</td>
<td valign="top" align="center">6</td>
<td valign="top" align="center">4.3</td>
<td valign="top" align="center">3.1</td>
<td valign="top" align="center">5.9</td>
<td valign="top" align="center">4.3</td>
<td valign="top" align="center"><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM59"><mml:mn>4.8</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>3.1</mml:mn></mml:math></inline-formula></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Euclidean distances ranged from 1.3&#x2009;mm (R-P3, interval-based method) to 7.7&#x2009;mm (AG-P2, kernel-based method) across different ML/DL categories. The GML method had the shortest average distance between SPM peaks and GML peaks for 13 out of 25 regions (highlighted in blue in <xref ref-type="table" rid="T5">Table&#x00A0;5</xref>), with distances ranging from 1.5&#x2009;mm (SC-P2) to 4.2&#x2009;mm (R-P1). The mean Euclidean distance between the peaks for the GML method was <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM60"><mml:mn>2.7</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>2.4</mml:mn></mml:math></inline-formula>&#x2009;mm. The DL and kernel-based methods ranked the lowest for 9 out of 25 and 8 out of 25 regions respectively. Euclidean distance for the DL-based method ranged from 3.0&#x2009;mm (SC-P7) to 6.1&#x2009;mm (SC-P5) and for the kernel-based method it ranged from 2.6&#x2009;mm (R-P3) to 7.7&#x2009;mm (AG-P2). The mean Euclidean distances for the DL and kernel-based methods were <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM61"><mml:mn>4.8</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>3.1</mml:mn></mml:math></inline-formula>&#x2009;mm and <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM62"><mml:mn>4.7</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>3.1</mml:mn></mml:math></inline-formula>&#x2009;mm respectively.</p>
<p>The mean Euclidean distance differed significantly between ML categories (Kruskal-Wallis Test, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM63"><mml:mi>H</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mn>6</mml:mn><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mn>130.4</mml:mn></mml:math></inline-formula>, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM64"><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:mn>1.04</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:msup><mml:mn>10</mml:mn><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mn>25</mml:mn></mml:mrow></mml:msup></mml:math></inline-formula>). On post hoc testing, the mean Euclidean distance for the GML method was significantly smaller than that of all other evaluated methods. The mean Euclidean distances for the kernel and DL-based methods did not significantly differ from each other but were significantly larger than that of all other evaluated methods.</p>
<p>Qualitative assessment of peaks was performed on the activation maps corresponding to the two most promising categories of methods (GML and interval-based). GML method identified peaks which were either in the same or adjacent gyrus to 80&#x0025; of the peaks identified by SPM across the regions of interest. The interval-based method identified peaks in the same or adjacent gyrus to 60&#x0025; of the peaks identified by SPM. (Note that in each participant, we only included language regions with at least 50&#x0025; overlap and consequently the number of language regions of interest varied between test participants).</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion"><label>4</label><title>Discussion</title>
<p>We aimed to identify ML/DL methods for classification of language activation in fMRI time series. This was motivated by the challenges of analysing naturalistic fMRI data, where regressors are often difficult to define. fMRI data from seven language tasks were acquired. Experiments using task-based language activation data, i.e., structured fMRI time series data, allowed us to understand how the ML/DL methods classify. We considered ML/DL methods for univariate time series classification from seven categories, namely general ML, DL, dictionary, feature, frequency, interval, and kernel. fMRI time series voxel data from 14 healthy participants were used for training (4,872 1D time series samples). Data from 12 participants including 6 healthy and 6 epilepsy patients were chosen for testing. There were around 720,000 total 1D samples per language paradigm - see Appendix (<xref ref-type="app" rid="app1a">Section A</xref>) for exact number of test samples. ML/DL models were trained on labelled data, using participant-specific SPM activation maps as the ground truth. The ML/DL methods were quantitatively evaluated using three different performance measures: whole-brain AUC, Dice coefficient and Euclidean distance between activation peaks identified by ML/DL and by SPM.</p>
<p>The GML and interval-based methods showed good correspondence with SPM activation (refer to <xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref>). Quantitatively, the GML method had the highest mean AUC values across the different ML/DL methods (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM65"><mml:mn>0.97</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.03</mml:mn></mml:math></inline-formula>). Interestingly, whole-brain AUC values were high for all the evaluated ML/DL methods and the mean AUC values for the GML, feature, interval and kernel-based methods were not significantly different. The interval-based method achieved the highest mean Dice coefficient (i.e., <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM66"><mml:mn>0.61</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>0.33</mml:mn></mml:math></inline-formula>). The GML method produced the smallest mean Euclidean distance for more than half the evaluated language regions, as well as the smallest mean distance (<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM67"><mml:mn>2.7</mml:mn><mml:mo>&#x00B1;</mml:mo><mml:mn>2.4</mml:mn></mml:math></inline-formula>&#x2009;mm, superior by at least 0.5&#x2009;mm compared to other ML/DL methods) when all of the evaluated language regions were considered. The GML and interval-based methods located peaks that were qualitatively similar in location to those identified by SPM for 80&#x0025; (GML) and 60&#x0025; of the (interval-based) evaluated peaks. The mean Euclidean distance for the GML method was significantly lower than for other evaluated methods. The DL and frequency-based methods rank lowest across evaluation metrics. While the frequency-based method showed excess areas of activation not corresponding to task activation, the DL-based method did predict reasonable activation. 
The methods associated with the other categories produced varied results, with no apparent trend in the metrics evaluated. Additionally, there was also no noticeable difference in results between healthy participants and epilepsy patients.</p>
<p>The results here suggest that the classification methods perform dissimilarly. Methods involving decision trees (GML and interval-based) outperform all other types of methods. The worst performing methods appeared to be those incorporating frequency information in one way or another into the features used for classification. The frequency-based method uses spectral features, and the DL-based method applies convolution operations (essentially a product operation in the frequency domain). Their inability to classify frequency features from the 1D time series information may be because the brain response during the task varies across time in a distinct, non-periodic way (<xref ref-type="bibr" rid="B45">45</xref>). GML and the interval-based method rely on many decision trees chosen across the time series, essentially allowing selection of specific time points. The feature method also considers the time series features of the entire time series (<xref ref-type="bibr" rid="B38">38</xref>), which results in worse performance than GML and interval-based methods. Although not considered here, this suggests potential opportunities for reducing the length of the time series data without sacrificing classification accuracy.</p>
<p>A previous study using resting state fMRI data suggested that locations of abnormal brain activity could be predicted from 1D fMRI time series using a Convolutional Neural Network (CNN) (i.e., DL-based method) (<xref ref-type="bibr" rid="B46">46</xref>) and raised the possibility that DL-based methods can inform design of naturalistic fMRI stimuli. Our findings suggest that GML and interval-based methods may provide additional utility for 1D fMRI time series analysis and task design.</p>
<p>To our best knowledge, specific comparisons between methods categorised in the manner described here have not been performed to date, but comparisons of specific ML/DL methods in different research domains have been reported. Findings by Cabello et al. (<xref ref-type="bibr" rid="B40">40</xref>) on different types of time series data such as electrocardiographic recordings, stock market prices, seismic data, power demand over time, and other 1D time series data from the UCR database (<xref ref-type="bibr" rid="B47">47</xref>), suggest that the interval-based method sTSF is superior to TSF in terms of critical difference ranking (compared against 100+ datasets from the UCR database). This study informed our choice of sTSF in our study. Whilst not reported here, we did implement TSF and found sTSF to produce better results for time series fMRI data. In previous work, Bagnall et al. compared TSF with Rotation Forest (our GML method) and found TSF to be the best time series classification method (<xref ref-type="bibr" rid="B28">28</xref>), which were not concordant with our findings. In a subsequent study by Bagnall et al., the inception time approach, a DL algorithm and ROCKET, a kernel-based approach, sometimes did not perform well (<xref ref-type="bibr" rid="B48">48</xref>), perhaps because of over-fitting during training of these algorithms. This agrees with our finding that the DL-based method was inferior in classification performance to the others that we considered.</p>
<p>Our analyses indicate that machine learning classification methods can be used to identify brain activation from fMRI time series data. They highlight the potential for ML/DL methods to identify activation in fMRI studies without pre-specified task regressors and in cognitive domains other than language.</p>
<sec id="s4a"><label>4.1</label><title>Limitations and future work</title>
<p>Our study focused on comparing different ML/DL methods for the classification of task-based language fMRI time series but an analogous approach should be applicable to fMRI time series with a similar block design. We used default hyper-parameters for training the ML/DL methods and fMRI time series across different language tasks of similar block design and length were used (or the time series modified to ensure uniform length, such as padding for the AG task). Further work should examine how these methods generalize to other block designs or tasks (and fMRI time series without block designs such as naturalistic fMRI). This may involve identification of which time frame or combination of time frames within the fMRI time series contribute most to the classification resulting from the ML/DL method.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusions"><label>5</label><title>Conclusions</title>
<p>Our study involved seven routinely used fMRI language activation tasks. We evaluated the utility of different ML/DL methods from different time series classification algorithm categories in predicting which task-based language fMRI 1D time series data are activated by stimuli. The GML and interval-based methods were best able to identify language areas and show promise for use in fMRI data analysis. Our findings may lead to other work where the potential of machine learning approaches for 1D fMRI time series analysis is considered under different paradigms, such as visual and motor activation.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability"><title>Data availability statement</title>
<p>The datasets presented in this article are not readily available as the data and code for this article is not publicly available due to patient privacy and ethical limitations. Requests to access the datasets should be directed to the corresponding author.</p>
</sec>
<sec id="s7" sec-type="ethics-statement"><title>Ethics statement</title>
<p>The studies involving humans were approved by Royal Brisbane and Women&#x2019;s Hospital Human Research Ethics Committee. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s9" sec-type="author-contributions"><title>Author contributions</title>
<p>EK: Conceptualization, Data curation, Formal Analysis, Investigation, Methodology, Project administration, Resources, Software, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing; VV: Conceptualization, Investigation, Methodology, Supervision, Validation, Writing &#x2013; review &#x0026; editing; JP: Data curation, Resources, Writing &#x2013; original draft; KO: Conceptualization, Funding acquisition, Writing &#x2013; review &#x0026; editing; AH: Funding acquisition, Writing &#x2013; original draft; DR: Conceptualization, Funding acquisition, Methodology, Resources, Supervision, Validation, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec id="s10" sec-type="funding-information"><title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. The work has been supported by the Australian Research Council Training Centre for Innovation in Biomedical Imaging Technology (IC170100035) funded by the Australian Government. The authors acknowledge the facilities of the National Imaging Facility at the Centre for Advanced Imaging (CAI) and Herston Imaging Research Facility (HIRF).</p>
</sec>
<sec id="s11" sec-type="COI-statement"><title>Conflict of interest</title>
<p>KO and AH were employed by Siemens Healthcare Pty Ltd.</p>
<p>The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s12" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seghier</surname><given-names>ML</given-names></name><name><surname>Lazeyras</surname><given-names>F</given-names></name><name><surname>Pegna</surname><given-names>AJ</given-names></name><name><surname>Annoni</surname><given-names>J-M</given-names></name><name><surname>Zimine</surname><given-names>I</given-names></name><name><surname>Mayer</surname><given-names>E</given-names></name></person-group>, et al. <article-title>Variability of fMRI activation during a phonological and semantic language task in healthy subjects</article-title>. <source>Hum Brain Mapp</source>. (<year>2004</year>) <volume>23</volume>(<issue>3</issue>):<fpage>140</fpage>&#x2013;<lpage>55</lpage>. <pub-id pub-id-type="doi">10.1002/hbm.20053</pub-id><pub-id pub-id-type="pmid">15449358</pub-id></citation></ref>
<ref id="B2"><label>2.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Knecht</surname><given-names>S</given-names></name><name><surname>Jansen</surname><given-names>A</given-names></name><name><surname>Frank</surname><given-names>A</given-names></name><name><surname>van Randenborgh</surname><given-names>J</given-names></name><name><surname>Sommer</surname><given-names>J</given-names></name><name><surname>Kanowski</surname><given-names>M</given-names></name></person-group>, et al. <article-title>How atypical is atypical language dominance?</article-title> <source>Neuroimage</source>. (<year>2003</year>) <volume>18</volume>(<issue>4</issue>):<fpage>917</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.1016/S1053-8119(03)00039-9</pub-id><pub-id pub-id-type="pmid">12725767</pub-id></citation></ref>
<ref id="B3"><label>3.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Berl</surname><given-names>MM</given-names></name><name><surname>Zimmaro</surname><given-names>LA</given-names></name><name><surname>Khan</surname><given-names>OI</given-names></name><name><surname>Dustin</surname><given-names>I</given-names></name><name><surname>Ritzl</surname><given-names>E</given-names></name><name><surname>Duke</surname><given-names>ES</given-names></name></person-group>, et al. <article-title>Characterization of atypical language activation patterns in focal epilepsy</article-title>. <source>Ann Neurol</source>. (<year>2014</year>) <volume>75</volume>(<issue>1</issue>):<fpage>33</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1002/ana.24015</pub-id><pub-id pub-id-type="pmid">24038442</pub-id></citation></ref>
<ref id="B4"><label>4.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Powell</surname><given-names>HR</given-names></name><name><surname>Parker</surname><given-names>GJ</given-names></name><name><surname>Alexander</surname><given-names>DC</given-names></name><name><surname>Symms</surname><given-names>MR</given-names></name><name><surname>Boulby</surname><given-names>PA</given-names></name><name><surname>Wheeler-Kingshott</surname><given-names>CA</given-names></name></person-group>, et al. <article-title>Abnormalities of language networks in temporal lobe epilepsy</article-title>. <source>Neuroimage</source>. (<year>2007</year>) <volume>36</volume>(<issue>1</issue>):<fpage>209</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2007.02.028</pub-id><pub-id pub-id-type="pmid">17400477</pub-id></citation></ref>
<ref id="B5"><label>5.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ogawa</surname><given-names>S</given-names></name><name><surname>Lee</surname><given-names>T-M</given-names></name><name><surname>Kay</surname><given-names>AR</given-names></name><name><surname>Tank</surname><given-names>DW</given-names></name></person-group>. <article-title>Brain magnetic resonance imaging with contrast dependent on blood oxygenation</article-title>. <source>Proc Natl Acad Sci</source>. (<year>1990</year>) <volume>87</volume>(<issue>24</issue>):<fpage>9868</fpage>&#x2013;<lpage>72</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.87.24.9868</pub-id><pub-id pub-id-type="pmid">2124706</pub-id></citation></ref>
<ref id="B6"><label>6.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname><given-names>S-G</given-names></name><name><surname>Ogawa</surname><given-names>S</given-names></name></person-group>. <article-title>Biophysical and physiological origins of blood oxygenation level-dependent fMRI signals</article-title>. <source>J Cereb Blood Flow Metab</source>. (<year>2012</year>) <volume>32</volume>(<issue>7</issue>):<fpage>1188</fpage>&#x2013;<lpage>206</lpage>. <pub-id pub-id-type="doi">10.1038/jcbfm.2012.23</pub-id><pub-id pub-id-type="pmid">22395207</pub-id></citation></ref>
<ref id="B7"><label>7.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Orringer</surname><given-names>DA</given-names></name><name><surname>Vago</surname><given-names>DR</given-names></name><name><surname>Golby</surname><given-names>AJ</given-names></name></person-group>. <article-title>Clinical applications and future directions of functional MRI</article-title>. In: <source>Seminars in Neurology</source>. Vol. <volume>32</volume>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Thieme Medical Publishers</publisher-name> (<year>2012</year>). pp. <fpage>466</fpage>&#x2013;<lpage>75</lpage>.</citation></ref>
<ref id="B8"><label>8.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Benjamin</surname><given-names>CF</given-names></name><name><surname>Gkiatis</surname><given-names>K</given-names></name><name><surname>Matsopoulos</surname><given-names>GK</given-names></name><name><surname>Garganis</surname><given-names>K</given-names></name></person-group>. <article-title>Presurgical language fMRI in epilepsy: an introduction</article-title>. <source>Transl Neurosci Speech Lang Disord</source>. (<year>2020</year>): <fpage>205</fpage>&#x2013;<lpage>39</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-35687-3_10</pub-id></citation></ref>
<ref id="B9"><label>9.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Manan</surname><given-names>HA</given-names></name><name><surname>Franz</surname><given-names>EA</given-names></name><name><surname>Yahya</surname><given-names>N</given-names></name></person-group>. <article-title>Utilization of functional MRI language paradigms for pre-operative mapping: a systematic review</article-title>. <source>Neuroradiology</source>. (<year>2020</year>) <volume>62</volume>:<fpage>353</fpage>&#x2013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1007/s00234-019-02322-w</pub-id><pub-id pub-id-type="pmid">31802156</pub-id></citation></ref>
<ref id="B10"><label>10.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bookheimer</surname><given-names>S</given-names></name></person-group>. <article-title>Pre-surgical language mapping with functional magnetic resonance imaging</article-title>. <source>Neuropsychol Rev</source>. (<year>2007</year>) <volume>17</volume>:<fpage>145</fpage>&#x2013;<lpage>55</lpage>. <pub-id pub-id-type="doi">10.1007/s11065-007-9026-x</pub-id><pub-id pub-id-type="pmid">17484055</pub-id></citation></ref>
<ref id="B11"><label>11.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Black</surname><given-names>D</given-names></name><name><surname>Vachha</surname><given-names>B</given-names></name><name><surname>Mian</surname><given-names>A</given-names></name><name><surname>Faro</surname><given-names>S</given-names></name><name><surname>Maheshwari</surname><given-names>M</given-names></name><name><surname>Sair</surname><given-names>H</given-names></name></person-group>, et al. <article-title>American society of functional neuroradiology&#x2013;recommended fMRI paradigm algorithms for presurgical language assessment</article-title>. <source>Am J Neuroradiol</source>. (<year>2017</year>) <volume>38</volume>(<issue>10</issue>):<fpage>E65</fpage>&#x2013;<lpage>E73</lpage>. <pub-id pub-id-type="doi">10.3174/ajnr.A5345</pub-id><pub-id pub-id-type="pmid">28860215</pub-id></citation></ref>
<ref id="B12"><label>12.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bizzi</surname><given-names>A</given-names></name><name><surname>Blasi</surname><given-names>V</given-names></name><name><surname>Falini</surname><given-names>A</given-names></name><name><surname>Ferroli</surname><given-names>P</given-names></name><name><surname>Cadioli</surname><given-names>M</given-names></name><name><surname>Danesi</surname><given-names>U</given-names></name></person-group>, et al. <article-title>Presurgical functional MR imaging of language and motor functions: validation with intraoperative electrocortical mapping</article-title>. <source>Radiology</source>. (<year>2008</year>) <volume>248</volume>(<issue>2</issue>):<fpage>579</fpage>&#x2013;<lpage>89</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2482071214</pub-id><pub-id pub-id-type="pmid">18539893</pub-id></citation></ref>
<ref id="B13"><label>13.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Binder</surname><given-names>JR</given-names></name><name><surname>Swanson</surname><given-names>SJ</given-names></name><name><surname>Hammeke</surname><given-names>TA</given-names></name><name><surname>Morris</surname><given-names>GL</given-names></name><name><surname>Mueller</surname><given-names>WM</given-names></name><name><surname>Fischer</surname><given-names>M</given-names></name></person-group>, et al. <article-title>Determination of language dominance using functional MRI: a comparison with the Wada test</article-title>. <source>Neurology</source>. (<year>1996</year>) <volume>46</volume>(<issue>4</issue>):<fpage>978</fpage>&#x2013;<lpage>84</lpage>. <pub-id pub-id-type="doi">10.1212/WNL.46.4.978</pub-id><pub-id pub-id-type="pmid">8780076</pub-id></citation></ref>
<ref id="B14"><label>14.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Roux</surname><given-names>F-E</given-names></name><name><surname>Boulanouar</surname><given-names>K</given-names></name><name><surname>Lotterie</surname><given-names>J-A</given-names></name><name><surname>Mejdoubi</surname><given-names>M</given-names></name><name><surname>LeSage</surname><given-names>JP</given-names></name><name><surname>Berry</surname><given-names>I</given-names></name></person-group>. <article-title>Language functional magnetic resonance imaging in preoperative assessment of language areas: correlation with direct cortical stimulation</article-title>. <source>Neurosurgery</source>. (<year>2003</year>) <volume>52</volume>(<issue>6</issue>):<fpage>1335</fpage>&#x2013;<lpage>47</lpage>. <pub-id pub-id-type="doi">10.1227/01.NEU.0000064803.05077.40</pub-id><pub-id pub-id-type="pmid">12762879</pub-id></citation></ref>
<ref id="B15"><label>15.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Agarwal</surname><given-names>S</given-names></name><name><surname>Sair</surname><given-names>HI</given-names></name><name><surname>Gujar</surname><given-names>S</given-names></name><name><surname>Pillai</surname><given-names>JJ</given-names></name></person-group>. <article-title>Language mapping with fMRI: current standards and reproducibility</article-title>. <source>Top Magn Reson Imaging</source>. (<year>2019</year>) <volume>28</volume>(<issue>4</issue>):<fpage>225</fpage>&#x2013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1097/RMR.0000000000000216</pub-id><pub-id pub-id-type="pmid">31385902</pub-id></citation></ref>
<ref id="B16"><label>16.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lindquist</surname><given-names>MA</given-names></name></person-group>. <article-title>The statistical analysis of fMRI data</article-title>. <source>Statist Sci</source>. (<year>2008</year>) <volume>23</volume>(<issue>4</issue>):<fpage>439</fpage>&#x2013;<lpage>64</lpage>. <pub-id pub-id-type="doi">10.1214/09-STS282</pub-id></citation></ref>
<ref id="B17"><label>17.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Kiebel</surname><given-names>SJ</given-names></name><name><surname>Holmes</surname><given-names>AP</given-names></name></person-group>. <comment>The general linear model</comment>. In: <person-group person-group-type="editor"><name><surname>Penny</surname><given-names>WD</given-names></name><name><surname>Friston</surname><given-names>KJ</given-names></name><name><surname>Ashburner</surname><given-names>JT</given-names></name><name><surname>Kiebel</surname><given-names>SJ</given-names></name><name><surname>Nichols</surname><given-names>TE</given-names></name></person-group>, editors. <source>Statistical Parametric Mapping: The Analysis of Functional Brain Images</source>. <publisher-loc>San Diego, CA and London</publisher-loc>: <publisher-name>Elsevier</publisher-name> (<year>2007</year>). <comment>p. 101&#x2013;25</comment>.</citation></ref>
<ref id="B18"><label>18.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sonkusare</surname><given-names>S</given-names></name><name><surname>Breakspear</surname><given-names>M</given-names></name><name><surname>Guo</surname><given-names>C</given-names></name></person-group>. <article-title>Naturalistic stimuli in neuroscience: critically acclaimed</article-title>. <source>Trends Cogn Sci (Regul Ed)</source>. (<year>2019</year>) <volume>23</volume>(<issue>8</issue>):<fpage>699</fpage>&#x2013;<lpage>714</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2019.05.004</pub-id></citation></ref>
<ref id="B19"><label>19.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hasson</surname><given-names>U</given-names></name><name><surname>Malach</surname><given-names>R</given-names></name><name><surname>Heeger</surname><given-names>DJ</given-names></name></person-group>. <article-title>Reliability of cortical activity during natural stimulation</article-title>. <source>Trends Cogn Sci (Regul Ed)</source>. (<year>2010</year>) <volume>14</volume>(<issue>1</issue>):<fpage>40</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2009.10.011</pub-id></citation></ref>
<ref id="B20"><label>20.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Eickhoff</surname><given-names>SB</given-names></name><name><surname>Milham</surname><given-names>M</given-names></name><name><surname>Vanderwal</surname><given-names>T</given-names></name></person-group>. <article-title>Towards clinical applications of movie fMRI</article-title>. <source>NeuroImage</source>. (<year>2020</year>) <volume>217</volume>:<fpage>116860</fpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.116860</pub-id><pub-id pub-id-type="pmid">32376301</pub-id></citation></ref>
<ref id="B21"><label>21.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Finn</surname><given-names>ES</given-names></name><name><surname>Bandettini</surname><given-names>PA</given-names></name></person-group>. <article-title>Movie-watching outperforms rest for functional connectivity-based prediction of behavior</article-title>. <source>NeuroImage</source>. (<year>2021</year>) <volume>235</volume>:<fpage>117963</fpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2021.117963</pub-id><pub-id pub-id-type="pmid">33813007</pub-id></citation></ref>
<ref id="B22"><label>22.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>J&#x00E4;&#x00E4;skel&#x00E4;inen</surname><given-names>IP</given-names></name><name><surname>Sams</surname><given-names>M</given-names></name><name><surname>Glerean</surname><given-names>E</given-names></name><name><surname>Ahveninen</surname><given-names>J</given-names></name></person-group>. <article-title>Movies and narratives as naturalistic stimuli in neuroimaging</article-title>. <source>NeuroImage</source>. (<year>2021</year>) <volume>224</volume>:<fpage>117445</fpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.117445</pub-id></citation></ref>
<ref id="B23"><label>23.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>&#x00C7;elik</surname><given-names>E</given-names></name><name><surname>Dar</surname><given-names>SUH</given-names></name><name><surname>Y&#x0131;lmaz</surname><given-names>&#x00D6;</given-names></name><name><surname>Kele&#x015F;</surname><given-names>&#x00DC;</given-names></name><name><surname>&#x00C7;ukur</surname><given-names>T</given-names></name></person-group>. <article-title>Spatially informed voxelwise modeling for naturalistic fMRI experiments</article-title>. <source>NeuroImage</source>. (<year>2019</year>) <volume>186</volume>:<fpage>741</fpage>&#x2013;<lpage>57</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2018.11.044</pub-id></citation></ref>
<ref id="B24"><label>24.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ylipaavalniemi</surname><given-names>J</given-names></name><name><surname>Savia</surname><given-names>E</given-names></name><name><surname>Malinen</surname><given-names>S</given-names></name><name><surname>Hari</surname><given-names>R</given-names></name><name><surname>Vig&#x00E1;rio</surname><given-names>R</given-names></name><name><surname>Kaski</surname><given-names>S</given-names></name></person-group>. <article-title>Dependencies between stimuli and spatially independent fMRI sources: towards brain correlates of natural stimuli</article-title>. <source>NeuroImage</source>. (<year>2009</year>) <volume>48</volume>(<issue>1</issue>):<fpage>176</fpage>&#x2013;<lpage>85</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2009.03.056</pub-id><pub-id pub-id-type="pmid">19344775</pub-id></citation></ref>
<ref id="B25"><label>25.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rashid</surname><given-names>M</given-names></name><name><surname>Singh</surname><given-names>H</given-names></name><name><surname>Goyal</surname><given-names>V</given-names></name></person-group>. <article-title>The use of machine learning and deep learning algorithms in functional magnetic resonance imaging&#x2014;a systematic review</article-title>. <source>Expert Syst</source>. (<year>2020</year>) <volume>37</volume>(<issue>6</issue>):<fpage>e12644</fpage>. <pub-id pub-id-type="doi">10.1111/exsy.12644</pub-id></citation></ref>
<ref id="B26"><label>26.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bock</surname><given-names>C</given-names></name><name><surname>Moor</surname><given-names>M</given-names></name><name><surname>Jutzeler</surname><given-names>CR</given-names></name><name><surname>Borgwardt</surname><given-names>K</given-names></name></person-group>. <article-title>Machine learning for biomedical time series classification: from shapelets to deep learning</article-title>. <source>Artif Neural Netw</source>. (<year>2021</year>) <volume>2190</volume>:<fpage>33</fpage>&#x2013;<lpage>71</lpage>. <pub-id pub-id-type="doi">10.1007/978-1-0716-0826-5_2</pub-id></citation></ref>
<ref id="B27"><label>27.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ismail Fawaz</surname><given-names>H</given-names></name><name><surname>Forestier</surname><given-names>G</given-names></name><name><surname>Weber</surname><given-names>J</given-names></name><name><surname>Idoumghar</surname><given-names>L</given-names></name><name><surname>Muller</surname><given-names>P-A</given-names></name></person-group>. <article-title>Deep learning for time series classification: a review</article-title>. <source>Data Min Knowl Discov</source>. (<year>2019</year>) <volume>33</volume>(<issue>4</issue>):<fpage>917</fpage>&#x2013;<lpage>63</lpage>. <pub-id pub-id-type="doi">10.1007/s10618-019-00619-1</pub-id></citation></ref>
<ref id="B28"><label>28.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Bagnall</surname><given-names>A</given-names></name><name><surname>Bostrom</surname><given-names>A</given-names></name><name><surname>Large</surname><given-names>J</given-names></name><name><surname>Lines</surname><given-names>J</given-names></name></person-group>. <article-title>The great time series classification bake off: an experimental evaluation of recently proposed algorithms. extended version</article-title>. <italic>arXiv</italic> [Preprint]. <italic>arXiv:1602.01711</italic> (<year>2016</year>).</citation></ref>
<ref id="B29"><label>29.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Power</surname><given-names>JD</given-names></name><name><surname>Barnes</surname><given-names>KA</given-names></name><name><surname>Snyder</surname><given-names>AZ</given-names></name><name><surname>Schlaggar</surname><given-names>BL</given-names></name><name><surname>Petersen</surname><given-names>SE</given-names></name></person-group>. <article-title>Spurious but systematic correlations in functional connectivity MRI networks arise from subject motion</article-title>. <source>Neuroimage</source>. (<year>2012</year>) <volume>59</volume>(<issue>3</issue>):<fpage>2142</fpage>&#x2013;<lpage>54</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.10.018</pub-id><pub-id pub-id-type="pmid">22019881</pub-id></citation></ref>
<ref id="B30"><label>30.</label><citation citation-type="other"><article-title>SPM - statistical parametric mapping</article-title>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.fil.ion.ucl.ac.uk/spm/">https://www.fil.ion.ucl.ac.uk/spm/</ext-link> <comment>(accessed December 19, 2023)</comment>.</citation></ref>
<ref id="B31"><label>31.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ashburner</surname><given-names>J</given-names></name><name><surname>Barnes</surname><given-names>G</given-names></name><name><surname>Chen</surname><given-names>C-C</given-names></name><name><surname>Daunizeau</surname><given-names>J</given-names></name><name><surname>Flandin</surname><given-names>G</given-names></name><name><surname>Friston</surname><given-names>K</given-names></name></person-group>, et al. <article-title>SPM12 manual</article-title>. <source>Wellc Trust Centre Neuroimaging Lond UK</source>. (<year>2014</year>) <volume>2464</volume>(<issue>4</issue>):<fpage>19</fpage>&#x2013;<lpage>78</lpage>. <ext-link ext-link-type="uri" xlink:href="https://www.fil.ion.ucl.ac.uk/spm/doc/spm12_manual.pdf">https://www.fil.ion.ucl.ac.uk/spm/doc/spm12_manual.pdf</ext-link></citation></ref>
<ref id="B32"><label>32.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>L&#x00F6;ning</surname><given-names>M</given-names></name><name><surname>Bagnall</surname><given-names>A</given-names></name><name><surname>Ganesh</surname><given-names>S</given-names></name><name><surname>Kazakov</surname><given-names>V</given-names></name><name><surname>Lines</surname><given-names>J</given-names></name><name><surname>Kir&#x00E1;ly</surname><given-names>FJ</given-names></name></person-group>. <article-title>sktime: a unified interface for machine learning with time series</article-title>. <italic>arXiv</italic> [Preprint]. <italic>arXiv:1909.07872</italic> (<year>2019</year>).</citation></ref>
<ref id="B33"><label>33.</label><citation citation-type="other"><article-title>Time series classification; sktime documentation</article-title>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://sktime-backup.readthedocs.io/en/v0.13.4/api_reference/classification.html">https://sktime-backup.readthedocs.io/en/v0.13.4/api&#x005F;reference/classification.html</ext-link>. <comment>(accessed December 19, 2023)</comment>.</citation></ref>
<ref id="B34"><label>34.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pedregosa</surname><given-names>F</given-names></name><name><surname>Varoquaux</surname><given-names>G</given-names></name><name><surname>Gramfort</surname><given-names>A</given-names></name><name><surname>Michel</surname><given-names>V</given-names></name><name><surname>Thirion</surname><given-names>B</given-names></name><name><surname>Grisel</surname><given-names>O</given-names></name></person-group>, et al. <article-title>Scikit-learn: machine learning in python</article-title>. <source>J Mach Learn Res</source>. (<year>2011</year>) <volume>12</volume>:<fpage>2825</fpage>&#x2013;<lpage>30</lpage>. <ext-link ext-link-type="uri" xlink:href="http://jmlr.org/papers/v12/pedregosa11a.html">http://jmlr.org/papers/v12/pedregosa11a.html</ext-link></citation></ref>
<ref id="B35"><label>35.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rodriguez</surname><given-names>JJ</given-names></name><name><surname>Kuncheva</surname><given-names>LI</given-names></name><name><surname>Alonso</surname><given-names>CJ</given-names></name></person-group>. <article-title>Rotation forest: a new classifier ensemble method</article-title>. <source>IEEE Trans Pattern Anal Mach Intell</source>. (<year>2006</year>) <volume>28</volume>(<issue>10</issue>):<fpage>1619</fpage>&#x2013;<lpage>30</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2006.211</pub-id><pub-id pub-id-type="pmid">16986543</pub-id></citation></ref>
<ref id="B36"><label>36.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Probst</surname><given-names>P</given-names></name><name><surname>Boulesteix</surname><given-names>A-L</given-names></name><name><surname>Bischl</surname><given-names>B</given-names></name></person-group>. <article-title>Tunability: Importance of hyperparameters of machine learning algorithms</article-title>. <source>J Mach Learn Res</source>. (<year>2019</year>) <volume>20</volume>(<issue>53</issue>):<fpage>1</fpage>&#x2013;<lpage>32</lpage>. <ext-link ext-link-type="uri" xlink:href="http://jmlr.org/papers/v20/18-444.html">http://jmlr.org/papers/v20/18-444.html</ext-link></citation></ref>
<ref id="B37"><label>37.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Sch&#x00E4;fer</surname><given-names>P</given-names></name><name><surname>Leser</surname><given-names>U</given-names></name></person-group>. <article-title>Fast and accurate time series classification with weasel</article-title>. In: <source>Proceedings of the 2017 ACM on Conference on Information and Knowledge Management</source> (<year>2017</year>). pp. <fpage>637</fpage>&#x2013;<lpage>46</lpage>.</citation></ref>
<ref id="B38"><label>38.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Christ</surname><given-names>M</given-names></name><name><surname>Braun</surname><given-names>N</given-names></name><name><surname>Neuffer</surname><given-names>J</given-names></name><name><surname>Kempa-Liehr</surname><given-names>AW</given-names></name></person-group>. <article-title>Time series feature extraction on basis of scalable hypothesis tests (tsfresh&#x2013;a python package)</article-title>. <source>Neurocomputing</source>. (<year>2018</year>) <volume>307</volume>:<fpage>72</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2018.03.067</pub-id></citation></ref>
<ref id="B39"><label>39.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lines</surname><given-names>J</given-names></name><name><surname>Taylor</surname><given-names>S</given-names></name><name><surname>Bagnall</surname><given-names>A</given-names></name></person-group>. <article-title>Time series classification with hive-cote: the hierarchical vote collective of transformation-based ensembles</article-title>. <source>ACM Trans Knowl Discov Data (TKDD)</source>. (<year>2018</year>) <volume>12</volume>(<issue>5</issue>):<fpage>1</fpage>&#x2013;<lpage>35</lpage>. <pub-id pub-id-type="doi">10.1145/3182382</pub-id></citation></ref>
<ref id="B40"><label>40.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Cabello</surname><given-names>N</given-names></name><name><surname>Naghizade</surname><given-names>E</given-names></name><name><surname>Qi</surname><given-names>J</given-names></name><name><surname>Kulik</surname><given-names>L</given-names></name></person-group>. <article-title>Fast and accurate time series classification through supervised interval search</article-title>. In: <source>2020 IEEE International Conference on Data Mining (ICDM)</source>. IEEE (<year>2020</year>). pp. <fpage>948</fpage>&#x2013;<lpage>53</lpage>.</citation></ref>
<ref id="B41"><label>41.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Middlehurst</surname><given-names>M</given-names></name><name><surname>Large</surname><given-names>J</given-names></name><name><surname>Flynn</surname><given-names>M</given-names></name><name><surname>Lines</surname><given-names>J</given-names></name><name><surname>Bostrom</surname><given-names>A</given-names></name><name><surname>Bagnall</surname><given-names>A</given-names></name></person-group>. <article-title>Hive-cote 2.0: a new meta ensemble for time series classification</article-title>. <source>Mach Learn</source>. (<year>2021b</year>) <volume>110</volume>(<issue>11-12</issue>):<fpage>3211</fpage>&#x2013;<lpage>43</lpage>. <pub-id pub-id-type="doi">10.1007/s10994-021-06057-9</pub-id></citation></ref>
<ref id="B42"><label>42.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ismail Fawaz</surname><given-names>H</given-names></name><name><surname>Lucas</surname><given-names>B</given-names></name><name><surname>Forestier</surname><given-names>G</given-names></name><name><surname>Pelletier</surname><given-names>C</given-names></name><name><surname>Schmidt</surname><given-names>DF</given-names></name><name><surname>Weber</surname><given-names>J</given-names></name></person-group>, et al. <article-title>Inceptiontime: Finding alexnet for time series classification</article-title>. <source>Data Min Knowl Discov</source>. (<year>2020</year>) <volume>34</volume>(<issue>6</issue>):<fpage>1936</fpage>&#x2013;<lpage>62</lpage>. <pub-id pub-id-type="doi">10.1007/s10618-020-00710-y</pub-id></citation></ref>
<ref id="B43"><label>43.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fedorenko</surname><given-names>E</given-names></name><name><surname>Hsieh</surname><given-names>P-J</given-names></name><name><surname>Nieto-Casta&#x00F1;&#x00F3;n</surname><given-names>A</given-names></name><name><surname>Whitfield-Gabrieli</surname><given-names>S</given-names></name><name><surname>Kanwisher</surname><given-names>N</given-names></name></person-group>. <article-title>New method for fMRI investigations of language: defining ROIs functionally in individual subjects</article-title>. <source>J Neurophysiol</source>. (<year>2010</year>) <volume>104</volume>(<issue>2</issue>):<fpage>1177</fpage>&#x2013;<lpage>94</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00032.2010</pub-id><pub-id pub-id-type="pmid">20410363</pub-id></citation></ref>
<ref id="B44"><label>44.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wilson</surname><given-names>SM</given-names></name><name><surname>Bautista</surname><given-names>A</given-names></name><name><surname>Yen</surname><given-names>M</given-names></name><name><surname>Lauderdale</surname><given-names>S</given-names></name><name><surname>Eriksson</surname><given-names>DK</given-names></name></person-group>. <article-title>Validity and reliability of four language mapping paradigms</article-title>. <source>NeuroImage: Clin</source>. (<year>2017</year>) <volume>16</volume>:<fpage>399</fpage>&#x2013;<lpage>408</lpage>. <pub-id pub-id-type="doi">10.1016/j.nicl.2016.03.015</pub-id><pub-id pub-id-type="pmid">28879081</pub-id></citation></ref>
<ref id="B45"><label>45.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Handwerker</surname><given-names>DA</given-names></name><name><surname>Gonzalez-Castillo</surname><given-names>J</given-names></name><name><surname>D&#x2019;esposito</surname><given-names>M</given-names></name><name><surname>Bandettini</surname><given-names>PA</given-names></name></person-group>. <article-title>The continuing challenge of understanding and modeling hemodynamic variation in fMRI</article-title>. <source>Neuroimage</source>. (<year>2012</year>) <volume>62</volume>(<issue>2</issue>):<fpage>1017</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.02.015</pub-id><pub-id pub-id-type="pmid">22366081</pub-id></citation></ref>
<ref id="B46"><label>46.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname><given-names>Y-Y</given-names></name><name><surname>Hu</surname><given-names>Y-S</given-names></name><name><surname>Wang</surname><given-names>J</given-names></name><name><surname>Zang</surname><given-names>Y-F</given-names></name><name><surname>Zhang</surname><given-names>Y</given-names></name></person-group>. <article-title>Toward precise localization of abnormal brain activity: 1D CNN on single voxel fMRI time-series</article-title>. <source>Front Comput Neurosci</source>. (<year>2022</year>) <volume>16</volume>:<fpage>399</fpage>&#x2013;<lpage>408</lpage>. <pub-id pub-id-type="doi">10.3389/fncom.2022.822237</pub-id></citation></ref>
<ref id="B47"><label>47.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dau</surname><given-names>HA</given-names></name><name><surname>Bagnall</surname><given-names>A</given-names></name><name><surname>Kamgar</surname><given-names>K</given-names></name><name><surname>Yeh</surname><given-names>C-CM</given-names></name><name><surname>Zhu</surname><given-names>Y</given-names></name><name><surname>Gharghabi</surname><given-names>S</given-names></name></person-group>, et al. <article-title>The UCR time series archive</article-title>. <source>IEEE/CAA J Autom Sin</source>. (<year>2019</year>) <volume>6</volume>(<issue>6</issue>):<fpage>1293</fpage>&#x2013;<lpage>305</lpage>. <pub-id pub-id-type="doi">10.1109/JAS.2019.1911747</pub-id></citation></ref>
<ref id="B48"><label>48.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Bagnall</surname><given-names>A</given-names></name><name><surname>Flynn</surname><given-names>M</given-names></name><name><surname>Large</surname><given-names>J</given-names></name><name><surname>Lines</surname><given-names>J</given-names></name><name><surname>Middlehurst</surname><given-names>M</given-names></name></person-group>. <article-title>A tale of two toolkits, report the third: on the usage and performance of hive-cote v1.0</article-title>. <italic>arXiv</italic> [Preprint]. <italic>arXiv:2004.06069</italic> (<year>2020</year>).</citation></ref>
<ref id="B49"><label>49.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abanda</surname><given-names>A</given-names></name><name><surname>Mori</surname><given-names>U</given-names></name><name><surname>Lozano</surname><given-names>JA</given-names></name></person-group>. <article-title>A review on distance based time series classification</article-title>. <source>Data Min Knowl Discov</source>. (<year>2019</year>) <volume>33</volume>(<issue>2</issue>):<fpage>378</fpage>&#x2013;<lpage>412</lpage>. <pub-id pub-id-type="doi">10.1007/s10618-018-0596-4</pub-id></citation></ref>
<ref id="B50"><label>50.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Middlehurst</surname><given-names>M</given-names></name><name><surname>Large</surname><given-names>J</given-names></name><name><surname>Cawley</surname><given-names>G</given-names></name><name><surname>Bagnall</surname><given-names>A</given-names></name></person-group>. <article-title>The temporal dictionary ensemble (TDE) classifier for time series classification</article-title>. In: <source>Machine Learning and Knowledge Discovery in Databases: European Conference, ECML PKDD 2020, Ghent, Belgium, September 14&#x2013;18, 2020, Proceedings, Part I</source>. Springer (<year>2021</year>). pp. <fpage>660</fpage>&#x2013;<lpage>76</lpage>.</citation></ref>
<ref id="B51"><label>51.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Lines</surname><given-names>J</given-names></name><name><surname>Taylor</surname><given-names>S</given-names></name><name><surname>Bagnall</surname><given-names>A</given-names></name></person-group>. <article-title>Hive-cote: the hierarchical vote collective of transformation-based ensembles for time series classification</article-title>. In: <source>2016 IEEE 16th International Conference on Data Mining (ICDM)</source> (<year>2016</year>). pp. <fpage>1041</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1109/ICDM.2016.0133</pub-id>.</citation></ref>
<ref id="B52"><label>52.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Bagnall</surname><given-names>A</given-names></name><name><surname>Flynn</surname><given-names>M</given-names></name><name><surname>Large</surname><given-names>J</given-names></name><name><surname>Lines</surname><given-names>J</given-names></name><name><surname>Middlehurst</surname><given-names>M</given-names></name></person-group>. <article-title>On the usage and performance of the hierarchical vote collective of transformation-based ensembles version 1.0 (hive-cote v1.0)</article-title>. In: <source>Advanced Analytics and Learning on Temporal Data: 5th ECML PKDD Workshop, AALTD 2020, Ghent, Belgium, September 18, 2020, Revised Selected Papers 6</source>. Springer (<year>2020</year>). p. <fpage>3</fpage>&#x2013;<lpage>18</lpage>.</citation></ref>
<ref id="B53"><label>53.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dempster</surname><given-names>A</given-names></name><name><surname>Petitjean</surname><given-names>F</given-names></name><name><surname>Webb</surname><given-names>GI</given-names></name></person-group>. <article-title>Rocket: exceptionally fast and accurate time series classification using random convolutional kernels</article-title>. <source>Data Min Knowl Discov</source>. (<year>2020</year>) <volume>34</volume>(<issue>5</issue>):<fpage>1454</fpage>&#x2013;<lpage>95</lpage>. <pub-id pub-id-type="doi">10.1007/s10618-020-00701-z</pub-id></citation></ref>
<ref id="B54"><label>54.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Middlehurst</surname><given-names>M</given-names></name><name><surname>Large</surname><given-names>J</given-names></name><name><surname>Bagnall</surname><given-names>A</given-names></name></person-group>. <article-title>The canonical interval forest (CIF) classifier for time series classification</article-title>. In: <source>2020 IEEE International Conference on Big Data (Big Data)</source>. IEEE (<year>2020</year>). p. <fpage>188</fpage>&#x2013;<lpage>95</lpage>.</citation></ref>
<ref id="B55"><label>55.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Deng</surname><given-names>H</given-names></name><name><surname>Runger</surname><given-names>G</given-names></name><name><surname>Tuv</surname><given-names>E</given-names></name><name><surname>Vladimir</surname><given-names>M</given-names></name></person-group>. <article-title>A time series forest for classification and feature extraction</article-title>. <source>Inf Sci (Ny)</source>. (<year>2013</year>) <volume>239</volume>:<fpage>142</fpage>&#x2013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.1016/j.ins.2013.02.030</pub-id></citation></ref></ref-list>
<app-group><app id="app1"><title>Appendix</title>
<sec id="app1a"><title>A.1 Test set size for different participants and tasks</title>
<p>Refer to <xref ref-type="table" rid="T6">Table A1</xref> for test set sizes of the various fMRI language tasks.</p>
</sec>
<sec id="app1b"><title>A.2 Further justification of ML/DL choice</title>
<p>Authors in (<xref ref-type="bibr" rid="B28">28</xref>) suggest starting with Random Forest (RF), Rotation Forest (RotF) or Dynamic Time Warping (DTW) as a basic sanity check on data that has never been evaluated before. Although DTW is known to be one of the more accurate algorithms, like other distance-based methods it is known to be slow and hard to scale (<xref ref-type="bibr" rid="B49">49</xref>) (for this reason the distance-based category was also excluded from further analysis). Hence, for the general ML category, RF and RotF were evaluated and RotF was retained due to its strong performance. For the dictionary-based category, two algorithms were short-listed: Temporal Dictionary Ensemble (TDE) and Word Extraction for Time Series Classification (WEASEL) (<xref ref-type="bibr" rid="B50">50</xref>). While TDE builds on the advantages of other dictionary-based algorithms such as Bag of Symbolic-Fourier Approximation (BOSS), Contractable Bag of Symbolic-Fourier Approximation (cBOSS) and WEASEL, it is known to be more accurate but also memory intensive (<xref ref-type="bibr" rid="B50">50</xref>). Authors in (<xref ref-type="bibr" rid="B50">50</xref>) also suggest using WEASEL for faster prediction. It was found that TDE indeed was memory intensive and took a long time to train, hence WEASEL was chosen instead. The Hierarchical Vote Collective of Transformation-Based Ensembles (HIVE-COTE) (<xref ref-type="bibr" rid="B51">51</xref>, <xref ref-type="bibr" rid="B52">52</xref>) algorithm is an ensemble of four different algorithms (Shapelet Transform Classifier (STC), Time Series Forest (TSF), Random Interval Spectral Forest (RISE) and Contractable Bag of Symbolic-Fourier Approximation (cBOSS)) and was an early benchmark time series classifier with high accuracy. However, it had the disadvantage of long training and test times (<xref ref-type="bibr" rid="B28">28</xref>). 
It has since been superseded by the kernel-based RandOm Convolutional KErnel Transform (ROCKET) (<xref ref-type="bibr" rid="B53">53</xref>) algorithm (which is faster and more scalable) and subsequently by HIVE-COTE version 2 (<xref ref-type="bibr" rid="B41">41</xref>). However, we found that version 2 (v2) of HIVE-COTE (<xref ref-type="bibr" rid="B41">41</xref>) still has a long training time and is also memory intensive, and it was excluded from our analysis. The ROCKET algorithm fails to produce prediction probabilities, and ARSENAL, which is an ensemble of ROCKETs, was evaluated instead. The RISE algorithm (<xref ref-type="bibr" rid="B39">39</xref>) remains the only algorithm in the frequency-based category and was the only algorithm evaluated within the category. Several feature-based algorithms were also evaluated and the best performing algorithm, the Time Series Feature Extraction based on Scalable Hypothesis Tests (TSFresh) (<xref ref-type="bibr" rid="B38">38</xref>) method, was retained. Several interval-based algorithms including Canonical Interval Forest (CIF) (<xref ref-type="bibr" rid="B54">54</xref>), Diverse Representation Canonical Interval Forest (DrCIF) (<xref ref-type="bibr" rid="B41">41</xref>), Time Series Forest (TSF) (<xref ref-type="bibr" rid="B55">55</xref>) and supervised Time Series Forest (sTSF) (<xref ref-type="bibr" rid="B40">40</xref>) were evaluated, as task-based fMRI time series are block-designed and known to have repetitive intervals. The best performing algorithm, i.e., sTSF, was retained. The shapelet-based method, Shapelet Transform Classifier, was unable to generate prediction probabilities, and was excluded from further analysis. Deep learning algorithms have had great success in the image classification domain and efforts have been made to adapt these algorithms for time series classification. 
We found that InceptionTime performed well for time series classification compared to two other methods, Residual Neural Network (ResNet) and Fully Convolutional Network (FCN), and InceptionTime was retained.</p>
<table-wrap id="T6" position="float"><label>Table A1</label>
<caption><p>Test set size for different participants and tasks.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left"/>
<th valign="top" align="center" colspan="3">SC</th>
<th valign="top" align="center" colspan="3">SWG</th>
<th valign="top" align="center" colspan="3">R</th>
<th valign="top" align="center" colspan="3">ON</th>
<th valign="top" align="center" colspan="3">AG</th>
<th valign="top" align="center" colspan="3">PSL</th>
<th valign="top" align="center" colspan="3">SCL</th>
</tr>
<tr>
<th valign="top" align="center"/>
<th valign="top" align="center">0</th>
<th valign="top" align="center">1</th>
<th valign="top" align="center">Total</th>
<th valign="top" align="center">0</th>
<th valign="top" align="center">1</th>
<th valign="top" align="center">Total</th>
<th valign="top" align="center">0</th>
<th valign="top" align="center">1</th>
<th valign="top" align="center">Total</th>
<th valign="top" align="center">0</th>
<th valign="top" align="center">1</th>
<th valign="top" align="center">Total</th>
<th valign="top" align="center">0</th>
<th valign="top" align="center">1</th>
<th valign="top" align="center">Total</th>
<th valign="top" align="center">0</th>
<th valign="top" align="center">1</th>
<th valign="top" align="center">Total</th>
<th valign="top" align="center">0</th>
<th valign="top" align="center">1</th>
<th valign="top" align="center">Total</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">HC16</td>
<td valign="top" align="center">68,488</td>
<td valign="top" align="center">3,771</td>
<td valign="top" align="center">72,259</td>
<td valign="top" align="center">70,124</td>
<td valign="top" align="center">2,900</td>
<td valign="top" align="center">73,024</td>
<td valign="top" align="center">65,755</td>
<td valign="top" align="center">4,124</td>
<td valign="top" align="center">69,879</td>
<td valign="top" align="center">69,853</td>
<td valign="top" align="center">2,090</td>
<td valign="top" align="center">71,943</td>
<td valign="top" align="center">67,296</td>
<td valign="top" align="center">6,196</td>
<td valign="top" align="center">73,492</td>
<td valign="top" align="center">73,128</td>
<td valign="top" align="center">124</td>
<td valign="top" align="center">73,252</td>
<td valign="top" align="center">72,285</td>
<td valign="top" align="center">2,591</td>
<td valign="top" align="center">74,876</td>
</tr>
<tr>
<td valign="top" align="left">HC17</td>
<td valign="top" align="center">60,340</td>
<td valign="top" align="center">3,707</td>
<td valign="top" align="center">64,047</td>
<td valign="top" align="center">63,443</td>
<td valign="top" align="center">3,185</td>
<td valign="top" align="center">66,628</td>
<td valign="top" align="center">63,099</td>
<td valign="top" align="center">5,925</td>
<td valign="top" align="center">69,024</td>
<td valign="top" align="center">60,132</td>
<td valign="top" align="center">2,429</td>
<td valign="top" align="center">62,561</td>
<td valign="top" align="center">60,201</td>
<td valign="top" align="center">4,279</td>
<td valign="top" align="center">64,480</td>
<td valign="top" align="center">63,286</td>
<td valign="top" align="center">30</td>
<td valign="top" align="center">63,316</td>
<td valign="top" align="center">40,184</td>
<td valign="top" align="center">1,273</td>
<td valign="top" align="center">41,457</td>
</tr>
<tr>
<td valign="top" align="left">HC18</td>
<td valign="top" align="center">71,299</td>
<td valign="top" align="center">4,094</td>
<td valign="top" align="center">75,393</td>
<td valign="top" align="center">67,339</td>
<td valign="top" align="center">2,943</td>
<td valign="top" align="center">70,282</td>
<td valign="top" align="center">74,692</td>
<td valign="top" align="center">1,882</td>
<td valign="top" align="center">76,574</td>
<td valign="top" align="center">77,064</td>
<td valign="top" align="center">1,587</td>
<td valign="top" align="center">78,651</td>
<td valign="top" align="center">65,465</td>
<td valign="top" align="center">5,781</td>
<td valign="top" align="center">71,246</td>
<td valign="top" align="center">71,187</td>
<td valign="top" align="center">1,185</td>
<td valign="top" align="center">72,372</td>
<td valign="top" align="center">70,812</td>
<td valign="top" align="center">4,339</td>
<td valign="top" align="center">75,151</td>
</tr>
<tr>
<td valign="top" align="left">HC19</td>
<td valign="top" align="center">45,398</td>
<td valign="top" align="center">1,298</td>
<td valign="top" align="center">46,696</td>
<td valign="top" align="center">66,896</td>
<td valign="top" align="center">2,549</td>
<td valign="top" align="center">69,445</td>
<td valign="top" align="center">68,109</td>
<td valign="top" align="center">1,533</td>
<td valign="top" align="center">69,642</td>
<td valign="top" align="center">75,065</td>
<td valign="top" align="center">2,019</td>
<td valign="top" align="center">77,084</td>
<td valign="top" align="center">56,056</td>
<td valign="top" align="center">10,451</td>
<td valign="top" align="center">66,507</td>
<td valign="top" align="center">74,082</td>
<td valign="top" align="center">43</td>
<td valign="top" align="center">74,125</td>
<td valign="top" align="center">59,401</td>
<td valign="top" align="center">9,930</td>
<td valign="top" align="center">69,331</td>
</tr>
<tr>
<td valign="top" align="left">HC20</td>
<td valign="top" align="center">72,611</td>
<td valign="top" align="center">210</td>
<td valign="top" align="center">72,821</td>
<td valign="top" align="center">73,565</td>
<td valign="top" align="center">1,105</td>
<td valign="top" align="center">74,670</td>
<td valign="top" align="center">67,622</td>
<td valign="top" align="center">2,398</td>
<td valign="top" align="center">70,020</td>
<td valign="top" align="center">70,674</td>
<td valign="top" align="center">1,244</td>
<td valign="top" align="center">71,918</td>
<td valign="top" align="center">67,586</td>
<td valign="top" align="center">1,770</td>
<td valign="top" align="center">69,356</td>
<td valign="top" align="center">70,672</td>
<td valign="top" align="center">299</td>
<td valign="top" align="center">70,971</td>
<td valign="top" align="center">63,884</td>
<td valign="top" align="center">1,251</td>
<td valign="top" align="center">65,135</td>
</tr>
<tr>
<td valign="top" align="left">HC21</td>
<td valign="top" align="center">55,143</td>
<td valign="top" align="center">6,904</td>
<td valign="top" align="center">62,047</td>
<td valign="top" align="center">59,029</td>
<td valign="top" align="center">1,788</td>
<td valign="top" align="center">60,817</td>
<td valign="top" align="center">58,773</td>
<td valign="top" align="center">2,079</td>
<td valign="top" align="center">60,852</td>
<td valign="top" align="center">58,997</td>
<td valign="top" align="center">1,312</td>
<td valign="top" align="center">60,309</td>
<td valign="top" align="center">38,124</td>
<td valign="top" align="center">11,363</td>
<td valign="top" align="center">49,487</td>
<td valign="top" align="center">60,083</td>
<td valign="top" align="center">369</td>
<td valign="top" align="center">60,452</td>
<td valign="top" align="center">59,983</td>
<td valign="top" align="center">749</td>
<td valign="top" align="center">60,732</td>
</tr>
<tr>
<td valign="top" align="left">EP01</td>
<td valign="top" align="center">65,425</td>
<td valign="top" align="center">4,420</td>
<td valign="top" align="center">69,845</td>
<td valign="top" align="center">67,917</td>
<td valign="top" align="center">1,475</td>
<td valign="top" align="center">69,392</td>
<td valign="top" align="center">71,886</td>
<td valign="top" align="center">2,512</td>
<td valign="top" align="center">74,398</td>
<td valign="top" align="center">65,133</td>
<td valign="top" align="center">3,091</td>
<td valign="top" align="center">68,224</td>
<td valign="top" align="center">48,539</td>
<td valign="top" align="center">4,472</td>
<td valign="top" align="center">53,011</td>
<td valign="top" align="center">74,629</td>
<td valign="top" align="center">104</td>
<td valign="top" align="center">74,733</td>
<td valign="top" align="center">55,634</td>
<td valign="top" align="center">2,777</td>
<td valign="top" align="center">58,411</td>
</tr>
<tr>
<td valign="top" align="left">EP02</td>
<td valign="top" align="center">66,463</td>
<td valign="top" align="center">5,869</td>
<td valign="top" align="center">72,332</td>
<td valign="top" align="center">63,736</td>
<td valign="top" align="center">5,547</td>
<td valign="top" align="center">69,283</td>
<td valign="top" align="center">67,159</td>
<td valign="top" align="center">2,540</td>
<td valign="top" align="center">69,699</td>
<td valign="top" align="center">58,252</td>
<td valign="top" align="center">4,098</td>
<td valign="top" align="center">62,350</td>
<td valign="top" align="center">62,606</td>
<td valign="top" align="center">1,629</td>
<td valign="top" align="center">64,235</td>
<td valign="top" align="center">54,828</td>
<td valign="top" align="center">239</td>
<td valign="top" align="center">55,067</td>
<td valign="top" align="center">59,574</td>
<td valign="top" align="center">3,723</td>
<td valign="top" align="center">63,297</td>
</tr>
<tr>
<td valign="top" align="left">EP03</td>
<td valign="top" align="center">67,481</td>
<td valign="top" align="center">4,060</td>
<td valign="top" align="center">71,541</td>
<td valign="top" align="center">69,346</td>
<td valign="top" align="center">563</td>
<td valign="top" align="center">69,909</td>
<td valign="top" align="center">61,952</td>
<td valign="top" align="center">717</td>
<td valign="top" align="center">62,669</td>
<td valign="top" align="center">68,210</td>
<td valign="top" align="center">2,071</td>
<td valign="top" align="center">70,281</td>
<td valign="top" align="center">67,905</td>
<td valign="top" align="center">3,880</td>
<td valign="top" align="center">71,785</td>
<td valign="top" align="center">68,000</td>
<td valign="top" align="center">287</td>
<td valign="top" align="center">68,287</td>
<td valign="top" align="center">70,834</td>
<td valign="top" align="center">5,630</td>
<td valign="top" align="center">76,464</td>
</tr>
<tr>
<td valign="top" align="left">EP04</td>
<td valign="top" align="center">69,346</td>
<td valign="top" align="center">5,995</td>
<td valign="top" align="center">75,341</td>
<td valign="top" align="center">38,491</td>
<td valign="top" align="center">2,879</td>
<td valign="top" align="center">41,370</td>
<td valign="top" align="center">69,347</td>
<td valign="top" align="center">3,187</td>
<td valign="top" align="center">72,534</td>
<td valign="top" align="center">68,336</td>
<td valign="top" align="center">1,651</td>
<td valign="top" align="center">69,987</td>
<td valign="top" align="center">38,543</td>
<td valign="top" align="center">3,208</td>
<td valign="top" align="center">41,751</td>
<td valign="top" align="center">75,450</td>
<td valign="top" align="center">860</td>
<td valign="top" align="center">76,310</td>
<td valign="top" align="center">66,906</td>
<td valign="top" align="center">5,030</td>
<td valign="top" align="center">71,936</td>
</tr>
<tr>
<td valign="top" align="left">EP05</td>
<td valign="top" align="center">69,037</td>
<td valign="top" align="center">3,846</td>
<td valign="top" align="center">72,883</td>
<td valign="top" align="center">74,602</td>
<td valign="top" align="center">1,758</td>
<td valign="top" align="center">76,360</td>
<td valign="top" align="center">73,382</td>
<td valign="top" align="center">2,064</td>
<td valign="top" align="center">75,446</td>
<td valign="top" align="center">74,873</td>
<td valign="top" align="center">1,327</td>
<td valign="top" align="center">76,200</td>
<td valign="top" align="center">39,220</td>
<td valign="top" align="center">4,575</td>
<td valign="top" align="center">43,795</td>
<td valign="top" align="center">62,232</td>
<td valign="top" align="center">313</td>
<td valign="top" align="center">62,545</td>
<td valign="top" align="center">42,213</td>
<td valign="top" align="center">515</td>
<td valign="top" align="center">42,728</td>
</tr>
<tr>
<td valign="top" align="left">EP06</td>
<td valign="top" align="center">42,397</td>
<td valign="top" align="center">1,165</td>
<td valign="top" align="center">43,562</td>
<td valign="top" align="center">77,831</td>
<td valign="top" align="center">96</td>
<td valign="top" align="center">77,927</td>
<td valign="top" align="center">37,845</td>
<td valign="top" align="center">1,736</td>
<td valign="top" align="center">39,581</td>
<td valign="top" align="center">58,548</td>
<td valign="top" align="center">1,511</td>
<td valign="top" align="center">60,059</td>
<td valign="top" align="center">43,376</td>
<td valign="top" align="center">661</td>
<td valign="top" align="center">44,037</td>
<td valign="top" align="center">58,338</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">58,338</td>
<td valign="top" align="center">63,769</td>
<td valign="top" align="center">2,990</td>
<td valign="top" align="center">66,759</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec></app>
</app-group>
</back>
</article>