<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Mar. Sci.</journal-id>
<journal-title>Frontiers in Marine Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Mar. Sci.</abbrev-journal-title>
<issn pub-type="epub">2296-7745</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmars.2023.1228867</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Marine Science</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Fully convolutional neural networks applied to large-scale marine morphology mapping</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Arosio</surname>
<given-names>Riccardo</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<xref ref-type="author-notes" rid="fn003">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2227309"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hobley</surname>
<given-names>Brandon</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="author-notes" rid="fn003">
<sup>&#x2020;</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wheeler</surname>
<given-names>Andrew J.</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1000422"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Sacchetti</surname>
<given-names>Fabio</given-names>
</name>
<xref ref-type="aff" rid="aff5">
<sup>5</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Conti</surname>
<given-names>Luis A.</given-names>
</name>
<xref ref-type="aff" rid="aff6">
<sup>6</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1208298"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Furey</surname>
<given-names>Thomas</given-names>
</name>
<xref ref-type="aff" rid="aff5">
<sup>5</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Lim</surname>
<given-names>Aaron</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff7">
<sup>7</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/858833"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Marine Geoscience Group, School of Biological, Earth and Environmental Sciences, University College Cork</institution>, <addr-line>Cork</addr-line>, <country>Ireland</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Environmental Research Institute, University College Cork</institution>, <addr-line>Cork</addr-line>, <country>Ireland</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>School of Computing Sciences, University of East Anglia</institution>, <addr-line>Norwich</addr-line>, <country>United Kingdom</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>SFI Centre for Research In Applied Geosciences, University College Cork</institution>, <addr-line>Cork</addr-line>, <country>Ireland</country>
</aff>
<aff id="aff5">
<sup>5</sup>
<institution>Marine Institute</institution>, <addr-line>Galway</addr-line>, <country>Ireland</country>
</aff>
<aff id="aff6">
<sup>6</sup>
<institution>Escola de Artes Ci&#xea;ncias e Humanidades, Universidade de S&#xe3;o Paulo</institution>, <addr-line>S&#xe3;o Paulo</addr-line>, <country>Brazil</country>
</aff>
<aff id="aff7">
<sup>7</sup>
<institution>Department of Geography, University College Cork</institution>, <addr-line>Cork</addr-line>, <country>Ireland</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Benjamin Misiuk, Dalhousie University, Canada</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Peter Feldens, Leibniz Institute for Baltic Sea Research (LG), Germany; Jeremy Rohmer, Bureau de Recherches G&#xe9;ologiques et Mini&#xe8;res, France</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Riccardo Arosio, <email xlink:href="mailto:rarosio@ucc.ie">rarosio@ucc.ie</email>
</p>
</fn>
<fn fn-type="equal" id="fn003">
<p>&#x2020;These authors have contributed equally to this work and share first authorship</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>20</day>
<month>07</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>10</volume>
<elocation-id>1228867</elocation-id>
<history>
<date date-type="received">
<day>25</day>
<month>05</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>06</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2023 Arosio, Hobley, Wheeler, Sacchetti, Conti, Furey and Lim</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Arosio, Hobley, Wheeler, Sacchetti, Conti, Furey and Lim</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>In this study we applied for the first time Fully Convolutional Neural Networks (FCNNs) to a marine bathymetric dataset to derive morphological classes over the entire Irish continental shelf. FCNNs are a set of algorithms within Deep Learning that produce pixel-wise classifications in order to create semantically segmented maps. While they have been extensively utilised on imagery for ecological mapping, their application on elevation data is still limited, especially in the marine geomorphology realm. We employed a high-resolution bathymetric dataset to create a set of normalised derivatives commonly utilised in seabed morphology and habitat mapping that include three bathymetric position indexes (BPIs), the vector ruggedness measurement (VRM), the aspect functions and three types of hillshades. The class domains cover ten or twelve semantically distinct surface textures and submarine landforms present on the shelf, with our definitions aiming for simplicity, prevalence and distinctiveness. Sets of 50 or 100 labelled samples for each class were used to train several U-Net architectures with ResNet-50 and VGG-13 encoders. Our results show a maximum model precision of 0.84 and recall of 0.85, with some classes reaching as high as 0.99 in both. A simple majority (modal) voting combining the ten best models produced an excellent map with overall F1 score of 0.96 and class precisions and recalls superior to 0.87. For target classes exhibiting high recall (proportion of positives identified), models also show high precision (proportion of correct identifications) in predictions which confirms that the underlying class boundary has been learnt. Derivative choice plays an important part in the performance of the networks, with hillshades combined with bathymetry providing the best results and aspect functions and VRM leading to an overall deterioration of prediction accuracies. 
The results show that FCNNs can be successfully applied to the seabed for a morphological exploration of the dataset and as a baseline for more in-depth habitat mapping studies. For example, prediction of semantically distinct classes as &#x201c;submarine dune&#x201d; and &#x201c;bedrock outcrop&#x201d; can be precise and reliable. Nonetheless, at present state FCNNs are not suitable for tasks that require more refined geomorphological classifications, as for the recognition of detailed morphogenetic processes.</p>
</abstract>
<kwd-group>
<kwd>Fully Convolutional Neural Networks</kwd>
<kwd>marine</kwd>
<kwd>morphology</kwd>
<kwd>habitat mapping</kwd>
<kwd>bathymetry</kwd>
</kwd-group>
<counts>
<fig-count count="12"/>
<table-count count="3"/>
<equation-count count="5"/>
<ref-count count="62"/>
<page-count count="21"/>
<word-count count="9803"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Ocean Observation</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>In the fast-expanding field of marine habitat and geomorphological mapping, with an increasing influx of data at high spatial resolution being gathered by geophysical and remote sensing surveys (&#x201c;Big Data&#x201d;), rapid, machine-based and cost-effective methods that capture the nuances of the highly varying seabed environments have become essential. Thus, computer-based supervised and unsupervised mapping methods have become progressively more popular, demonstrating equivalence or superiority to traditional manual mapping (<xref ref-type="bibr" rid="B44">Micallef et&#xa0;al., 2012</xref>; <xref ref-type="bibr" rid="B13">Diesing et&#xa0;al., 2014</xref>; <xref ref-type="bibr" rid="B29">Ismail et&#xa0;al., 2015</xref>). Presently, the leading supervised mapping approach is a combination of object-based image analysis (OBIA) (<xref ref-type="bibr" rid="B5">Blaschke et&#xa0;al., 2014</xref>) and conventional machine learning models (e.g. Decision Trees, Support Vector Machines, Random Forests etc.). In this approach, OBIA first segments raw data, for example imagery, into a suitable internal representation of descriptive objects, then a machine learning sub-system detects statistical patterns in extracted descriptive features in order to distinguish different class domains. When the raw data are digital surface models (e.g. from multibeam echosounders or Lidar data), as it happens for most marine-based habitat mapping studies, the segmentation and statistics are largely based on morphological and substrate attributes (e.g. relative depth, roughness, backscatter etc.), and habitat prediction is strictly linked to morphology. 
Recent applications range from identification and analysis of coral mounds (<xref ref-type="bibr" rid="B14">Diesing and Thorsnes, 2018</xref>; <xref ref-type="bibr" rid="B8">Conti et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B12">de Oliveira et&#xa0;al., 2021</xref>) and sediment wave characterisation (<xref ref-type="bibr" rid="B54">Summers et&#xa0;al., 2021</xref>) to general marine mapping (<xref ref-type="bibr" rid="B27">Ierodiaconou et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B38">Linklater et&#xa0;al., 2019</xref>). However, the OBIA method still requires careful engineering and considerable domain expertise and manual intervention, which increases processing time and reduces effectiveness.</p>
<p>In the last decade, Deep Learning (DL), and in particular Convolutional Neural Networks (CNNs) have supported more traditional approaches, and have shown state of the art results on a wide range of imaging problems (<xref ref-type="bibr" rid="B40">Long et&#xa0;al., 2014</xref>; <xref ref-type="bibr" rid="B25">He et&#xa0;al., 2017</xref>; <xref ref-type="bibr" rid="B32">Krizhevsky et&#xa0;al., 2017</xref>). Fully Convolutional Neural Networks (FCNNs) are a variant of CNNs that can perform per-pixel classification. Contrarily to traditional machine learning, FCNNs allow for hierarchical feature learning, which in effect combines learning features and training a classifier in one optimisation (<xref ref-type="bibr" rid="B35">LeCun et&#xa0;al., 2015</xref>). Furthermore, FCNNs can leverage semi-supervised strategies whereby subsets of labelled data are used for optimisation; this approach can be beneficial for practical applications of FCNNs for marine geomorphology and ecology mapping where the quantity and distribution of labelled data may be limited due to associated costs of <italic>in situ</italic> surveying (<xref ref-type="bibr" rid="B36">Leit&#xe3;o et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B26">Hobley et&#xa0;al., 2021</xref>). 
While interest in DL has been shown early on by the marine community for ecological and habitat mapping (<xref ref-type="bibr" rid="B19">Gazis et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B61">Yasir et&#xa0;al., 2021</xref>), only a few studies have been focused on automated identification with DL of seabed geomorphological features or textures (<xref ref-type="bibr" rid="B43">McClinton et&#xa0;al., 2012</xref>; <xref ref-type="bibr" rid="B56">Valentine et&#xa0;al., 2013</xref>; <xref ref-type="bibr" rid="B30">Juliani, 2019</xref>; <xref ref-type="bibr" rid="B31">Keohane and White, 2022</xref>; <xref ref-type="bibr" rid="B41">Lundine et&#xa0;al., 2023</xref>), even though the significance of geomorphology for habitat distribution is widely acknowledged (<xref ref-type="bibr" rid="B6">Brown et&#xa0;al., 2011</xref>; <xref ref-type="bibr" rid="B34">Lecours et&#xa0;al., 2016</xref>; <xref ref-type="bibr" rid="B23">Harris and Baker, 2020</xref>). Deep Learning in geomorphology has found instead a more fertile ground in coastal and geohazard studies (<xref ref-type="bibr" rid="B42">Ma and Mei, 2021</xref>; <xref ref-type="bibr" rid="B7">Buscombe et&#xa0;al., 2023</xref>), and in outer space, in particular for Martian or Lunar geology, where several studies have taken advantage of the high resolution optical imagery available and attempted to separate specific landforms from a background (<xref ref-type="bibr" rid="B17">Foroutan and Zimbelman, 2017</xref>; <xref ref-type="bibr" rid="B48">Palafox et&#xa0;al., 2017</xref>; <xref ref-type="bibr" rid="B59">Wang et&#xa0;al., 2017</xref>; <xref ref-type="bibr" rid="B50">Rubanenko et&#xa0;al., 2021</xref>), or more generally characterise the ground surface to identify optimal landing spots or assess rover traversability (<xref ref-type="bibr" rid="B60">Wilhelm et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B3">Barrett et&#xa0;al., 2022</xref>). <xref ref-type="bibr" rid="B3">Barrett et&#xa0;al. 
(2022)</xref> in particular have demonstrated the potential of large-scale exploratory morphological mapping, where machine learning assists the geomorphologist to isolate sections of interest in the dataset, sifting through an enormous dataset.</p>
<p>Following on this latter example, and transposing it to the marine realm, in this study we explore the potential of FCNNs to map distinctive morphologies on the seabed, with the prospect of creating an automated, streamlined method to greatly increase the efficiency of many seabed mapping workflows including data exploration of the main morphological signatures, preliminary domain segmentation for ground-truthing campaigns and the identification of areas of interest or generalised habitat predictions. We align the exercise to recurrent situations and practices in seabed mapping, and we test the capabilities of the FCNNs to their limits, feeding the bare minimum usually available to researchers:</p>
<list list-type="simple">
<list-item>
<p>1) we furnish only bathymetry and bathymetry-derived surface functions as input layers (contrarily to the optical imagery used for the planetary studies) and do not include multibeam backscatter data as it is sometimes unreliable. Elevation as main input in itself poses challenges as DL methods are designed for optical imagery.</p>
</list-item>
<list-item>
<p>2) we provide only a very limited amount of labelled data, as the creation of large amount of labels would defeat the purpose of automation and time saving. This constitutes a second challenge, as in spite of CNNs success, these models perform best with very large, labelled training datasets (<xref ref-type="bibr" rid="B55">Tarvainen and Valpola, 2017</xref>). Labels are a pivotal concern in many real-world scenarios as CNNs are optimised based on an objective error metric between model outcomes and known outcomes.</p>
</list-item>
</list>
<p>In the next sections of the paper, firstly we describe the dataset utilised and the classification systems adopted, which include two different classifications and label sets. Secondly, we concatenate various combinations of bathymetry and derivative layer inputs to create pseudo-images and assess the value of the different derivatives in the predictions. In parallel, we trial two FCNN encoders and semi-supervision techniques to gauge their effectiveness with the non-standard input data (i.e. bathymetry). Finally, we discuss the results from the point of view of applicability to marine seabed or habitat mapping studies, including challenges behind finding the optimal set of semantic morphological classes, the impact of mapping landforms with diverging dimensions and the importance of selecting appropriate derivatives for modelling neural networks on bathymetry-derived data.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Input layers: bathymetry and derivatives</title>
<p>The multibeam echosounder (MBES) bathymetry utilised in this study was obtained from the INFOMAR hydrographic dataset, which is freely accessible on the INFOMAR website (<ext-link ext-link-type="uri" xlink:href="https://www.infomar.ie">https://www.infomar.ie</ext-link>) (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>). Bathymetric data at 10&#xa0;m resolution were downloaded and processed using ESRI ArcMap v 10.6. Firstly, fine holes in the dataset were filled with the mean of the surrounding 5x5 pixel neighbours. A general median filter (5x5 rectangle) was applied to remove &#x2018;salt-and-pepper&#x2019; imperfections and fine artefacts before re-gridding using a nearest neighbour algorithm. For the purpose of this large-scale mapping, a resolution of 25 m/pixel was deemed a good compromise between morphological detail, partial suppression of acquisition artefacts in the INFOMAR dataset (especially at the outer beam) and computing speed. Bathymetry derivatives were calculated using ArcMap built-in algorithms or with the help of the Benthic Terrain Modeller (BTM) toolbox version 3.0 (<xref ref-type="bibr" rid="B58">Walbridge et&#xa0;al., 2018</xref>). The derivatives created include three bathymetric position indexes, a vector ruggedness measurement, two aspect functions (eastness and northness) and three types of hillshades (<xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>). The aspect functions rasters were smoothed using a Gaussian filter (5x5 rectangle) to simplify the signal and reduce salt-and-pepper effects.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Areal extent of the INFOMAR bathymetric data used in this study, with the location and density of ground-truthing sediment samples consulted at the labelling stage.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g001.tif"/>
</fig>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>List of derivatives and production parameters utilised in this study.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="bottom" rowspan="2" align="left">Derivative</th>
<th valign="bottom" align="center"/>
<th valign="bottom" colspan="3" align="center">Focal statistics parameters</th>
<th valign="bottom" colspan="3" align="center">Hillshade parameters</th>
<th valign="bottom" rowspan="2" align="center">Description</th>
</tr>
<tr>
<th valign="bottom" align="center">Name</th>
<th valign="top" align="center">type</th>
<th valign="top" align="center">dimension 1</th>
<th valign="top" align="center">dimension 2</th>
<th valign="top" align="center">Azimuth</th>
<th valign="top" align="center">Altitude</th>
<th valign="top" align="center">Exaggeration</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" rowspan="3" align="left">Bathymetric position index</td>
<td valign="middle" align="left">BPI 1</td>
<td valign="middle" align="center">annulus</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" rowspan="3" align="left">BPI represents the positional difference of a pixel compared to the mean of its neighbours (defined by an annulus in this case).</td>
</tr>
<tr>
<td valign="middle" align="left">BPI 2</td>
<td valign="middle" align="center">annulus</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
</tr>
<tr>
<td valign="middle" align="left">BPI 3</td>
<td valign="middle" align="center">annulus</td>
<td valign="middle" align="center">25</td>
<td valign="middle" align="center">250</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
</tr>
<tr>
<td valign="middle" align="left">Vector ruggedness measurement</td>
<td valign="middle" align="left">VRM</td>
<td valign="middle" align="center">circle</td>
<td valign="middle" align="center">3</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="left">VRM is a measure of surface roughness as the variation in three-dimensional orientation of grid cells within a neighbourhood. Vector analysis is used to calculate the dispersion of vectors (orthogonal) to grid cells within the specified neighbourhood.</td>
</tr>
<tr>
<td valign="middle" align="left">Aspect Northness [cos(aspect)]</td>
<td valign="middle" align="left">Northness</td>
<td valign="middle" rowspan="2" align="center">na</td>
<td valign="middle" rowspan="2" align="center">na</td>
<td valign="middle" rowspan="2" align="center">na</td>
<td valign="middle" rowspan="2" align="center">na</td>
<td valign="middle" rowspan="2" align="center">na</td>
<td valign="middle" rowspan="2" align="center">na</td>
<td valign="middle" rowspan="2" align="left">Aspect identifies the downslope direction of the maximum rate of change in value from each cell to its neighbours. Eastness and northness are the sine and cosine functions of aspect</td>
</tr>
<tr>
<td valign="middle" align="left">Aspect Eastness [sin(aspect)]</td>
<td valign="middle" align="left">Eastness</td>
</tr>
<tr>
<td valign="middle" rowspan="3" align="left">Hillshade</td>
<td valign="middle" align="left">HS 1</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" colspan="2" align="center">multidirectional</td>
<td valign="middle" align="center">10</td>
<td valign="middle" rowspan="3" align="left">A hillshade function produces a grayscale pseudo-3D image of the bathymetry, with the sun&#x2019;s relative position taken into account for shading the image. Different sun positions give different shading effects.</td>
</tr>
<tr>
<td valign="middle" align="left">HS 2</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">315</td>
<td valign="middle" align="center">45</td>
<td valign="middle" align="center">10</td>
</tr>
<tr>
<td valign="middle" align="left">HS 3</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">na</td>
<td valign="middle" align="center">45</td>
<td valign="middle" align="center">45</td>
<td valign="middle" align="center">10</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>For the purposes of FCNN model training, the bathymetry and derivatives were normalised to a double precision value between 0 and 1 based on the minimum and maximum value recorded or, in the case of derivative layers, calculated.</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Classification system and labelling</title>
<p>To train the weakly supervised convolutional neural network, we had to define a dataset from which the model could learn the relationship between bathymetry and derivative data and the landforms present. So firstly, a suitable classification system had to be chosen. The classification system adopted is derived from the Mareano-INFOMAR-Maremap-Geoscience Australia (MIM-GA) two-part marine geomorphology scheme, a standardised seabed mapping glossary aimed to enable more consistent seabed classifications (<xref ref-type="bibr" rid="B15">Dove et&#xa0;al., 2016</xref>; <xref ref-type="bibr" rid="B16">Dove et&#xa0;al., 2020</xref>). This framework independently describes seabed features according to their observed physical structure (Morphology), and the more subjective interpretation of their origin and evolution (Geomorphology). The separation between physical structure and genesis aligned well with the scope of the machine learning-based mapping of this study, where classes were defined based upon the textural characteristics of the surface rather than apparent geological nature or proper geomorphological definitions. This mapping approach was chosen both because of the general lack of geological ground-truthing for novel marine datasets, and for the exploratory nature of the exercise. In general, defined classes describe archetypal seabed textures which, in various combinations, form seabed landforms. The basic distinction between sediment and rock landforms was nonetheless retained (see <xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>), and the INFOMAR sediment grab dataset (<ext-link ext-link-type="uri" xlink:href="https://www.infomar.ie/maps/interactive-maps/seabed-and-sediment">https://www.infomar.ie/maps/interactive-maps/seabed-and-sediment</ext-link>) was consulted at the labelling stage (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>). Three principles for classification were adopted (following the advice in <xref ref-type="bibr" rid="B3">Barrett et&#xa0;al., 2022</xref>): (a) classes had to be representative of the diversity of seabed morphologies encountered on the Irish continental shelf. This is a &#x201c;completeness&#x201d; rule; FCNNs classify pixels in maximum-likelihood fashion, therefore it is essential to fully capture the problem domain as the FCNNs cannot create a new class, or leave a space blank, if an unknown type of seabed is encountered. (b) The classification sets were kept simple and short, as a comprehensive, lengthy list of classes would potentially create difficulties in the training process and especially create more subjective inconsistencies during the labelling work carried out by the expert, and (c) last, but most importantly, classes had to be distinct so that their differences could be confidently isolated visually by the human mapper. This step is critical as the labelling stage may introduce subjectivity and inconsistencies in class delineation that can affect the capabilities of the networks. Therefore, care was taken to semantically define each class, making sure that delineation could be performed with a high level of confidence notwithstanding the limited geological knowledge.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Classifications adopted in this study with either 10 or 12 classes and their correspondence to the MIM-GA classification system (<xref ref-type="bibr" rid="B16">Dove et&#xa0;al., 2020</xref>).</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="bottom" align="center">MIM-GA class</th>
<th valign="bottom" align="center">Class (10)</th>
<th valign="bottom" align="center">Class (12)</th>
<th valign="bottom" align="center">Morphological description</th>
<th valign="bottom" align="center">Geomorphology</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Plane</td>
<td valign="middle" align="left">
<bold>Plane</bold>
</td>
<td valign="middle" align="left">
<bold>Plane</bold>
</td>
<td valign="middle" align="left">Flat and smooth surface with either absent or imperceptible relief.</td>
<td valign="middle" align="left">Mainly represents areas of the seabed that are covered with modern Holocene soft, potentially mobile sediments (sands and muds)</td>
</tr>
<tr>
<td valign="middle" align="left">Ridge</td>
<td valign="middle" align="left">
<bold>Ridge (sediment)</bold>
</td>
<td valign="middle" align="left">
<bold>Ridge (sediment)</bold>
</td>
<td valign="middle" align="left">Surfaces characterised by a pattern of generally regular corrugation or undulation, with pronounced linear or curvilinear crests. These are usually medium to high frequency and medium-high relief.</td>
<td valign="middle" align="left">Areas of developed and active or pristine dune fields</td>
</tr>
<tr>
<td valign="middle" align="left">Hummocks/Hill</td>
<td valign="middle" align="left">
<bold>Hummocky (sediment)</bold>
</td>
<td valign="middle" align="left">
<bold>Hummocky (sediment)</bold>
</td>
<td valign="middle" align="left">Distinct elongated ridges or hills with relatively smooth surface, often occurring in swarms</td>
<td valign="middle" align="left">High relief drumlin hills or other relict hilly terrain</td>
</tr>
<tr>
<td valign="middle" align="left">Ridge</td>
<td valign="middle" align="left">
<bold>Corrugated (sediment)</bold>
</td>
<td valign="middle" align="left">
<bold>Corrugated (sediment)</bold>
</td>
<td valign="middle" align="left">Surfaces characterised by regular or irregular corrugation or undulation. These are generally high frequency and small-scale.</td>
<td valign="middle" align="left">Areas of subdued dune fields or megaripples, scouring or sorted bedforms</td>
</tr>
<tr>
<td valign="middle" align="left">Lineation</td>
<td valign="middle" align="left">
<bold>Layered (rock)</bold>
</td>
<td valign="middle" align="left">
<bold>Layered (rock)</bold>
</td>
<td valign="middle" align="left">Surfaces characterised by quasi regular or irregular patterns of repeated crests. These are generally high frequency and low relief.</td>
<td valign="middle" align="left">Bedrock exposure where the bedding or tectonic fabric is well-developed and regular.</td>
</tr>
<tr>
<td valign="middle" align="left">Groove</td>
<td valign="middle" align="left">
<bold>Grooved (sediment)</bold>
</td>
<td valign="middle" align="left">
<bold>Grooved (sediment)</bold>
</td>
<td valign="middle" align="left">Planar or quasi planar surfaces traversed by linear, shallow and narrow incisions or grooves, that can be regular and sub-parallel or irregular and criss-crossing.</td>
<td valign="middle" align="left">Iceberg ploughmarks, elongated furrows or trawling areas</td>
</tr>
<tr>
<td valign="middle" align="left">Blocks</td>
<td valign="middle" align="left">
<bold>Fissured (rock)</bold>
</td>
<td valign="middle" align="left">
<bold>Fissured (rock)</bold>
</td>
<td valign="middle" align="left">Polygonal and quasi-regular fractured seabed platforms, showing high relief compared to surrounding seabed, often breaking up into fragmented polygonal blocks, usually flat-topped.</td>
<td valign="middle" align="left">Jointed and fragmented bedrock outcrops and platforms</td>
</tr>
<tr>
<td valign="middle" align="left">Hummocks</td>
<td valign="middle" align="left">
<bold>Hummocky (rock)</bold>
</td>
<td valign="middle" align="left">
<bold>Hummocky (rock)</bold>
</td>
<td valign="middle" align="left">Rough, low-relief hummocky surfaces, often adjacent to <italic>Fissured (rock)</italic> that have been interpreted as exposed or only partially covered bedrock.</td>
<td valign="middle" align="left">Bedrock exposure where the bedding or tectonic fabric is cryptic or absent. Areas of consolidated or partially consolidated irregular glacial sediment (e.g. till).</td>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Bank/Ridge</td>
<td valign="middle" rowspan="2" align="left">
<bold>Large ridge</bold>
</td>
<td valign="middle" align="left">
<bold>Bank (sediment)</bold>
</td>
<td valign="middle" align="left">Bank or large ridge-like feature with a generally smooth surface.</td>
<td valign="middle" align="left">Sediment bank or mounded sediment accumulation</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>Relict ridge (sediment)</bold>
</td>
<td valign="middle" align="left">Large, low and broad ridge or smaller irregular arcuate ridge-like feature with a generally rough surface</td>
<td valign="middle" align="left">Glacial moraines or grounding zone wedges</td>
</tr>
<tr>
<td valign="middle" align="left">Hole</td>
<td valign="middle" rowspan="2" align="left">
<bold>Depression</bold>
</td>
<td valign="middle" align="left">
<bold>Depression (enclosed)</bold>
</td>
<td valign="middle" align="left">Closed-contour bathymetric depression below the surrounding seabed</td>
<td valign="middle" align="left">Various negative relief including scours and pockmarks</td>
</tr>
<tr>
<td valign="middle" align="left">Channel</td>
<td valign="middle" align="left">
<bold>Depression (elongated)</bold>
</td>
<td valign="middle" align="left">Elongated bathymetric depression below the surrounding seabed</td>
<td valign="middle" align="left">Various negative relief including scours and palaeo-channels</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s2_2_1">
<label>2.2.1</label>
<title>Terrain and landform classes</title>
<p>A list of 10 classes (<xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>, <xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>) was considered sufficient to capture the morphological domain of the study area. These classes include three types (or textures) of hard substrate &#x2013; <italic>Fissured, Hummocky</italic> and <italic>Layered (rock)</italic>, which comprise bedrock outcrops of meta-/sedimentary and igneous nature but can also include rough or rubbly glacial surfaces which are often hardly distinguishable from bedrock. <italic>Corrugated</italic> and <italic>Ridge (sediment)</italic> capture the extensive current-induced bedform fields, respectively the short wavelength megaripples, sediment ribbons or dunes and the larger dunes of different type (transverse, linear, trochoidal etc.) which occur especially in the Irish and Celtic seas (<xref ref-type="bibr" rid="B57">Van Landeghem et&#xa0;al., 2009</xref>; <xref ref-type="bibr" rid="B9">Creane et&#xa0;al., 2022</xref>). The Irish shelf glacial vestiges, which include prevalently moraines (<xref ref-type="bibr" rid="B47">&#xd3; Cofaigh et&#xa0;al., 2012</xref>) and drumlin fields (<xref ref-type="bibr" rid="B4">Benetti et&#xa0;al., 2010</xref>) are captured in the <italic>Large Ridge</italic> and <italic>Hummocky (sediment)</italic> classes respectively, although we included the Celtic &#x201c;megaridges&#x201d; (<xref ref-type="bibr" rid="B39">Lockhart et&#xa0;al., 2018</xref>) and sediment banks in the <italic>Large Ridge</italic> class. The <italic>Depression</italic> class includes the bathymetric lows on the shelf, which are prevalently channel-like features including scouring, palaeofluvial channels/tunnel valleys (<xref ref-type="bibr" rid="B20">Giglio et&#xa0;al., 2022</xref>) and some isolated cases of large pockmarks. Finally, finer scale, elongated depressions or incisions such as iceberg ploughmarks and furrowing are represented by the <italic>Grooved (sediment)</italic> class. The <italic>Plane</italic> class acts as a filler for the areas of smooth and featureless terrain. 
A second list of 12 classes (<xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>, <xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>) was created to test the performance of the FCNNs with a slightly more complex problem. The second classification set was established by increasing the detail for <italic>Large Ridge</italic> and <italic>Depression</italic>, splitting them respectively into <italic>Bank (sediment)</italic> and <italic>Relict Ridge</italic> (thus dividing the sediment banks from the glacial ridges), and <italic>Depression (enclosed)</italic> and <italic>Depression (elongated)</italic> (thus separating circular or quasi circular scouring and pockmarks from channels and elongated scours).</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>General overview of the textures and geometries of the classes.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g002.tif"/>
</fig>
</sec>
<sec id="s2_2_2">
<label>2.2.2</label>
<title>Labelling procedures</title>
<p>The labelling of seabed classes was carried out by a single human annotator (the first author) utilising expert judgement with the support of published studies and sediment grain size data for ground-truthing (GT samples in <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>) available from the INFOMAR website. Classes were labelled by manually digitising polygons on ArcGIS 10.6 and making sure they contained only the landforms or terrain textures of interest, partially or completely, regardless of their dimensions. Therefore, naturally larger landforms (e.g. the class <italic>Large Ridge</italic>) are defined by larger labels. Two sets of labels were created, one containing 50 labels per class, and a second with 100 labels per class. The labelled areas constitute only a very small proportion of the total study area (97,526 km<sup>2</sup>), with the 100-label set covering only 3.2% of the total, the 50 label (12 classes) 2.8% and the 50 labels (10 classes) 2.57%.</p>
<p>Each digitised polygon contains a unique semantic value associated to the landform or terrain texture class. FCNNs were trained with rasterised labels that contain one-to-one mappings of pixels from input layers (<xref ref-type="bibr" rid="B40">Long et&#xa0;al., 2014</xref>). The rasterised labels employed to train FCNNs were created using the geographic coordinates stored in each digitised polygon label and converting real-world coordinates for each vertex to image-coordinates. Training pseudo-images were created by centre-cropping 256 x 256 pixel (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>) blocks containing multi-layered raster (bathymetry and derivatives) data. By centre-cropping digitised label polygons, the edges of each pseudo-image may possess a number of unlabelled pixels, which in turn allows for semi-supervised approaches to be leveraged (see Section 2.3). Two factors are behind the specific dimension of the pseudo-images. Firstly, the power of two (256&#xa0;=&#xa0;2<sup>8</sup>) grants numerical ease in image resizing (i.e., the blocks are divided/multiplied by 2) with sequential pooling operations and up-sample. Secondly, the image size is appropriate for GPU memory constraints and mini-batch optimisation. For instance, 256x256 may allow 8 images per batch which was found to be optimal for neural network optimisation, whereas an image of 512x512 allows only 1 to 2 images per batch and converges the neural network incorrectly. In the case digitised polygons covered a region that extended past the 256 x 256 area, centre-crops were split into several 256 x 256 blocks. This process generated 553 training images for the 50 label 12-class, 543 training images for the 50 label 10-class and 1134 training images for the 100 label 10-class. For each dataset, the training imagery was randomly subdivided into mutually exclusive training (90%) and validation (10%) sets.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Example of the overall architecture of the FCNN used in this study, showing a VGG13 encoder network. The decoder network applies a transposed 2 by 2 convolution and concatenates feature maps from the encoding network at appropriate resolutions followed by a final 3 by 3 convolution. The final 1 by 1 convolution condenses feature maps to have the same number of channels as the total number of classes in the dataset.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g003.tif"/>
</fig>
</sec>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Fully Convolutional Neural Networks</title>
<p>Fully Convolutional Neural Networks (<xref ref-type="bibr" rid="B40">Long et&#xa0;al., 2014</xref>) are an extension of traditional CNN architectures (<xref ref-type="bibr" rid="B32">Krizhevsky et&#xa0;al., 2017</xref>) adapted for semantic segmentation. CNNs comprise a series of layers that process lower layer inputs through repeating convolution and pooling operations followed by a final classification layer. Each convolution and pooling layer transforms the input image, or in this case bathymetric data, into higher level abstracted representations. FCNNs can be broken down into two networks: an encoder and a decoder network. The encoder network is identical to a CNN, except the final classification layer is removed. The decoder network applies transposed convolutions in order to up-sample feature maps back to the original input size, and each decoding stage combines corresponding feature maps created by the encoder network. The final classification layer utilizes 1 by 1 convolution kernels (<xref ref-type="bibr" rid="B37">Lin et&#xa0;al., 2014</xref>) to transform the original bathymetric data and derivatives source into a set of dense probabilities using a softmax transfer function. Network weights and biases are adjusted through gradient descent by minimizing the loss function between network outputs and the ground truth pixel labels.</p>
<p>The overall architecture of the FCNN used in this study (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>) is based on a U-Net (<xref ref-type="bibr" rid="B49">Ronneberger et&#xa0;al., 2015</xref>) and the encoder networks are VGG-13 and ResNet50 (<xref ref-type="bibr" rid="B52">Simonyan and Zisserman, 2015</xref>; <xref ref-type="bibr" rid="B25">He et&#xa0;al., 2017</xref>). Residual learning using ResNet encoders has proven to surpass very deep neural networks such as VGG, but for completeness in results we experimented with both encoder networks. The decoder network applies a transposed 2 by 2 convolution for a learnt up-sample track and concatenates feature maps from the encoding network at appropriate resolutions followed by a final 3 by 3 convolution. The final 1 by 1 convolution condenses feature maps to have the same number of channels as the total number of classes in the dataset (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>).</p>
<p>Semi-supervision is the process of incorporating unlabelled image samples for the optimisation of deep neural networks. This branch of deep learning methods is more applicable when unlabelled data are readily available, while labelled instances are often hard, expensive, and time-consuming to collect. Semi-supervised methods can be capable of building better classifiers that compensate for the lack of labelled training data and therefore present a cost-effective solution to label acquisition. In this study, where semantic segmentation was achieved with a pixel classifier, the masks that were used to label pixels did not cover entire 256x256 pseudo-images and therefore every pseudo-image had pixels that were left unlabelled. This condition allowed for unsupervised loss terms to be added into the optimisation process and thus for semi-supervision to be implemented. The supervised loss term is calculated by processing a mini batch of images <inline-formula>
<mml:math display="inline" id="im1">
<mml:mrow>
<mml:mi>X</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi>&#x211d;</mml:mi>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>C</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>H</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>W</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> and corresponding segmentation maps <inline-formula>
<mml:math display="inline" id="im2">
<mml:mrow>
<mml:mi>Y</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi>&#x211d;</mml:mi>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>C</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>H</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>W</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>, where <italic>B</italic>, <italic>C</italic>, <italic>H</italic> and <italic>W</italic> are batch size, number of input channels, height and width. The network produces per-pixel logits <inline-formula>
<mml:math display="inline" id="im3">
<mml:mrow>
<mml:mover accent="true">
<mml:mi>Y</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi>&#x211d;</mml:mi>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>K</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>H</mml:mi>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>W</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>- where <italic>K</italic> is the number of target classes. The softmax transfer function (1) converts network scores into probabilities by normalizing all <italic>K</italic> scores for each pixel to sum to one:</p>
<disp-formula>
<mml:math display="block" id="M1">
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="true">&#xaf;</mml:mo>
</mml:mover>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:msubsup>
<mml:mi>&#x3a3;</mml:mi>
<mml:mrow>
<mml:msup>
<mml:mi>k</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>K</mml:mi>
</mml:msubsup>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:msup>
<mml:mi>k</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="true">&#xaf;</mml:mo>
</mml:mover>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Where, <inline-formula>
<mml:math display="inline" id="im4">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mtext>&#x3a9;</mml:mtext>
<mml:mo>;</mml:mo>
<mml:mtext>&#x3a9;</mml:mtext>
<mml:mo>&#x2286;</mml:mo>
<mml:msup>
<mml:mi>&#x2124;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> is a pixel location and <inline-formula>
<mml:math display="inline" id="im5">
<mml:mrow>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the probability for the <inline-formula>
<mml:math display="inline" id="im6">
<mml:mrow>
<mml:msup>
<mml:mi>k</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>h</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> channel at pixel location <italic>x</italic>. The negative log-likelihood loss is calculated between segmentation maps and network probabilities:</p>
<disp-formula>
<mml:math display="block" id="M2">
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mo>=</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mtext>k</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mtext>K</mml:mtext>
</mml:munderover>
<mml:mrow>
<mml:msub>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>Y</mml:mi>
</mml:mstyle>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>k</mml:mi>
</mml:mstyle>
</mml:msub>
<mml:mo stretchy="false">(</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>x</mml:mi>
</mml:mstyle>
<mml:mo stretchy="false">)</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>log</mml:mi>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>P</mml:mi>
</mml:mstyle>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>k</mml:mi>
</mml:mstyle>
</mml:msub>
<mml:mo stretchy="false">(</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>x</mml:mi>
</mml:mstyle>
<mml:mo stretchy="false">)</mml:mo>
<mml:mo stretchy="false">)</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo stretchy="false">(</mml:mo>
<mml:mn>2</mml:mn>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
</mml:mtd> </mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:math>
</disp-formula>
<p>For each image, the supervised loss is the sum of all losses for each pixel using Equation (2) and averaged according to the number of labelled pixels in <italic>Y</italic>. Full details on the use of semi-supervision can be found in <xref ref-type="bibr" rid="B26">Hobley et&#xa0;al. (2021)</xref>. The training parameters and convergence of FCNNs were analysed by testing multiple settings for learning rate and batch size, and assessing computed confusion matrices over several consecutive runs of the algorithm. This ensured that a fair range of different convergence approaches was evaluated. Furthermore, for the semi-supervised approach, several different loss weights were experimented with to tune the unsupervised loss term. The best performing networks were trained for 300 epochs with a batch-size of 12 using AdamW optimiser with a learning rate set to 0.001. With regards to the semi-supervised approach, the unsupervised loss was scaled down by a factor of 10 and the confidence threshold for teacher prediction was set to 0.97. All FCNNs were implemented and trained using Pytorch version 10.2; the code is freely available on our GitHub repository: <ext-link ext-link-type="uri" xlink:href="https://github.com/BrandonHobley/geomorph_deep">https://github.com/BrandonHobley/geomorph_deep</ext-link>.</p>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Quality assessment</title>
<p>The quantitative metrics of interest to evaluate a classification algorithm are precision and recall (Equations (3) and (4)). These metrics are adequate to test classification algorithms over different datasets as well as their capability to detect false positives and false negatives. Precision and recall are metrics that can show how a classifier performs for each specific class, where precision measures the ability of the model to identify only the relevant instances, while recall measures the ability to detect correctly the occurrence of a class of interest. For instance, in a dataset with 17 confirmed landforms and 121 false landforms, an algorithm that detects every case as false would have an accuracy of 87%, but at the same time it would have an extremely poor recall of 0%, since none of the 17 confirmed landforms would be detected. The F1-score (Equation (5)) is the harmonic mean of recall and precision, giving a suitable generalised single figure of merit to convey the performance of a classifier.</p>
<disp-formula>
<mml:math display="block" id="M3">
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
<mml:mo>&#xa0;</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
<mml:mo>&#xa0;</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
<mml:mo>+</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>f</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
<mml:mo>&#xa0;</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mn>3</mml:mn>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<mml:math display="block" id="M4">
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
<mml:mo>&#xa0;</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
<mml:mo>&#xa0;</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>p</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
<mml:mo>+</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>f</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
<mml:mo>&#xa0;</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>n</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>g</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
</mml:mstyle>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mn>4</mml:mn>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<mml:math display="block" id="M5">
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mn>1</mml:mn>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>2</mml:mn>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xd7;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
</mml:mstyle>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xd7;</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
</mml:mstyle>
<mml:mo>+</mml:mo>
<mml:mstyle mathvariant="bold" mathsize="normal">
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
</mml:mstyle>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mn>5</mml:mn>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:math>
</disp-formula>
<p>The quantitative evaluation metrics listed above are valid if the dataset is labelled, which in our study covers a small subset of the total surface mapped. Therefore, these results can give an indication of a particular classifier performance, but visual inspection is still required to fully grasp the capabilities of FCNNs for bathymetric data.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="s3_1">
<label>3.1</label>
<title>Model performance</title>
<p>For this study, 40 different FCNN model runs were carried out, and total mean of their performances are presented in <xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>. Precision, recall and F1 scores for each class and model are instead given in the <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material</bold>
</xref>.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Complete list of model results from this study.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="bottom" colspan="7" align="center"/>
<th valign="bottom" colspan="10" align="center">Input layers</th>
<th valign="bottom" align="center"/>
<th valign="bottom" align="center"/>
<th valign="bottom" align="center"/>
</tr>
<tr>
<th valign="bottom" align="center">#</th>
<th valign="bottom" align="center">Name</th>
<th valign="bottom" align="center">Encoder</th>
<th valign="bottom" align="center">Epoch</th>
<th valign="bottom" align="center">Semi <break/>supervision</th>
<th valign="bottom" align="center">Class number</th>
<th valign="bottom" align="center">Labels per class</th>
<th valign="bottom" align="left">Bathy</th>
<th valign="bottom" align="left">BPI1</th>
<th valign="bottom" align="left">BPI2</th>
<th valign="bottom" align="left">BPI3</th>
<th valign="bottom" align="center">VRM</th>
<th valign="bottom" align="center">Northn</th>
<th valign="bottom" align="center">Eastn</th>
<th valign="bottom" align="left">HS1</th>
<th valign="bottom" align="left">HS2</th>
<th valign="bottom" align="left">HS3</th>
<th valign="bottom" align="center">Precision</th>
<th valign="bottom" align="center">Recall</th>
<th valign="bottom" align="center">F1 score</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">1</td>
<td valign="middle" align="left">bbd1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">128</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">0.675</td>
<td valign="middle" align="center">0.629</td>
<td valign="middle" align="center">0.644</td>
</tr>
<tr>
<td valign="middle" align="left">2</td>
<td valign="middle" align="left">bbd2</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">140</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">0.694</td>
<td valign="middle" align="center">0.674</td>
<td valign="middle" align="center">0.680</td>
</tr>
<tr>
<td valign="middle" align="left">3</td>
<td valign="middle" align="left">bbd3</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">84</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">0.717</td>
<td valign="middle" align="center">0.660</td>
<td valign="middle" align="center">0.677</td>
</tr>
<tr>
<td valign="middle" align="left">4</td>
<td valign="middle" align="left">bhsc1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">200</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">0.694</td>
<td valign="middle" align="center">0.649</td>
<td valign="middle" align="center">0.641</td>
</tr>
<tr>
<td valign="middle" align="left">5</td>
<td valign="middle" align="left">bbd4</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">227</td>
<td valign="bottom" align="center">y</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">0.646</td>
<td valign="middle" align="center">0.638</td>
<td valign="middle" align="center">0.622</td>
</tr>
<tr>
<td valign="middle" align="left">6</td>
<td valign="middle" align="left">bbd4</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">246</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">0.647</td>
<td valign="middle" align="center">0.682</td>
<td valign="middle" align="center">0.638</td>
</tr>
<tr>
<td valign="middle" align="left">7</td>
<td valign="middle" align="left">bbd5</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">240</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.697</td>
<td valign="bottom" align="center">0.688</td>
<td valign="bottom" align="center">0.668</td>
</tr>
<tr>
<td valign="middle" align="left">8</td>
<td valign="middle" align="left">bbd6</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">282</td>
<td valign="bottom" align="center">y</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.701</td>
<td valign="bottom" align="center">0.684</td>
<td valign="bottom" align="center">0.660</td>
</tr>
<tr>
<td valign="middle" align="left">9</td>
<td valign="middle" align="left">cs1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">199</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.707</td>
<td valign="bottom" align="center">0.766</td>
<td valign="bottom" align="center">0.656</td>
</tr>
<tr>
<td valign="middle" align="left">10</td>
<td valign="middle" align="left">cs2</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">263</td>
<td valign="bottom" align="center">y</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.722</td>
<td valign="bottom" align="center">0.763</td>
<td valign="bottom" align="center">0.676</td>
</tr>
<tr>
<td valign="middle" align="left">11</td>
<td valign="middle" align="left">cs3</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">228</td>
<td valign="bottom" align="center">y</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.771</td>
<td valign="bottom" align="center">0.775</td>
<td valign="bottom" align="center">0.724</td>
</tr>
<tr>
<td valign="middle" align="left">12</td>
<td valign="middle" align="left">cs4</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">253</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.718</td>
<td valign="bottom" align="center">0.775</td>
<td valign="bottom" align="center">0.688</td>
</tr>
<tr>
<td valign="middle" align="left">13</td>
<td valign="middle" align="left">bbd7</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">279</td>
<td valign="bottom" align="center">y</td>
<td valign="bottom" align="center">12</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.442</td>
<td valign="bottom" align="center">0.507</td>
<td valign="bottom" align="center">0.430</td>
</tr>
<tr>
<td valign="middle" align="left">14</td>
<td valign="middle" align="left">bbd8</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">287</td>
<td valign="bottom" align="center">n</td>
<td valign="bottom" align="center">12</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.505</td>
<td valign="bottom" align="center">0.584</td>
<td valign="bottom" align="center">0.521</td>
</tr>
<tr>
<td valign="middle" align="left">15</td>
<td valign="middle" align="left">bbd9</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">275</td>
<td valign="bottom" align="center">n</td>
<td valign="bottom" align="center">12</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.520</td>
<td valign="bottom" align="center">0.549</td>
<td valign="bottom" align="center">0.513</td>
</tr>
<tr>
<td valign="middle" align="left">16</td>
<td valign="middle" align="left">bbd10</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">291</td>
<td valign="bottom" align="center">y</td>
<td valign="bottom" align="center">12</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.491</td>
<td valign="bottom" align="center">0.548</td>
<td valign="bottom" align="center">0.482</td>
</tr>
<tr>
<td valign="middle" align="left">17</td>
<td valign="middle" align="left">cs5</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">111</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">12</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">0.567</td>
<td valign="middle" align="center">0.491</td>
<td valign="middle" align="center">0.460</td>
</tr>
<tr>
<td valign="middle" align="left">18</td>
<td valign="middle" align="left">cs6</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">320</td>
<td valign="bottom" align="center">n</td>
<td valign="middle" align="center">12</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">0.549</td>
<td valign="middle" align="center">0.451</td>
<td valign="middle" align="center">0.451</td>
</tr>
<tr>
<td valign="middle" align="left">19</td>
<td valign="middle" align="left">b1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">228</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.343</td>
<td valign="bottom" align="center">0.385</td>
<td valign="bottom" align="center">0.274</td>
</tr>
<tr>
<td valign="middle" align="left">20</td>
<td valign="middle" align="left">b2</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">298</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.299</td>
<td valign="bottom" align="center">0.455</td>
<td valign="bottom" align="center">0.302</td>
</tr>
<tr>
<td valign="middle" align="left">21</td>
<td valign="middle" align="left">bp1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">161</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.457</td>
<td valign="bottom" align="center">0.584</td>
<td valign="bottom" align="center">0.460</td>
</tr>
<tr>
<td valign="middle" align="left">22</td>
<td valign="middle" align="left">bp2</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">149</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.405</td>
<td valign="bottom" align="center">0.595</td>
<td valign="bottom" align="center">0.434</td>
</tr>
<tr>
<td valign="middle" align="left">23</td>
<td valign="middle" align="left">hs1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">235</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.489</td>
<td valign="bottom" align="center">0.661</td>
<td valign="bottom" align="center">0.508</td>
</tr>
<tr>
<td valign="middle" align="left">24</td>
<td valign="middle" align="left">hs2</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">243</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.612</td>
<td valign="bottom" align="center">0.672</td>
<td valign="bottom" align="center">0.619</td>
</tr>
<tr>
<td valign="middle" align="left">25</td>
<td valign="middle" align="left">hsc1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">186</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.665</td>
<td valign="bottom" align="center">0.757</td>
<td valign="bottom" align="center">0.683</td>
</tr>
<tr>
<td valign="middle" align="left">26</td>
<td valign="middle" align="left">hsc2</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">103</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.786</td>
<td valign="bottom" align="center">0.788</td>
<td valign="bottom" align="center">0.774</td>
</tr>
<tr>
<td valign="middle" align="left">27</td>
<td valign="middle" align="left">bb1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">243</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.521</td>
<td valign="bottom" align="center">0.656</td>
<td valign="bottom" align="center">0.544</td>
</tr>
<tr>
<td valign="middle" align="left">28</td>
<td valign="middle" align="left">bb2</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">286</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.530</td>
<td valign="bottom" align="center">0.655</td>
<td valign="bottom" align="center">0.555</td>
</tr>
<tr>
<td valign="middle" align="left">29</td>
<td valign="middle" align="left">bhsc2</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">240</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.845</td>
<td valign="bottom" align="center">0.850</td>
<td valign="bottom" align="center">0.833</td>
</tr>
<tr>
<td valign="middle" align="left">30</td>
<td valign="middle" align="left">bhsc3</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">252</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.825</td>
<td valign="bottom" align="center">0.848</td>
<td valign="bottom" align="center">0.830</td>
</tr>
<tr>
<td valign="middle" align="left">31</td>
<td valign="middle" align="left">bphs1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">280</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.731</td>
<td valign="bottom" align="center">0.779</td>
<td valign="bottom" align="center">0.733</td>
</tr>
<tr>
<td valign="middle" align="left">32</td>
<td valign="middle" align="left">bphs2</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">165</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.772</td>
<td valign="bottom" align="center">0.801</td>
<td valign="bottom" align="center">0.770</td>
</tr>
<tr>
<td valign="middle" align="left">33</td>
<td valign="middle" align="left">bd1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">38</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.287</td>
<td valign="bottom" align="center">0.291</td>
<td valign="bottom" align="center">0.291</td>
</tr>
<tr>
<td valign="middle" align="left">34</td>
<td valign="middle" align="left">bd2</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">45</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.212</td>
<td valign="bottom" align="center">0.266</td>
<td valign="bottom" align="center">0.262</td>
</tr>
<tr>
<td valign="middle" align="left">35</td>
<td valign="middle" align="left">bpd1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">268</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.365</td>
<td valign="bottom" align="center">0.530</td>
<td valign="bottom" align="center">0.393</td>
</tr>
<tr>
<td valign="middle" align="left">36</td>
<td valign="middle" align="left">bpd2</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">240</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">0.545</td>
<td valign="bottom" align="center">0.629</td>
<td valign="bottom" align="center">0.565</td>
</tr>
<tr>
<td valign="middle" align="left">37</td>
<td valign="middle" align="left">hscd1</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">150</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.598</td>
<td valign="bottom" align="center">0.753</td>
<td valign="bottom" align="center">0.614</td>
</tr>
<tr>
<td valign="middle" align="left">38</td>
<td valign="middle" align="left">hscd2</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">296</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">n</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.771</td>
<td valign="bottom" align="center">0.776</td>
<td valign="bottom" align="center">0.745</td>
</tr>
<tr>
<td valign="middle" align="left">39</td>
<td valign="middle" align="left">cs6</td>
<td valign="bottom" align="center">VGG13</td>
<td valign="bottom" align="center">176</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.720</td>
<td valign="bottom" align="center">0.823</td>
<td valign="bottom" align="center">0.735</td>
</tr>
<tr>
<td valign="middle" align="left">40</td>
<td valign="middle" align="left">cs7</td>
<td valign="bottom" align="center">ResNet50</td>
<td valign="bottom" align="center">280</td>
<td valign="middle" align="center">n</td>
<td valign="bottom" align="center">10</td>
<td valign="bottom" align="center">100</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="middle" align="center">y</td>
<td valign="bottom" align="center">0.749</td>
<td valign="bottom" align="center">0.818</td>
<td valign="bottom" align="center">0.765</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The first set of results (models #1 to #12) shows the initial tests carried out on the two different encoders (VGG13 and ResNet50) comparing their efficacy and assessing the utility of semi-supervision and some preliminary combinations of input layers. Overall, the scores show that neither VGG13 nor ResNet50 outperforms the other, although ResNet50 produces slightly better scores at the second decimal point, with increases between 0.01 and 0.05 (e.g. compare models #6 and #7 or #10 and #11).</p>
<p>The use of semi-supervision does not significantly or consistently improve the results, contributing only to positive or negative fluctuations. For example, ResNet50 model #11 gains 0.053 points in Precision compared to non-supervised #12 (<xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>), with no change in Recall. ResNet50 model #8 gains only 0.004 points in Precision and loses 0.004 points in Recall compared to unsupervised model #7. VGG13 models instead seem to suffer more from the application of semi-supervision, leading to a higher loss in scores (e.g. compare model outputs #5 and #6).</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Comparison between non-supervised and supervised ResNet50 model runs #11 and #12. A visual inspection reveals only minor differences in the overall classification.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g004.tif"/>
</fig>
<p>The best model results were achieved using the complete set of input layers, with ResNet50 models #11 and #12 giving F1 scores of 0.724 and 0.688 respectively. Evaluation metrics are supported by the visual assessment of the resulting thematic maps, where models #11 and #12 show the most visually pleasing results (<xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>). Nonetheless, high scores were also obtained by limiting the input to a combination of bathymetry, BPIs, VRM and Aspect functions (models #3 and #8, with F1 scores of 0.677 and 0.660) or bathymetry and hillshades (model #4, F1 score of 0.641). In order to test the contribution of the input layers to the model predictions, a series of additional model runs were carried out using both encoders but without implementing the semi-supervision (which the previous results reveal to be relatively erratic), and increasing the number of labels to 100 per class, to gauge the effect of boosting label numbers on model performance.</p>
<p>The results of this series of tests are presented in <xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>, model numbers #19 to #40. As expected, the scores show an overall improvement caused by the increase in the number of labels from 50 to 100. While the time effort required to create labels for the classes is doubled (from 500 to 1000 labels in total), the improvements are significant, up to ~0.19 points, i.e., from 0.641 to 0.833 when comparing the F1 scores of the best VGG13 bathymetry and hillshade results (#4 vs #29).</p>
<p>Once again ResNet50 runs are slightly more successful in evaluation metrics compared to VGG13, with ResNet50 scoring higher in F1 8 times out of 11 model runs.</p>
<p>The exploration of the usefulness of each input layer in model performance provides strong indications that the hillshades are the most valuable set of layers for a correct prediction of morphological classes. Models that utilise hillshades have consistently higher scores than those that do not (cf. for example models #21 with #31 or #33 with #37, <xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>). The use of hillshades alone provides very good results (model #26 F1 score: 0.774), although the combination of layers with different azimuths is essential and a single hillshade is insufficient to produce an accurate map. Overall, the combination of the other derivatives alone or with bathymetry leads to substantially inferior predictions, with Precision scores consistently under 0.59 and Recall scores only slightly better. Aspect functions and VRM do not seem to provide useful information to the models, on the contrary their addition is detrimental to their performance. For example, bathymetry as input layer alone (#19 and #20) contributes to a better score than bathymetry combined with aspect functions and VRM (#33 and #34). While the bathymetric position indexes improve the predictions of the bathymetry baseline, they do not seem to enhance significantly the performance of the hillshade layers, with oscillating results when comparing the &#x201c;HS full&#x201d; baselines (models #25 and 26) and the &#x201c;BPI + all HS&#x201d; (models #31 and 32). The only layer that does improve the predictions of the hillshades alone is the bathymetry, with model runs #29 and 30 presenting the highest scores obtained in this study (VGG13, Precision 0.845, Recall 0.850). Visually comparing the map outputs of hillshades alone against bathymetry-supported hillshades shows improvement in score metrics obtained by the latter as reflected in the outlook of the map (<xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5</bold>
</xref>), although the crispness of the boundaries is somewhat diminished, creating more &#x201c;padded&#x201d; class interfaces and generalisations.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Comparison between the results of ResNet model #40 (complete set of layers), #30 (bathymetry and hillshades) and #26 (only hillshades); visual inspection supports the better accuracy metrics obtained by ResNet50 model #30.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g005.tif"/>
</fig>
<p>The effect of the combination of all the input layers is given in runs #39 and 40, where the scores are only slightly superior to the model runs of the hillshades, but inferior to the bathymetry and hillshades runs. Overall, <italic>Large Ridge, Plane</italic> and <italic>Fissured (rock)</italic> are the three most successfully identified classes by all 10-class models, with an average F1 score of 0.884, 0.772 and 0.716 respectively. <italic>Corrugated (sediment)</italic>, <italic>Hummocky (sediment)</italic> and <italic>Layered (rock)</italic> score instead the lowest across all models, with average F1 scores of 0.471, 0.466 and 0.423. The confusion matrices (see the <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material</bold>
</xref>) show that the prevalent misinterpretation is related to Type II errors (false negatives) where <italic>Layered (rock)</italic> is classified as <italic>Plane</italic>, <italic>Hummocky (sediment)</italic> as <italic>Depression</italic> and <italic>Corrugated (sediment)</italic> as <italic>Large Ridge</italic>. Probable causes for these misinterpretations are treated in the discussion.</p>
<p>Finally, models #13 to #18 (<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>) show the results of separate tests carried out to investigate the performance of the FCNNs with an increased number of labels. All the results show a substantial decrease in all the scores when moving from the 10 class to the 12 class problem, with Precision and Recall ranging between 0.442-0.567 and 0.451-0.584. Confusion matrices show a decline in accuracy in all classes, and not only those that were split. <italic>Depression (enclosed)</italic> and <italic>Bank (sediment)</italic> scored the lowest amongst the classes, showing that the separation from the original and more general <italic>Depression</italic> and <italic>Large Ridge</italic> classes (10 class division) weakens the training.</p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Modal voting and combined map</title>
<p>The use of several permutations and combinations of different input layers allows for an ensemble learning scenario to be leveraged. We have tested this hypothesis with a simple modal voting of FCNN pixel classifications for the 10 best performing models (both in terms of scores and visual quality), which produced an excellent map with an overall F1 score of 0.96 and class precisions and recalls superior to 0.87. The results and full map are presented in <xref ref-type="supplementary-material" rid="SM1">
<bold>Table S1</bold>
</xref> and <xref ref-type="supplementary-material" rid="SM1">
<bold>Figure S1</bold>
</xref> in the <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material</bold>
</xref>.</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>Scores and qualitative assessment of the results have shown that both ResNet50 and VGG13 encoders can achieve good accuracy, with performances driven mostly by the nature of the input layers and the quantity and precision of the labelling. The unsuccessful attempt with 12 classes is most likely caused by a fallacy in the semantic definition of these classes more than weakness of the networks, and it shows that FCNNs can be very susceptible to deceptive labelling. In the first set of tests the best score result was given by ResNet50 model #11, that included all input layers; however, our subsequent analysis of layer contribution shows that the best results are achieved with hillshades and bathymetry only. It must be said that this discrepancy relies on the comparison with a single observation in the first set (i.e. model #4 vs models #9 to 12), and if we take the worst performing model with all input layers (model #9), its scores are not too different from those of hillshade-based model #4 (only Recall being significantly higher in #9). The limitation in sample comparison coupled with the consistent observation that non-hillshade derivatives do not enhance the performance even in the best of cases, support the conclusion that either model #4 is an underperforming outlier or that the doubling of labels has substantially improved the prediction performance based on hillshades. The evaluation metrics improvement generated by the addition of the bathymetry layer to the hillshades input is possibly partly due to the nature of the offshore physiography, where some classes are preferentially found at specific bathymetric ranges. For example, bedrock outcrops are focussed close to the coastline, and unusually high F1 scores for <italic>Fissured (rock)</italic> and <italic>Hummocky (rock)</italic> in the bathymetry-based models (#19, #20, see <xref ref-type="supplementary-material" rid="SM1">
<bold>Table S1</bold>
</xref> in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material</bold>
</xref>) strengthen the suspicion of a regional bias. Therefore, the utility of the bathymetry input is potentially lower in different datasets.</p>
<p>
<xref ref-type="fig" rid="f6">
<bold>Figures&#xa0;6</bold>
</xref>&#x2013;<xref ref-type="fig" rid="f8">
<bold>8</bold>
</xref> give an overview of the results provided by the best performing model (#30) and the combined modal vote map. A qualitative assessment of the maps shows that slightly better performances are sometimes achieved to the detriment, in places, of boundary crispness and detail. The evaluation metrics, calculated on a pixel basis, give a good approximation of the effectiveness of a model, however in order to fully assess the models&#x2019; performance and potential for seabed mapping studies, we need to consider the results in term of boundary position, nature of misclassifications, type of class misclassified and general distribution of errors.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Results from best performing ResNet50 model (#30 &#x2013; bathymetry and hillshades only) and the modal vote map. While producing overall the best precision and recall scores amongst the model runs, model #30 has underperformed in the detection of the <italic>Layered (rock)</italic> class (F1 score 0.584), completely misinterpreting the sorted bedforms in the Celtic Sea as rock <bold>(A)</bold>. The modal vote map is instead effective in recognising the bedforms, having better efficacy in identifying <italic>Layered (rock)</italic> (F1 score 0.90). The glacial streamlined terrain in <bold>(B)</bold> is well captured by model #30, with only minor mixing between <italic>Large Ridge</italic> and <italic>Hummocky (sediment)</italic> where Rogen moraines become larger and are intertwined with larger underlying morainic ridges. While the modal vote map gives also a fair depiction of the area, it overestimates the presence of <italic>Depressions</italic>, probably due to the interference of the BPI layers.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g006.tif"/>
</fig>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Results from best performing ResNet50 model (#30 &#x2013; bathymetry and hillshades only) and the modal vote map. <bold>(A)</bold> Model #30 classifies correctly the extent of the large dune field, although once again the <italic>Layered (rock)</italic> class is erroneously predicted in liminal places. Both <bold>(A)</bold> and <bold>(B)</bold> show well the higher detail provided by the modal vote map, for example in <bold>(A)</bold> singular dune ridges are mapped correctly at the centre of the field, while for model #30 they are generalised with the surrounding flat or depressed terrain.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g007.tif"/>
</fig>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>Results from best performing ResNet50 model (#30 &#x2013; bathymetry and hillshades only) and the modal vote map. <bold>(A)</bold> this inset shows the overinterpreted <italic>Depressions</italic> for the modal vote map adjacent to the rocky outcrops, possibly caused by the BPI layers and totally absent for model #30. <bold>(B)</bold> model #30 correctly identifies the series of moraines in Donegal Bay, while the modal vote map produces a result which is a mixture of textural interpretation (corrugated seabed over the moraines) and larger features interpretation. The bathymetry artefacts that cover the otherwise featureless seabed in the southern portion of the inset have caused misinterpretations in both models; in particular model #30 shows again the confusion in predicting the location of <italic>Layered (rock)</italic>, assigning the artefacts that value.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g008.tif"/>
</fig>
<sec id="s4_1">
<label>4.1</label>
<title>Sources of error and uncertainty</title>
<p>In the breakdown of evaluation metrics for each class (<xref ref-type="supplementary-material" rid="SM1">
<bold>Table S1</bold>
</xref> in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material</bold>
</xref>) the three most recurring weakest predictions are linked to the classes <italic>Corrugated (sediment)</italic>, <italic>Hummocky (sediment)</italic> and <italic>Layered (rock)</italic>. Coupling the observations of class type misinterpretation (see Results) and the qualitative assessment of the map outputs has led to the identification of three main types of errors or uncertainty.</p>
<p>Misclassifications linked to liminal spaces between classes is the first type of ambiguities we discuss (<xref ref-type="fig" rid="f9">
<bold>Figures&#xa0;9A, B</bold>
</xref>). This misclassification is reflected in the significant confusion between <italic>Layered (rock)</italic> and <italic>Plane</italic> or <italic>Hummocky (sediment)</italic> and <italic>Depression</italic>. Stratified, gently dipping bedrock possesses significant extents of planar features within it (bedding planes) that transition into fine elongated and often isolated ridges. This texture is sometimes misidentified as <italic>Plane</italic>, but in unlabelled data can also be observed as <italic>Layered (rock)</italic> in areas of sorted bedforms that possess similar geometry. A similar case is provided by the <italic>Hummocky (sediment)</italic> class, which includes the occurrences of drumlins (oval shaped, glacial-flow aligned, moraine hills formed beneath fast-moving ablating ice flows). The drumlins are surrounded by depressed areas, the &#x201c;connecting surface&#x201d; between the high relief landforms. Models tended to confuse the proximal interconnecting surface as <italic>Depression</italic> instead of &#x201c;drumlin&#x201d;, leading to the lower score. In defence of the networks, it is often very difficult even for a geomorphologist to find the &#x201c;correct&#x201d; place to draw a boundary to define a landform (<xref ref-type="bibr" rid="B53">Smith and Mark, 2003</xref>). One major reason that labelling was carried out by a single expert was to try to achieve maximum consistency in delineation, as another geomorphologist might introduce subjective bias and training conflicts for the networks. Moreover, complex terrains, or areas where class assignment felt ambiguous, were deliberately left unlabelled, effectively leaving the model to decide. 
We have stressed in the Methods section that good care was taken in the definition of distinctive semantic classes; however, these errors indicate that morphological textures form part of a spectrum that is fundamentally difficult to compartmentalise (e.g., at what scale and configuration does a corrugation become a hummock, or vice-versa?), and the shortcomings of the FCNNs are at least partly by-products of natural variability and the inability of a set of classes to fully capture it. Without using a more complex set of classes and fuzzy classifiers it is not possible to capture every existing terrain variation.</p>
<fig id="f9" position="float">
<label>Figure&#xa0;9</label>
<caption>
<p>Types of error and ambiguities encountered in the maps. <bold>(A, B)</bold> sharp class transitions/interfaces and misclassification due to the ambiguous nature of the terrain. This is especially evident in <bold>(B)</bold>, where the dunes cross a rugged bedrock terrain with a similar signature. <bold>(C)</bold> bathymetry artefacts caused by MBES swath merging and correction that leads to a striping effect (misclassified as <italic>Large Ridge</italic>). <bold>(D)</bold> bathymetry artefacts and pixelation produced by low quality older MBES data.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g009.tif"/>
</fig>
<p>The second type of ambiguity is related to scale (<xref ref-type="fig" rid="f7">
<bold>Figures&#xa0;7</bold>
</xref>, <xref ref-type="fig" rid="f8">
<bold>8</bold>
</xref>, <xref ref-type="fig" rid="f10">
<bold>10</bold>
</xref>). Our classifications included the class <italic>Large Ridge</italic> (or <italic>Bank (sediment)</italic> and <italic>Relict ridge</italic>), which can be significantly bigger than other terrain or landform classes. This factor of scale ambiguity was introduced wittingly into the models, as we wanted to explore the &#x201c;style&#x201d; and ability of the networks to disentangle the problem of multiscale classification, which is very common in geomorphology and habitat mapping. If restricted to create a single map layer with a small number of classes, the human mind would prioritise the assignation of a class depending on what they think is the most important attribute to classify. So, for example, a large moraine which is covered by a boulder field might be preferentially mapped as &#x201c;moraine&#x201d;, even though both classes identify a correct characteristic of the ground. The hierarchical nature of BTM (<xref ref-type="bibr" rid="B58">Walbridge et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B21">Goes et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B11">de Oliveira et&#xa0;al., 2020</xref>) perpetuates this problem. In our results, this multiscale ambiguity is well reflected in the misclassification of <italic>Corrugated (sediment)</italic> as <italic>Large Ridge</italic>; corrugated surfaces such as smaller dunes or sorted bedforms occur extensively on the shelf and can overprint larger features, such as sediment banks or large moraines. The networks preferentially choosing the classification as <italic>Large Ridge</italic> might reduce the scores in the evaluation metrics but do not technically produce a wrong interpretation, rather a partial one. In some instances (e.g. the on shelf edge, see <xref ref-type="fig" rid="f10">
<bold>Figure&#xa0;10</bold>
</xref>), model predictions have dissected longer wavelength dunes (i.e. large underlying landforms) interpreting them partially as <italic>Large Ridges</italic> and partially as <italic>Corrugated (sediment)</italic>, where the superficial sorted bedforms are more pronounced. Class prioritisation seems to be dependent on the way the model has learned the classes and boundaries, which in turn depends on adjusted weights and biases the model has learned during model training. However, understanding the individual activations and the internal workings of the neural network would require a study of class activation maps or the visualisation of deconvolutional layers (<xref ref-type="bibr" rid="B45">Noh et&#xa0;al., 2015</xref>).</p>
<fig id="f10" position="float">
<label>Figure&#xa0;10</label>
<caption>
<p>Representation of the different classification styles adopted by the networks when dealing with &#x201c;nested&#x201d; bedforms with different dimensions (large dunes, finer megaripples and sorted bedforms) using discrete and non-overlapping classes. All models map the most visible class in an area, reaching different competing results. Models #30 and #40 produce good alternative representations, while model #29 fails to reach a proper depiction of the area.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g010.tif"/>
</fig>
<p>Finally, a third type of recurring error is connected to an inherent problem of the input layers: namely artefacts. MBES data can present many types of artefacts, mostly caused by the limits of the instrument, the motions of the survey vessel (dynamic systematic errors), and poor tidal or water sound velocity control causing vertical shifts and sound refraction. These artefacts are difficult to eliminate completely and are a common obstacle in automated marine mapping (<xref ref-type="bibr" rid="B33">Lecours et&#xa0;al., 2017</xref>). Artefacts are recurrent in the extensive INFOMAR MBES bathymetry dataset, which is a combination of data from hundreds of different surveys with an array of vessels and survey operators, acquired with different (improving) instrumentation, in the space of about 25 years. The topographic variability introduced may consist of pixelation (salt-and-pepper effects), undulation along the swath, striping effects and cliff-like edges, and the vertical difference is often comparable with real features at the seabed (e.g. megaripples or furrowing) (<xref ref-type="fig" rid="f9">
<bold>Figures&#xa0;9C, D</bold>
</xref>). Additionally, our hillshades are particularly susceptible to this kind of &#x201c;topographic noise&#x201d;, as they are vertically exaggerated to enhance the visibility of faint&#xa0;terrain patterns, which diminishes considerably their effectiveness. While a study of the effect of artefacts was outside the scope of this paper, it is reasonable to affirm that much stronger predictions can be achieved with a &#x201c;cleaner&#x201d; dataset.</p>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Habitat mapping applications</title>
<p>Morphological maps provide the backbone for seabed habitat mapping studies, with classifications commonly obtained using semi-automated techniques such as OBIA, BTM or other GIS tools (<xref ref-type="bibr" rid="B24">Harris et&#xa0;al., 2014</xref>; <xref ref-type="bibr" rid="B21">Goes et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B38">Linklater et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B1">Arosio et&#xa0;al., 2021</xref>) that segment the seabed into discrete parcels subsequently classified on the basis of pixel group statistics or geometrical characteristics. While grounded in mathematical rules and granting replicability, these techniques lack flexibility (e.g. how to treat morphological exceptions or near-isomorphisms) and require a good measure of engineering. Moreover, rules applied in one seabed region do not necessarily work elsewhere, so each dataset might need to be treated differently. On the contrary, FCNNs can provide the flexibility needed to capture any instance of discrete landforms or terrain textures without requiring <italic>ad hoc</italic> segmentation protocols (OBIA) or formulation of classification rulesets (BTM).</p>
<p>A semi-quantitative assessment of the effectiveness of the FCNN predictions for habitat mapping can be made comparing bedrock or sediment texture substrates to existing maps. We compared the predicted FCNN &#x201c;bedrock&#x201d; classes (<italic>Fissured, Layered</italic> and <italic>Hummocky (rock)</italic>), with the bedrock substrate layer produced by INFOMAR and available on the INFOMAR portal (<xref ref-type="bibr" rid="B28">INFOMAR, 2022</xref>). In <xref ref-type="fig" rid="f11">
<bold>Figure&#xa0;11</bold>
</xref> we take the models with best scores in &#x201c;bedrock&#x201d; prediction (model #29 and the modal vote map) and overlap the INFOMAR layer. We limit the comparison area to a subsection of the entire dataset (indicated in <xref ref-type="fig" rid="f11">
<bold>Figure&#xa0;11C</bold>
</xref>), as parts of the INFOMAR layer are mapped at very low resolution (e.g. the areas in <xref ref-type="fig" rid="f11">
<bold>Figure&#xa0;11C</bold>
</xref> pointed by the red arrows), introducing further deviations, and in other zones the <italic>Hummocky (rock)</italic> class includes also rough glacial till substrate. The best comparison is provided by the modal vote map, with a total bedrock area of 2721 km<sup>2</sup> (INFOMAR = 2336 km<sup>2</sup>) and an overlap of 77%. Model #29 has a slightly better overlap (~78%), but has also a larger area mapped as rock (3276 km<sup>2</sup>). Most of this excess bedrock is caused by misinterpretation of <italic>Layered (rock)</italic> (<xref ref-type="fig" rid="f11">
<bold>Figure&#xa0;11D</bold>
</xref>), which is over-represented in the model (F1 score 0.58). These numbers have to be taken with a pinch of salt, the mapping approaches are different (e.g. in the INFOMAR dataset the fissures in the bedrock outcrops are given another class), at a slightly different resolution and using different input layers (the INFOMAR map relies abundantly on backscatter data). Nonetheless, there is a broad agreement between the two, and the FCNNs consistently predict bedrock where it has been effectively mapped (see <xref ref-type="fig" rid="f11">
<bold>Figure&#xa0;11</bold>
</xref>). Moreover they give further information on the texture of the bedrock, which can be useful for habitat predictions (<xref ref-type="bibr" rid="B46">Novaczek et&#xa0;al., 2017</xref>). A similar comparison can be made with submarine dune fields. In <xref ref-type="fig" rid="f12">
<bold>Figure&#xa0;12</bold>
</xref> we compare the general location of submarine dune ridges extracted using semi-automated techniques and checked manually (<xref ref-type="bibr" rid="B2">Arosio et&#xa0;al., 2023</xref>) with the class <italic>Ridge (sediment)</italic> in the best performing models (#12 and the modal vote map). Once more the results show an overall agreement, with <italic>Ridge (sediment)</italic> predictions corresponding with dune field areas (<xref ref-type="fig" rid="f12">
<bold>Figures&#xa0;12A, B</bold>
</xref>). In some places the FCNN is more efficient in identifying subtler ridges (e.g. <xref ref-type="fig" rid="f12">
<bold>Figure&#xa0;12F</bold>
</xref>), however in parts the related classes <italic>Large Ridge</italic> and <italic>Corrugation (sediment)</italic> were preferentially selected (e.g. <xref ref-type="fig" rid="f12">
<bold>Figures&#xa0;12E, F</bold>
</xref>). The models show higher levels of confusion in the presence of trochoidal dunes (<xref ref-type="fig" rid="f12">
<bold>Figure&#xa0;12D</bold>
</xref>) that are often misclassified as <italic>Fissured (rock)</italic> indicating that the labelling is not effective enough to train for this particular morphological distinction.</p>
<fig id="f11" position="float">
<label>Figure&#xa0;11</label>
<caption>
<p>Bedrock mapping results for the best achieving models (in rock-related classes) and comparison with INFOMAR substrate map <bold>(A&#x2013;C)</bold>. Insets <bold>(D, E)</bold> show a zoom-in for the results of models #29 and the modal vote map respectively, and the amount of correspondence to the INFOMAR shapefile. The INFOMAR bedrock vector shapefile (in light red) is overlaid on the FCNN green shapefile. Inset <bold>(F)</bold> shows the hillshaded bathymetry of the same area.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g011.tif"/>
</fig>
<fig id="f12" position="float">
<label>Figure&#xa0;12</label>
<caption>
<p>Submarine dune fields (Ridge, sediment) mapping results for the best achieving models and comparison with unpublished semi-automated mapping performed by the authors <bold>(A&#x2013;C)</bold>. The semi-automated dune vector shapefile (in light red) is overlaid on the FCNN green shapefile. Insets <bold>(D&#x2013;F)</bold> show zoom-ins of the modal vote map.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1228867-g012.tif"/>
</fig>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Final considerations</title>
<p>This exploratory study has shown that FCNNs have considerable potential for the creation of large-scale seabed landform and terrain texture maps, and that even with relatively modest human input the results can be satisfactory. A clear semantic class definition and label delineation (including numerous boundary cases) will improve the accuracy of the classification, while a more rigorous consistency in mapping scale will most likely reduce ambiguity. Our results show that the optimisation of derivative selection helps the model outputs, and a combination of hillshaded layers contributes substantially to prediction improvements. Further insights on the contribution of each layer could be obtained using techniques based on feature importance, such as saliency maps (e.g. <xref ref-type="bibr" rid="B51">Simonyan et&#xa0;al., 2014</xref>). The ensemble voting map, which constituted the best outcome of these experiments, clearly shows the utility of using learnt biases on different subsets of input data, and that assembling predictions from several &#x2018;weak&#x2019; learners outperforms a single &#x2018;expert&#x2019; network, which is the premise of ensemble learning (<xref ref-type="bibr" rid="B18">Ganaie et&#xa0;al., 2022</xref>). For further work, several FCNNs could be trained concurrently on different subsets of input data, and a loss could be calculated based on the confidence of individual networks (<xref ref-type="bibr" rid="B22">Goyal et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B62">Zhou et&#xa0;al., 2021</xref>). The latter is akin to several Decision trees in a Random Forest in classical machine learning (<xref ref-type="bibr" rid="B10">Cutler et&#xa0;al., 2012</xref>).</p>
<p>From a habitat mapper&#x2019;s perspective, the use of FCNNs can be successfully applied to seabed maps for morphological characterisation, and very good results and flexibility can be achieved provided the model is well trained and furnished with clean data. Very large scale mapping endeavours, such as that presented in <xref ref-type="bibr" rid="B24">Harris et&#xa0;al. (2014)</xref>, could be easily replicated and improved upon using FCNNs. Moreover, previously trained models could be applied on the new datasets that are being collected and gathered for Seabed 2030. If a sufficient volume of labelled classes is cooperatively assembled in a &#x201c;dictionary&#x201d; and made publicly available, it could be used by the community to predict morphological classes across different datasets, improving upon map objectivity and inter-comparison. The time invested in creating such a dictionary would be considerable but worthwhile, as the FCNN method will be eventually better, quicker and easily repeatable compared to semi-automated or manual digitisations. We shared our labelled dataset on GitHub (<ext-link ext-link-type="uri" xlink:href="https://github.com/BrandonHobley/geomorph_deep">https://github.com/BrandonHobley/geomorph_deep</ext-link>) as a starting point. While a fair amount of computing power is necessary, the code is open source and requires a relatively basic level of coding expertise to be run, allowing for a widespread adoption.</p>
<p>FCNNs also have significant drawbacks. Firstly, they are essentially a blackbox whose internal workings are not fully understood. Secondly, labelling and training at one specific pixel resolution is most likely not transferable to a different one. So, having mapped at 25m/pixel, our dataset is probably ineffective for mapping at 2m/pixel, and more <italic>ad hoc</italic> labelling will be required. Finally, at this stage of sophistication, FCNNs fail to recognize complex geomorphological processes, especially in cases of isomorphism, so human intervention is still required. This limitation is also caused by the input types themselves, as bathymetry-derived raster data alone are often insufficient (for human geomorphologists too!) to unequivocally identify seabed landforms. Only when different types of datasets (seismic lines, ground-truthing etc.) can be included in the predictions, will machine learning be useful for more complex seabed geological interpretations.</p>
</sec>
</sec>
<sec id="s5" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s6" sec-type="author-contributions">
<title>Author contributions</title>
<p>RA conceived the original idea, performed the labelling and analysed the results. BH wrote the neural networks and ran the models. RA and BH developed the approach and wrote the manuscript. AL and AW obtained the funding and reviewed the&#xa0;manuscript. FS, LC, and TF reviewed the manuscript. All authors provided useful feedback and helped shape the research. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<sec id="s7" sec-type="funding-information">
<title>Funding</title>
<p>RA received funding from the Irish Marine Institute&#x2019;s research grant PDOC 19/08/03. LC and TF were funded by the European Union&#x2019;s Horizon 2020 research and innovation programme under Grant Agreement No 862428 (MISSION ATLANTIC).</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>Firstly, we would like to thank the captains and crews of the Irish research vessels that assisted in the collection of the INFOMAR dataset for their hard work. The map contains Irish Public Sector Data (Marine Institute &amp; Geological Survey Ireland) licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) licence. </p>
</ack>
<sec id="s8" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s9" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s10" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fmars.2023.1228867/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fmars.2023.1228867/full#supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet_1.zip" id="SM1" mimetype="application/zip"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Arosio</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Mitchell</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Hawes</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Bolam</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Benson</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Sperry</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <source>Small island developing states (SIDS) and the sea: creating high resolution habitat maps to support effective marine management in St. Lucia</source> (<publisher-loc>Vienna</publisher-loc>: <publisher-name>Presented at the EGU</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.5194/egusphere-egu21-102</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Arosio</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Wheeler</surname> <given-names>A. J.</given-names>
</name>
<name>
<surname>Sacchetti</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Guinan</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Conti</surname> <given-names>L. A.</given-names>
</name>
<name>
<surname>Furey</surname> <given-names>T.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <source>The NOMANS_TIF map: ireland&#x2019;s first complete shallow seabed geomorphology map</source> (<publisher-loc>Saint-Gilles-Les-Bains</publisher-loc>: <publisher-name>Presented at the GeoHab 2023, La R&#xe9;union</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.5281/zenodo.7890332</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Barrett</surname> <given-names>A. M.</given-names>
</name>
<name>
<surname>Balme</surname> <given-names>M. R.</given-names>
</name>
<name>
<surname>Woods</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Karachalios</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Petrocelli</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Joudrier</surname> <given-names>L.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>NOAH-h, a deep-learning, terrain classification system for Mars: results for the ExoMars rover candidate landing sites</article-title>. <source>Icarus</source> <volume>371</volume>, <elocation-id>114701</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.icarus.2021.114701</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Benetti</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Dunlop</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Cofaigh</surname> <given-names>C.&#xd3;.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Glacial and glacially-related features on the continental margin of northwest Ireland mapped from marine geophysical data</article-title>. <source>J. Maps</source> <volume>6</volume>, <fpage>14</fpage>&#x2013;<lpage>29</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.4113/jom.2010.1092</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Blaschke</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Hay</surname> <given-names>G. J.</given-names>
</name>
<name>
<surname>Kelly</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Lang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Hofmann</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Addink</surname> <given-names>E.</given-names>
</name>
<etal/>
</person-group>. (<year>2014</year>). <article-title>Geographic object-based image analysis &#x2013; towards a new paradigm</article-title>. <source>ISPRS J. Photogrammetry Remote Sens.</source> <volume>87</volume>, <fpage>180</fpage>&#x2013;<lpage>191</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.isprsjprs.2013.09.014</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Brown</surname> <given-names>C. J.</given-names>
</name>
<name>
<surname>Smith</surname> <given-names>S. J.</given-names>
</name>
<name>
<surname>Lawton</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Anderson</surname> <given-names>J. T.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Benthic habitat mapping: a review of progress towards improved understanding of the spatial ecology of the seafloor using acoustic techniques</article-title>. <source>Estuarine Coast. Shelf Sci.</source> <volume>92</volume>, <fpage>502</fpage>&#x2013;<lpage>520</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ecss.2011.02.007</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Buscombe</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Wernette</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Fitzpatrick</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Favela</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Goldstein</surname> <given-names>E. B.</given-names>
</name>
<name>
<surname>Enwright</surname> <given-names>N. M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A 1.2 billion pixel human-labeled dataset for data-driven classification of coastal environments</article-title>. <source>Sci. Data</source> <volume>10</volume>, <fpage>46</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41597-023-01929-2</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Conti</surname> <given-names>L. A.</given-names>
</name>
<name>
<surname>Lim</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Wheeler</surname> <given-names>A. J.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>High resolution mapping of a cold water coral mound</article-title>. <source>Sci. Rep.</source> <volume>9</volume>, <fpage>1016</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-018-37725-x</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Creane</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Coughlan</surname> <given-names>M.</given-names>
</name>
<name>
<surname>O&#x2019;Shea</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Murphy</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Development and dynamics of sediment waves in a complex morphological and tidal dominant system: southern Irish Sea</article-title>. <source>Geosciences</source> <volume>12</volume>, <elocation-id>431</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/geosciences12120431</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Cutler</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Cutler</surname> <given-names>D. R.</given-names>
</name>
<name>
<surname>Stevens</surname> <given-names>J. R.</given-names>
</name>
</person-group> (<year>2012</year>). &#x201c;<article-title>Random forests</article-title>,&#x201d; in <source>Ensemble machine learning</source>. Eds. <person-group person-group-type="editor">
<name>
<surname>Zhang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Ma</surname> <given-names>Y.</given-names>
</name>
</person-group> (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer New York</publisher-name>), <fpage>157</fpage>&#x2013;<lpage>175</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/978-1-4419-9326-7_5</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>de Oliveira</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Bastos</surname> <given-names>A. C.</given-names>
</name>
<name>
<surname>da Silva Quaresma</surname> <given-names>V.</given-names>
</name>
<name>
<surname>Vieira</surname> <given-names>F. V.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>The use of benthic terrain modeler (BTM) in the characterization of continental shelf habitats</article-title>. <source>Geo-Mar Lett.</source> <volume>40</volume>, <fpage>1087</fpage>&#x2013;<lpage>1097</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00367-020-00642-y</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>de Oliveira</surname> <given-names>L. M. C.</given-names>
</name>
<name>
<surname>Lim</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Conti</surname> <given-names>L. A.</given-names>
</name>
<name>
<surname>Wheeler</surname> <given-names>A. J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>3D classification of cold-water coral reefs: a comparison of classification techniques for 3D reconstructions of cold-water coral reefs and seabed</article-title>. <source>Front. Mar. Sci.</source> <volume>8</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fmars.2021.640713</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Diesing</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Green</surname> <given-names>S. L.</given-names>
</name>
<name>
<surname>Stephens</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Lark</surname> <given-names>R. M.</given-names>
</name>
<name>
<surname>Stewart</surname> <given-names>H. A.</given-names>
</name>
<name>
<surname>Dove</surname> <given-names>D.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Mapping seabed sediments: comparison of manual, geostatistical, object-based image analysis and machine learning approaches</article-title>. <source>Continental Shelf Res.</source> <volume>84</volume>, <fpage>107</fpage>&#x2013;<lpage>119</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.csr.2014.05.004</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Diesing</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Thorsnes</surname> <given-names>T.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Mapping of cold-water coral carbonate mounds based on geomorphometric features: an object-based approach</article-title>. <source>Geosciences</source> <volume>8</volume>, <elocation-id>34</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/geosciences8020034</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Dove</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Bradwell</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Carter</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Cotteril</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Gafeira</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Green</surname> <given-names>S.</given-names>
</name>
<etal/>
</person-group>. (<year>2016</year>). <source>Seabed geomorphology: a two-part classification system (Marine geosciences programme open report no. OR/16/001)</source> (<publisher-loc>Edinburgh</publisher-loc>: <publisher-name>British Geological Survey</publisher-name>).</citation>
</ref>
<ref id="B16">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Dove</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Nanson</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Bjarnad&#xf3;ttir</surname> <given-names>L. R.</given-names>
</name>
<name>
<surname>Guinan</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Gafeira</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Post</surname> <given-names>A.</given-names>
</name>
<etal/>
</person-group>. (<year>2020</year>). <source>A two-part seabed geomorphology classification scheme (v.2); part 1: morphology features glossary</source> (<publisher-name>Zenodo</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.5281/ZENODO.4075248</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Foroutan</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Zimbelman</surname> <given-names>J. R.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Semi-automatic mapping of linear-trending bedforms using &#x2018;Self-organizing maps&#x2019; algorithm</article-title>. <source>Geomorphology</source> <volume>293</volume>, <fpage>156</fpage>&#x2013;<lpage>166</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.geomorph.2017.05.016</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ganaie</surname> <given-names>M. A.</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Malik</surname> <given-names>A. K.</given-names>
</name>
<name>
<surname>Tanveer</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Suganthan</surname> <given-names>P. N.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Ensemble deep learning: a review</article-title>. <source>Eng. Appl. Artif. Intell</source>. <volume>115</volume>, <fpage>105151</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.engappai.2022.105151</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gazis</surname> <given-names>I. Z.</given-names>
</name>
<name>
<surname>Schoening</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Alevizos</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Greinert</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Quantitative mapping and predictive modeling of Mn nodules&#x2019; distribution from hydroacoustic and optical AUV data linked by random forests machine learning</article-title>. <source>Biogeosciences</source> <volume>15</volume>, <fpage>7347</fpage>&#x2013;<lpage>7377</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.5194/bg-15-7347-2018</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Giglio</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Benetti</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Sacchetti</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Lockhart</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Hughes Clarke</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Plets</surname> <given-names>R.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>A Late Pleistocene channelized subglacial meltwater system on the Atlantic continental shelf south of Ireland</article-title>. <source>Boreas</source>, <volume>51</volume>, <page-range>118&#x2013;135</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/bor.12536</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Goes</surname> <given-names>E. R.</given-names>
</name>
<name>
<surname>Brown</surname> <given-names>C. J.</given-names>
</name>
<name>
<surname>Ara&#xfa;jo</surname> <given-names>T. C.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Geomorphological classification of the benthic structures on a tropical continental shelf</article-title>. <source>Front. Mar. Sci.</source> <volume>6</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fmars.2019.00047</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Goyal</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Oakley</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Bansal</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Dancey</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Yap</surname> <given-names>M. H.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Skin lesion segmentation in dermoscopic images with ensemble deep learning methods</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>4171</fpage>&#x2013;<lpage>4181</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ACCESS.2019.2960504</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Harris</surname> <given-names>P. T.</given-names>
</name>
<name>
<surname>Baker</surname> <given-names>E.</given-names>
</name>
</person-group> (<year>2020</year>). <source>Seafloor geomorphology as benthic habitat</source> (<publisher-name>Elsevier</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.1016/C2017-0-02139-0</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Harris</surname> <given-names>P. T.</given-names>
</name>
<name>
<surname>Macmillan-Lawler</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Rupp</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Baker</surname> <given-names>E. K.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Geomorphology of the oceans</article-title>. <source>Mar. Geology</source> <volume>352</volume>, <fpage>4</fpage>&#x2013;<lpage>24</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.margeo.2014.01.011</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>He</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Gkioxari</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Dollar</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Girshick</surname> <given-names>R.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Mask R-CNN</article-title>,&#x201d; <source>2017 IEEE International Conference on Computer Vision (ICCV)</source>. (<publisher-loc>Venice, Italy</publisher-loc>) <fpage>2980</fpage>&#x2013;<lpage>2988</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ICCV.2017.322</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hobley</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Arosio</surname> <given-names>R.</given-names>
</name>
<name>
<surname>French</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Bremner</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Dolphin</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Mackiewicz</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Semi-supervised segmentation for coastal monitoring seagrass using RPA imagery</article-title>. <source>Remote Sens.</source> <volume>13</volume>, <elocation-id>1741</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs13091741</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ierodiaconou</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Schimel</surname> <given-names>A. C. G.</given-names>
</name>
<name>
<surname>Kennedy</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Monk</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Gaylard</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Young</surname> <given-names>M.</given-names>
</name>
<etal/>
</person-group>. (<year>2018</year>). <article-title>Combining pixel and object based image analysis of ultra-high resolution multibeam bathymetry and backscatter for habitat mapping in shallow marine waters</article-title>. <source>Mar. Geophys Res.</source> <volume>39</volume>, <fpage>271</fpage>&#x2013;<lpage>288</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11001-017-9338-z</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<collab>INFOMAR</collab>
</person-group> (<year>2022</year>). <article-title>Seabed sediment classification Irish waters WGS84 shapefile</article-title>.</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ismail</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Huvenne</surname> <given-names>V. A. I.</given-names>
</name>
<name>
<surname>Masson</surname> <given-names>D. G.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Objective automated classification technique for marine landscape mapping in submarine canyons</article-title>. <source>Mar. Geology</source> <volume>362</volume>, <fpage>17</fpage>&#x2013;<lpage>32</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.margeo.2015.01.006</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Juliani</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Automated discrimination of fault scarps along an Arctic mid-ocean ridge using neural networks</article-title>. <source>Comput. Geosciences</source> <volume>124</volume>, <fpage>27</fpage>&#x2013;<lpage>36</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cageo.2018.12.010</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Keohane</surname> <given-names>I.</given-names>
</name>
<name>
<surname>White</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Chimney identification tool for automated detection of hydrothermal chimneys from high-resolution bathymetry using machine learning</article-title>. <source>Geosciences</source> <volume>12</volume>, <elocation-id>176</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/geosciences12040176</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Krizhevsky</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Sutskever</surname> <given-names>I.</given-names>
</name>
<name>
<surname>Hinton</surname> <given-names>G. E.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>ImageNet classification with deep convolutional neural networks</article-title>. <source>Commun. ACM</source> <volume>60</volume>, <fpage>84</fpage>&#x2013;<lpage>90</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1145/3065386</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lecours</surname> <given-names>V.</given-names>
</name>
<name>
<surname>Devillers</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Edinger</surname> <given-names>E. N.</given-names>
</name>
<name>
<surname>Brown</surname> <given-names>C. J.</given-names>
</name>
<name>
<surname>Lucieer</surname> <given-names>V. L.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Influence of artefacts in marine digital terrain models on habitat maps and species distribution models: a multiscale assessment</article-title>. <source>Remote Sens. Ecol. Conserv.</source> <volume>3</volume>, <fpage>232</fpage>&#x2013;<lpage>246</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/rse2.49</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lecours</surname> <given-names>V.</given-names>
</name>
<name>
<surname>Dolan</surname> <given-names>M. F. J.</given-names>
</name>
<name>
<surname>Micallef</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Lucieer</surname> <given-names>V. L.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>A review of marine geomorphometry, the quantitative study of the seafloor</article-title>. <source>Hydrol. Earth Syst. Sci.</source> <volume>20</volume>, <fpage>3207</fpage>&#x2013;<lpage>3244</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.5194/hess-20-3207-2016</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>LeCun</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Bengio</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Hinton</surname> <given-names>G.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Deep learning</article-title>. <source>Nature</source> <volume>521</volume>, <fpage>436</fpage>&#x2013;<lpage>444</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/nature14539</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Leit&#xe3;o</surname> <given-names>P. J.</given-names>
</name>
<name>
<surname>Schwieder</surname> <given-names>M.</given-names>
</name>
<name>
<surname>P&#xf6;tzschner</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Pinto</surname> <given-names>J. R. R.</given-names>
</name>
<name>
<surname>Teixeira</surname> <given-names>A. M. C.</given-names>
</name>
<name>
<surname>Pedroni</surname> <given-names>F.</given-names>
</name>
<etal/>
</person-group>. (<year>2018</year>). <article-title>From sample to pixel: multi-scale remote sensing data for upscaling aboveground carbon data in heterogeneous landscapes</article-title>. <source>Ecosphere</source> <volume>9</volume>, <elocation-id>e02298</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/ecs2.2298</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lin</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Yan</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Network in network</article-title>. <source>International Conference on Learning Representations (ICLR) (Banff)</source>. Available at: <uri xlink:href="http://arxiv.org/abs/1312.4400">http://arxiv.org/abs/1312.4400</uri>.</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Linklater</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Ingleton</surname> <given-names>T. C.</given-names>
</name>
<name>
<surname>Kinsela</surname> <given-names>M. A.</given-names>
</name>
<name>
<surname>Morris</surname> <given-names>B. D.</given-names>
</name>
<name>
<surname>Allen</surname> <given-names>K. M.</given-names>
</name>
<name>
<surname>Sutherland</surname> <given-names>M. D.</given-names>
</name>
<etal/>
</person-group>. (<year>2019</year>). <article-title>Techniques for classifying seabed morphology and composition on a subtropical-temperate continental shelf</article-title>. <source>Geosciences</source> <volume>9</volume>, <elocation-id>141</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/geosciences9030141</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lockhart</surname> <given-names>E. A.</given-names>
</name>
<name>
<surname>Scourse</surname> <given-names>J. D.</given-names>
</name>
<name>
<surname>Praeg</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Van Landeghem</surname> <given-names>K. J. J.</given-names>
</name>
<name>
<surname>Mellett</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Saher</surname> <given-names>M.</given-names>
</name>
<etal/>
</person-group>. (<year>2018</year>). <article-title>A stratigraphic investigation of the Celtic Sea megaridges based on seismic and core data from the Irish-UK sectors</article-title>. <source>Quaternary Sci. Rev.</source> <volume>198</volume>, <fpage>156</fpage>&#x2013;<lpage>170</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.quascirev.2018.08.029</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Long</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Shelhamer</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Darrell</surname> <given-names>T.</given-names>
</name>
</person-group> (<year>2014</year>). <source>Fully convolutional networks for semantic segmentation</source> (<publisher-loc>Boston, MA, USA</publisher-loc>: <publisher-name>IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</publisher-name>), <page-range>3431&#x2013;3440</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/CVPR.2015.7298965</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lundine</surname> <given-names>M. A.</given-names>
</name>
<name>
<surname>Brothers</surname> <given-names>L. L.</given-names>
</name>
<name>
<surname>Trembanis</surname> <given-names>A. C.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Deep learning for pockmark detection: implications for quantitative seafloor characterization</article-title>. <source>Geomorphology</source> <volume>421</volume>, <elocation-id>108524</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.geomorph.2022.108524</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ma</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Mei</surname> <given-names>G.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Deep learning for geological hazards analysis: data, models, applications, and opportunities</article-title>. <source>Earth-Science Rev.</source> <volume>223</volume>, <elocation-id>103858</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.earscirev.2021.103858</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>McClinton</surname> <given-names>T. J.</given-names>
</name>
<name>
<surname>White</surname> <given-names>S. M.</given-names>
</name>
<name>
<surname>Sinton</surname> <given-names>J. M.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Neuro-fuzzy classification of submarine lava flow morphology</article-title>. <source>Photogrammetric Eng. Remote Sens.</source> <volume>78</volume>, <fpage>605</fpage>&#x2013;<lpage>616</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.14358/PERS.78.6.605</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Micallef</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Le Bas</surname> <given-names>T. P.</given-names>
</name>
<name>
<surname>Huvenne</surname> <given-names>V. A. I.</given-names>
</name>
<name>
<surname>Blondel</surname> <given-names>P.</given-names>
</name>
<name>
<surname>H&#xfc;hnerbach</surname> <given-names>V.</given-names>
</name>
<name>
<surname>Deidun</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>A multi-method approach for benthic habitat mapping of shallow coastal areas with high-resolution multibeam data</article-title>. <source>Continental Shelf Res.</source> <volume>39&#x2013;40</volume>, <fpage>14</fpage>&#x2013;<lpage>26</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.csr.2012.03.008</pub-id>
</citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Noh</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Hong</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Han</surname> <given-names>B.</given-names>
</name>
</person-group> (<year>2015</year>). <source>Learning deconvolution network for semantic segmentation</source> (<publisher-loc>Santiago, Chile</publisher-loc>: <publisher-name>IEEE International Conference on Computer Vision (ICCV)</publisher-name>), <page-range>1520&#x2013;1528</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ICCV.2015.178</pub-id>
</citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Novaczek</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Devillers</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Edinger</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Mello</surname> <given-names>L.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>High-resolution seafloor mapping to describe coastal denning habitat of a Canadian species at risk: Atlantic wolffish (<italic>Anarhichas lupus</italic>)</article-title>. <source>Can. J. Fish. Aquat. Sci.</source> <volume>74</volume>, <fpage>2073</fpage>&#x2013;<lpage>2084</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1139/cjfas-2016-0414</pub-id>
</citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>&#xd3; Cofaigh</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Dunlop</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Benetti</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Marine geophysical evidence for Late Pleistocene ice sheet extent and recession off northwest Ireland</article-title>. <source>Quaternary Sci. Rev.</source> <volume>44</volume>, <fpage>147</fpage>&#x2013;<lpage>159</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.quascirev.2010.02.005</pub-id>
</citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Palafox</surname> <given-names>L. F.</given-names>
</name>
<name>
<surname>Hamilton</surname> <given-names>C. W.</given-names>
</name>
<name>
<surname>Scheidt</surname> <given-names>S. P.</given-names>
</name>
<name>
<surname>Alvarez</surname> <given-names>A. M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Automated detection of geological landforms on Mars using convolutional neural networks</article-title>. <source>Comput. Geosciences</source> <volume>101</volume>, <fpage>48</fpage>&#x2013;<lpage>56</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cageo.2016.12.015</pub-id>
</citation>
</ref>
<ref id="B49">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ronneberger</surname> <given-names>O.</given-names>
</name>
<name>
<surname>Fischer</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Brox</surname> <given-names>T.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>U-Net: convolutional networks for biomedical image segmentation</article-title>,&#x201d; in <source>Medical image computing and computer-assisted intervention &#x2013; MICCAI 2015, lecture notes in computer science</source>. Eds. <person-group person-group-type="editor">
<name>
<surname>Navab</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Hornegger</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Wells</surname> <given-names>W. M.</given-names>
</name>
<name>
<surname>Frangi</surname> <given-names>A. F.</given-names>
</name>
</person-group> (<publisher-name>Springer International Publishing, Cham</publisher-name>), <fpage>234</fpage>&#x2013;<lpage>241</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/978-3-319-24574-4_28</pub-id>
</citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rubanenko</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Perez-Lopez</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Schull</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Lapotre</surname> <given-names>M. G. A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Automatic detection and segmentation of barchan dunes on Mars and Earth using a convolutional neural network</article-title>. <source>IEEE J. Sel. Top. Appl. Earth Observations Remote Sens.</source> <volume>14</volume>, <fpage>9364</fpage>&#x2013;<lpage>9371</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/JSTARS.2021.3109900</pub-id>
</citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Simonyan</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Vedaldi</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Zisserman</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Deep inside convolutional networks: visualising image classification models and saliency maps</article-title>. <source>Workshop at International Conference on Learning Representations</source>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.1312.6034</pub-id>
</citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Simonyan</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zisserman</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2015</year>). <source>Very deep convolutional networks for large-scale image recognition</source> (<publisher-loc>San Diego</publisher-loc>: <publisher-name>International Conference on Learning Representations (ICLR)</publisher-name>). Available at: <uri xlink:href="https://arxiv.org/abs/1409.1556">https://arxiv.org/abs/1409.1556</uri>.</citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Smith</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Mark</surname> <given-names>D. M.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Do mountains exist? Towards an ontology of landforms</article-title>. <source>Environ. Plann B Plann Des.</source> <volume>30</volume>, <fpage>411</fpage>&#x2013;<lpage>427</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1068/b12821</pub-id>
</citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Summers</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Lim</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Wheeler</surname> <given-names>A. J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A scalable, supervised classification of seabed sediment waves using an object-based image analysis approach</article-title>. <source>Remote Sens.</source> <volume>13</volume>, <elocation-id>2317</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs13122317</pub-id>
</citation>
</ref>
<ref id="B55">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Tarvainen</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Valpola</surname> <given-names>H.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Mean teachers are better role models: weight-averaged consistency targets improve semi-supervised deep learning results</article-title>,&#x201d; in <conf-name>31st Conference on Neural Information Processing Systems. Presented at the NIPS</conf-name>, <conf-loc>Long Beach, USA</conf-loc>.</citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Valentine</surname> <given-names>A. P.</given-names>
</name>
<name>
<surname>Kalnins</surname> <given-names>L. M.</given-names>
</name>
<name>
<surname>Trampert</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Discovery and analysis of topographic features using learning algorithms: a seamount case study</article-title>. <source>Geophysical Res. Lett.</source> <volume>40</volume>, <fpage>3048</fpage>&#x2013;<lpage>3054</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/grl.50615</pub-id>
</citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Van Landeghem</surname> <given-names>K. J. J.</given-names>
</name>
<name>
<surname>Uehara</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Wheeler</surname> <given-names>A. J.</given-names>
</name>
<name>
<surname>Mitchell</surname> <given-names>N. C.</given-names>
</name>
<name>
<surname>Scourse</surname> <given-names>J. D.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Post-glacial sediment dynamics in the Irish Sea and sediment wave morphology: data&#x2013;model comparisons</article-title>. <source>Continental Shelf Res.</source> <volume>29</volume>, <fpage>1723</fpage>&#x2013;<lpage>1736</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.csr.2009.05.014</pub-id>
</citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Walbridge</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Slocum</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Pobuda</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Wright</surname> <given-names>D. J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Unified geomorphological analysis workflows with benthic terrain modeler</article-title>. <source>Geosciences</source> <volume>8</volume>, <fpage>94</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/geosciences8030094</pub-id>
</citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Di</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Xin</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Wan</surname> <given-names>W.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Automatic detection of Martian dark slope streaks by machine learning using HiRISE images</article-title>. <source>ISPRS J. Photogrammetry Remote Sens.</source> <volume>129</volume>, <fpage>12</fpage>&#x2013;<lpage>20</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.isprsjprs.2017.04.014</pub-id>
</citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wilhelm</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Geis</surname> <given-names>M.</given-names>
</name>
<name>
<surname>P&#xfc;ttschneider</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Sievernich</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Weber</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Wohlfarth</surname> <given-names>K.</given-names>
</name>
<etal/>
</person-group>. (<year>2020</year>). <article-title>DoMars16k: a diverse dataset for weakly supervised geomorphologic analysis on Mars</article-title>. <source>Remote Sens.</source> <volume>12</volume>, <elocation-id>3981</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs12233981</pub-id>
</citation>
</ref>
<ref id="B61">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yasir</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Rahman</surname> <given-names>A. U.</given-names>
</name>
<name>
<surname>Gohar</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Habitat mapping using deep neural networks</article-title>. <source>Multimedia Syst.</source> <volume>27</volume>, <fpage>679</fpage>&#x2013;<lpage>690</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00530-020-00695-0</pub-id>
</citation>
</ref>
<ref id="B62">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Lu</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Qiu</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Huo</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Dong</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The ensemble deep learning model for novel COVID-19 on CT images</article-title>. <source>Appl. Soft Computing</source> <volume>98</volume>, <elocation-id>106885</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.asoc.2020.106885</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>