<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Mar. Sci.</journal-id>
<journal-title>Frontiers in Marine Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Mar. Sci.</abbrev-journal-title>
<issn pub-type="epub">2296-7745</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmars.2023.1093542</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Marine Science</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>A Part-based Deep Learning Network for identifying individual crabs using abdomen images</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Wu</surname>
<given-names>Chenjie</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1993520"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Xie</surname>
<given-names>Zhijun</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1658869"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chen</surname>
<given-names>Kewei</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Shi</surname>
<given-names>Ce</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<xref ref-type="aff" rid="aff5">
<sup>5</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/640731"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ye</surname>
<given-names>Yangfang</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/724070"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Xin</surname>
<given-names>Yu</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1688479"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zarei</surname>
<given-names>Roozbeh</given-names>
</name>
<xref ref-type="aff" rid="aff6">
<sup>6</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Huang</surname>
<given-names>Guangyan</given-names>
</name>
<xref ref-type="aff" rid="aff6">
<sup>6</sup>
</xref>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Department of Information Science and Engineering, Ningbo University</institution>, <addr-line>Ningbo</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Zhejiang Engineering Research Center of Advanced Mass Spectrometry and Clinical Application</institution>, <addr-line>Ningbo</addr-line>, <country>China</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Faculty of Mechanical Engineering and Mechanics, Ningbo University</institution>, <addr-line>Ningbo</addr-line>, <country>China</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>School of Marine Sciences, Ningbo University</institution>, <addr-line>Ningbo</addr-line>, <country>China</country>
</aff>
<aff id="aff5">
<sup>5</sup>
<institution>Key Laboratory of Applied Marine Biotechnology, Ningbo University, Chinese Ministry of Education</institution>, <addr-line>Ningbo</addr-line>, <country>China</country>
</aff>
<aff id="aff6">
<sup>6</sup>
<institution>School of Information Technology, Deakin University</institution>, <addr-line>Melbourne, VIC</addr-line>, <country>Australia</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Simone Marini, National Research Council (CNR), Italy</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Marco Spoto, National Research Council (CNR), Italy; Mattia Cavaiola, University of Genoa, Italy; Luciano Ortenzi, University of Tuscia, Italy</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Zhijun Xie, <email xlink:href="mailto:xiezhijun@nbu.edu.cn">xiezhijun@nbu.edu.cn</email>; Ce Shi, <email xlink:href="mailto:shice@nbu.edu.cn">shice@nbu.edu.cn</email>
</p>
</fn>
<fn fn-type="other" id="fn002">
<p>This article was submitted to Ocean Observation, a section of the journal Frontiers in Marine Science</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>07</day>
<month>02</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>10</volume>
<elocation-id>1093542</elocation-id>
<history>
<date date-type="received">
<day>10</day>
<month>11</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>17</day>
<month>01</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2023 Wu, Xie, Chen, Shi, Ye, Xin, Zarei and Huang</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Wu, Xie, Chen, Shi, Ye, Xin, Zarei and Huang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Crabs, such as swimming crabs and mud crabs, are famous for their high nutritional value but are difficult to preserve. Thus, the traceability of crabs is vital for food safety. Existing deep-learning methods can be applied to identify individual crabs. However, there is no previous study that used abdomen images to identify individual crabs. In this paper, we provide a novel Part-based Deep Learning Network (PDN) to reliably identify an individual crab from its abdomen images captured under various conditions. In our PDN, we developed three non-overlapping and three overlapping partitions strategies of the abdomen image and further designed a part attention block. A swimming crab (Crab-201) dataset with the abdomen images of 201 swimming crabs and a more complex mud crab dataset (Crab-146) were collected to train and test the proposed PDN. Experimental results show that the proposed PDN using the overlapping partition strategy is better than the non-overlapping partition strategy. The edge texture of the abdomen has more identifiable features than the sulciform texture of the lower part of the abdomen. It also demonstrates that the proposed PDN_OS3, which emphasizes the edge texture of the abdomen with overlapping partition strategies, is more reliable and accurate than the counterpart methods to identify an individual crab.</p>
</abstract>
<kwd-group>
<kwd>crab</kwd>
<kwd>deep learning</kwd>
<kwd>individual identification</kwd>
<kwd>re-identification</kwd>
<kwd>local feature</kwd>
</kwd-group>
<contract-num rid="cn001">U20A20121</contract-num>
<contract-num rid="cn002">LY21F020006</contract-num>
<contract-num rid="cn003">2019B10125, 2019B10028, 20201ZDYF020077, 20211ZDYF020230, 2022Z074</contract-num>
<contract-sponsor id="cn001">National Natural Science Foundation of China<named-content content-type="fundref-id">10.13039/501100001809</named-content>
</contract-sponsor>
<contract-sponsor id="cn002">Natural Science Foundation of Zhejiang Province<named-content content-type="fundref-id">10.13039/501100004731</named-content>
</contract-sponsor>
<contract-sponsor id="cn003">Science and Technology Innovation 2025 Major Project of Ningbo<named-content content-type="fundref-id">10.13039/501100017549</named-content>
</contract-sponsor>
<counts>
<fig-count count="13"/>
<table-count count="2"/>
<equation-count count="10"/>
<ref-count count="36"/>
<page-count count="12"/>
<word-count count="6314"/>
</counts>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Both swimming crabs (<italic>Portunus trituberculatus</italic>) and mud crabs (<italic>Scylla paramamosain</italic>) are members of the family <italic>Portunidae</italic>. They are widely distributed in the waters of China, Japan, and Korea (<xref ref-type="bibr" rid="B2">Dai et&#xa0;al., 1986</xref>; <xref ref-type="bibr" rid="B6">Hamasaki et&#xa0;al., 2006</xref>), and are also considered critical economic crabs with high nutritional value in China (<xref ref-type="bibr" rid="B32">Yu et&#xa0;al., 2006</xref>; <xref ref-type="bibr" rid="B18">Sun and Wang, 2020</xref>). In 2019, the marine fishing production of swimming crabs was 458,380 tons, down 4.34 percent year on year (<xref ref-type="bibr" rid="B31">Yearbook, 2020</xref>). Recently, the pressure from a marine resources recession has increased, and the amount of marine fishing has decreased yearly. Strong market demand has promoted the development of aquaculture (<xref ref-type="bibr" rid="B25">Wu et&#xa0;al., 2010</xref>).</p>
<p>With the development of crab farming, various food safety problems continuously emerged. For example, the development of the industry caused heavy metal pollution of aquatic organisms, and crabs are the most severe aquatic products contaminated with heavy metal cadmium (<xref ref-type="bibr" rid="B14">Pandiyan et&#xa0;al., 2021</xref>; <xref ref-type="bibr" rid="B29">Yang et&#xa0;al., 2021</xref>). Crab meat is rich in histidine, which can produce and accumulate histamine over a long time even after the crab death (<xref ref-type="bibr" rid="B13">Lipp and Rose, 1997</xref>), and its excessive intake by humans can cause illness or poisoning (<xref ref-type="bibr" rid="B5">Feddern et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B24">Worm et&#xa0;al., 2019</xref>). Crabs raised in non-standard farms are likely to be contaminated with a large number of bacteria and have excessive heavy metal content. Unscrupulous merchants will sell crabs that have been dead for a long time as normal commodities; these crabs will bring serious safety problems.</p>
<p>Based on the above safety concerns, the traceability of aquatic products has become an issue. Crab traceability requires a unique identification for each crab. The identification technologies currently used for traceability include barcodes, QR codes, electronic tags, anti-counterfeit crab buckles, and RFID tags (<xref ref-type="bibr" rid="B35">Zhao et&#xa0;al., 2012</xref>; <xref ref-type="bibr" rid="B26">Xiao et&#xa0;al., 2017</xref>; <xref ref-type="bibr" rid="B4">Faggion et&#xa0;al., 2020</xref>). However, these identifiers can easily be moved or forged. It is much more reliable to obtain characteristic identification by computer vision (CV) technology processing images of crabs than by physical identification.</p>
<p>In recent years, there has been an increasing number of studies on applying CV technology to agriculture and aquaculture (<xref ref-type="bibr" rid="B11">Kumar et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B16">Shen et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B30">Yang et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B1">Andrew et&#xa0;al., 2021</xref>; <xref ref-type="bibr" rid="B8">Hu et&#xa0;al., 2021</xref>; <xref ref-type="bibr" rid="B28">Xi et&#xa0;al., 2021</xref>). CV can also be applied to the individual identification of crabs. Because of heredity and living habits, the surface of a crab&#x2019;s shell has special textures and patterns, which, similar to human fingerprints, are individually unique and provide conditions for the individual identification of a crab. <xref ref-type="bibr" rid="B12">Li (2019)</xref> proposed an anti-counterfeiting algorithm for <italic>Eriocheir Sinensis</italic> based on local features images that analyzed images of the back of <italic>Eriocheir Sinensis</italic> for individual identification and traceability. <xref ref-type="bibr" rid="B22">Tai et&#xa0;al. (2021)</xref> used a Speeded Up Robust Features (SURF) algorithm to extract the back feature points of <italic>Eriocheir Sinensis</italic> and then used the bi-direction Fast Library for Approximate Nearest Neighbors (FLANN) algorithm to match the feature points saved in the database. <xref ref-type="bibr" rid="B33">Zhang et&#xa0;al. (2022)</xref> proposed an individual re-identification method based on Pyramidal Feature Fusion Model (PFFM) for swimming crabs.</p>
<p>The studies mentioned above used traditional CV or deep learning for the individual identification of crabs by using crabs&#x2019; back images. The method of using specialist equipment to photograph and traditional CV to process the images has poor robustness. Deep learning algorithms designed to analyze the backs of swimming crabs with distinguishing features are not ideal for crab species with inconspicuous back shell features. As shown in <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>, the mud crab (b) does not have the speckled features on its back shell that the swimming crab (a) has, while the abdomen of the mud crab (c) has textural features that are more obvious than the back shell features (b). Therefore, using crab abdomen images is more suitable than using crab back images for individual crab identification. However, there is no existing work for identifying individual crabs using abdomen images.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Differences between swimming crabs and mud crabs. <bold>(A)</bold> The back of the swimming crab. <bold>(B)</bold> The back of the mud crab. <bold>(C)</bold> The abdomen of the mud crab.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g001.tif"/>
</fig>
<p>The proposed Part-based Deep Learning Network (PDN) algorithm uses crabs&#x2019; abdomen images as features and applies a deep learning algorithm based on local features to identify individual crabs. Our PDN algorithm is improved from a classical person re-identification (ReID) algorithm, and a Part-based Convolutional Baseline (PCB) (<xref ref-type="bibr" rid="B20">Sun et&#xa0;al., 2018</xref>). A person&#x2019;s image can be divided into different parts (head, chest, abdomen, legs, etc.) from top to bottom. It is reasonable for the PCB algorithm and other part-based person ReID studies to perform a horizontal partition of a person&#x2019;s image (<xref ref-type="bibr" rid="B23">Wang et&#xa0;al., 2018</xref>). As <xref ref-type="bibr" rid="B20">Sun et&#xa0;al. (2018)</xref> mentioned, the number of parts also affects the algorithm&#x2019;s accuracy. Sun experimented and found that dividing the images into six parts was the most effective. The image partition strategy of the PCB algorithm determines the local features of the image, and different partition strategies will directly affect the effectiveness of the algorithm. After our experiments in subsection 2.3.4, we found that simply applying the PCB to the abdomen image of the crab&#x2019;s abdomen (<xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>) did not give good accuracy. Therefore, our PDN algorithm improves the partition strategy based on the features of the crab&#x2019;s abdomen image and adds an attention strategy to strengthen its feature expression.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Division of a crab&#x2019;s abdomen image into horizontal parts.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g002.tif"/>
</fig>
<p>This paper focuses on the abdomen images of swimming/mud crabs, uses a deep learning algorithm based on local features to learn abdomen features, and forms its unique code for each crab. Thus, it realizes the accurate identification of individual crabs and provides an individual identification technique for the traceability systems of crabs.</p>    <p>The contributions of this paper are as follows.</p>
<list list-type="simple">
<list-item>
<p>(1) We provide a novel PDN method, in which a deep learning algorithm based on local features is used to extract representative features from crab abdomen images. It improves the PCB by using the partition strategy based on the features of the crab&#x2019;s abdomen image and adding a Part Attention block (PA) to strengthen the feature representation.</p>
</list-item>
<list-item>
<p>(2) We further develop effective overlapping partition strategies to ensure that the representative feature of the key regions is preserved, particularly on the edges of the partitions.</p>
</list-item>
<list-item>
<p>(3) Two crab abdomen datasets (Crab-201 and Crab-146) were collected, and extensive experiments were conducted on both datasets. The experimental results demonstrate the accuracy and robustness of our proposed method with optimal overlapping partition strategies.</p>
</list-item>
</list>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Dataset</title>
<p>To verify the effectiveness of our proposed algorithm, we collected two datasets of crab abdomen images (the swimming crab dataset called Crab-201 and the complex mud crab dataset called Crab-146).</p>
<p>The raw data for the datasets were images and videos photographed by mobile phones. The original images or frames from the original video were fed into a trained YOLOv1 (<xref ref-type="bibr" rid="B15">Redmon et&#xa0;al., 2016</xref>) target detection algorithm, which can identify the abdomen of a crab to obtain the bounding boxes of the crab&#x2019;s abdomen image. The images were cropped according to the bounding boxes.</p>
<p>Crab-201 All swimming crabs collected in Crab-201 were provided by a crab farm in Ningbo (Zhejiang, China), and were numbered after being photographed by mobile phone. Each swimming crab was photographed several times with different light intensities and different angles to simulate the varying effects under different conditions. We defined 12 conditions for the abdomen images of the swimming crabs; the conditions of an example crab are shown in <xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3A</bold>
</xref>. The 12 conditions were standard, grainy, clockwise rotation, low resolution, high resolution, low angle, contrarotation, lower-left part, upper-right part, dim light, over-exposed, and rotate with low angle. There were 2,412 images in the dataset Crab-201. Out of these, 1716 images of 143 crabs in the dataset were used as the training set, and the remaining 696 images of 58 swimming crabs were used as the test set. One image of each crab in the training set was randomly selected as the validation set. The validation set was used to verify the model, which can be continuously adjusted according to the situation to select the best model among them.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Demo images of the two crab abdomen datasets. <bold>(A)</bold> Crab-201. <bold>(B)</bold> Crab-146.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g003.tif"/>
</fig>
<p>Modeled on the Person Re-identification dataset, the test set was divided into Query and Gallery, where Query was the image to be queried, and Gallery was the collection of images to be compared with Query. One image from each category (i.e., a crab) was randomly selected as the Query image, and the other images formed a subset of the Gallery. Therefore, there were 58 images in the Query queue and 638 images (58*11) in the Gallery.</p>
<p>Crab-146 was a more complex multi-device abdomen dataset of mud crabs (<italic>Scylla paramamosain</italic>). The mud crabs were collected from a crab farm (130 crabs) and a seafood market (16 crabs) in Ningbo (Zhejiang, China). The mud crab dataset was divided into a training set and a test set in the ratio of 3:1, in which the training set had 1,147 images with 97 categories and the test set had 681 images with 49 categories. Images of the 16 crabs from the seafood market were put into the test set. Images of one mud crab in the Crab-146 dataset are shown in <xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3B</bold>
</xref>. The image was named in reference to the person ReID dataset Market-1501 (<xref ref-type="bibr" rid="B36">Zheng et&#xa0;al., 2015</xref>). Using &#x201c;27_4_1&#x201d; as an example, &#x201c;27&#x201d; is the crab number, &#x201c;4&#x201d; is the camera number and &#x201c;1&#x201d; is the image number. <xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref> also shows that the mud crab dataset is more complex compared to the swimming crab dataset because multiple devices were used to photograph the mud crabs at different times.</p>
<p>Similar to the Crab-201 dataset, the test set of the Crab-146 dataset consisted of a Query and a Gallery. The difference being that there was only one image of each id in the Gallery, and the remaining images were regarded as Query. Although this division method slightly reduced the difficulty of matching Query and Gallery each time, it greatly increased the number of Query. Images from the seafood market crabs were not collected at the same time or on the same device as other images. They were not involved in algorithm training, which made the test set able to also verify whether the algorithm was overfitting.</p>
<p>The image pixels of Crab-201 were between 385&#xd7;369 and 2418&#xd7;2131, and the image pixels of Crab-146 were between 210&#xd7;179 and 2959&#xd7;2591. The dataset was pre-processed as follows: After resizing the image to 3*256*256, the pixel values were divided by 255 and normalized to [0,1]. Then each channel was standardized in turn using the formula as follows:</p>
<disp-formula>
<label>(1)</label>
<mml:math display="block" id="M1">
<mml:mrow>
<mml:mi>z</mml:mi>
<mml:mo>=</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">/</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where the mean= [0.485, 0.456, 0.406], std(Standard Deviation)= [0.229, 0.224, 0.225], and the above values were calculated from the Imagenet dataset (<xref ref-type="bibr" rid="B3">Deng et&#xa0;al., 2009</xref>).</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>The proposed Part-based Deep learning Network</title>
<p>In this subsection, we provide a Part-based Deep learning Network (PDN) that can effectively extract representative abdomen features of crabs for individual identification. In subsection 2.2.1, we introduce the classical Person ReID algorithm PCB. In subsection 2.2.2, we develop non-overlapping partition strategies and overlapping partition strategies for dividing the whole body of a crab. In subsection 2.2.3, we detail the proposed PDN algorithm using an attention mechanism to identify individual crabs.</p>
<sec id="s2_2_1">
<label>2.2.1</label>
<title>The Part-based Convolutional Baseline</title>
<p>Person ReID is a popular CV task and most of the current animal individual identification algorithms are improved based on pedestrian reidentification algorithms. In the early person ReID, it takes the overall features of the image as the reidentification target, and then the target image is retrieved and matched by a hash algorithm. The limitations of this strategy are that the features of the reidentification target are greatly influenced by the image background and incur high feature errors. Also, the global features cannot fully capture the important local features and thus cannot accurately specify the reidentification target. Therefore, more studies consider local features extracted from key local regions to represent the features of the reidentified targets. The local feature-based ReID can tell the microscopic differences of the reidentified targets, and thus its reidentification effect is significantly better than that of the global feature-based algorithm. The target object partition is often adopted in the local feature-based ReID methods, and its representative algorithm is PCB, which has a simple workflow with high accuracy and efficiency.</p>
<p>
<xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref> shows the PCB workflow for extracting features from the abdomen images of crabs, where Backbone Network is used for original feature extraction from the image. PCB divides the original tensor <italic>T</italic>&#x2208;<italic>&#x211d;</italic>
<sup>2048&#xd7;16&#xd7;16</sup> , into <italic>p</italic> horizontal parts and averages all column vectors within the same part into a part-level column vector <italic>g</italic>
<sub>
<italic>i</italic>
</sub>&#xa0;(<italic>i</italic>&#x2208;1,2,&#x2026;,<italic>p</italic>) using traditional averaging pooling. Then, PCB uses a 1*1 kernel-sized convolutional layer to map <italic>g<sub>i</sub>
</italic> to <italic>h</italic>
<sub>
<italic>i</italic>
</sub>&#xa0;(<italic>i</italic>&#x2208;1,2,&#x2026;,<italic>p</italic>) (part descriptor). Finally, <italic>h<sub>i</sub>
</italic> is input into p classifiers, each consisting of a fully connected layer and a following Softmax function. Classifiers calculate the loss between the output prediction and the input identity (ID). In the training phase, the PCB calculates the cross-entropy loss for p-ID predictions and performs parameter optimization. In the testing phase, p pieces of <italic>h<sub>i</sub>
</italic> are connected to form the final descriptor, <italic>H</italic>.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>The workflow of PCB.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g004.tif"/>
</fig>
</sec>
<sec id="s2_2_2">
<label>2.2.2</label>
<title>The Partition Strategies</title>
<p>The whole body of the crab is divided into cephalothorax, abdomen, and appendages. As shown in <xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5A</bold>
</xref>, the abdomen is located behind the abdomen surface of the cephalothorax armor, which is called the sternum, and is covered by it. For convenience, we called the image containing the abdomen and sternum in <xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5B</bold>
</xref> the abdomen image of the crab. The abdomen image of the crab adopted in this paper is shown in <xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5B</bold>
</xref>, including the sternum and abdomen, and the texture was divided into sulciform texture and edge texture.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>
<bold>(A, B)</bold> The abdomen image of a swimming crab.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g005.tif"/>
</fig>
<sec id="s2_2_2_1">
<label>2.2.2.1</label>
<title>The non-overlapping partition strategies</title>
<p>
<xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5B</bold>
</xref> shows that the abdominal edge texture is obvious and contains more characteristic information, and the sulciform texture of the sternum is more obvious than that of the abdomen and can also be regarded as an important feature. The sulciform texture in the middle and lower part of the abdomen is not obvious and contains relatively less characteristic information. In order to verify the correctness of the above observations and to improve the accuracy of the algorithm, we developed three different non-overlapping partition strategies (i.e., NOS1-3) and enhanced the feature extraction of the abdominal edge texture and sternal groove texture based on the PCB algorithm. <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6</bold>
</xref> shows the three non-overlapping partition strategies designed in this paper.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>
<bold>(A&#x2013;C)</bold> Three non-overlapping partition strategies.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g006.tif"/>
</fig>
<p>In <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6A</bold>
</xref>, the tensor <italic>T</italic>&#x2208;<italic>&#x211d;</italic>
<sup>2048&#xd7;16&#xd7;16</sup> is divided into 2*2 parts. Since the abdomen of the crab is symmetrical from the left to the right, and the upper half texture features significantly differ from the lower half texture features, it is easier to maintain the abdomen texture features of the crab in the image processing using the grid partition compared to the horizontal partition strategy as shown in <xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>. Strategy 2 in <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6B</bold>
</xref> emphasizes the sulciform texture of the lower part of the abdomen of the crab. In <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6C</bold>
</xref>, two parts are set on the left and right sides of the abdomen, and each part contains a more complete edge texture of the abdomen compared to <xref ref-type="fig" rid="f6">
<bold>Figures&#xa0;6A, B</bold>
</xref>, so that the image features can fully express the edge texture features.</p>
</sec>
<sec id="s2_2_2_2">
<label>2.2.2.2</label>
<title>The overlapping partition strategies</title>
<p>In the non-overlapping partition strategies, the key region of the image was cut into multiple parts that are independent of each other, so, the complete features of the key region could not be extracted. We assumed that A in <xref ref-type="fig" rid="f7">
<bold>Figure&#xa0;7A</bold>
</xref> was a key region in the image and that if the non-overlapping partition method was applied, region A would be divided into two independent blocks, i.e., the left part and the right part. This produced the problem that neither of the two blocks could fully reflect the characteristics of region A; that is, we lacked the complete information of region A to capture the overall characteristics of the abdomen images of crabs. To overcome this limitation, we designed the overlapping partition strategies as shown in <xref ref-type="fig" rid="f7">
<bold>Figure&#xa0;7B</bold>
</xref> in this paper. In <xref ref-type="fig" rid="f7">
<bold>Figure&#xa0;7B</bold>
</xref>, region A is at the edge of the right part, and the right part cannot contain the complete information of region A, while region A is inside the left part, and the left part contains the complete information of region A. Similarly, the left part cannot contain the complete information of region B, while the right part contains it. Therefore, the overlapping partition method could ensure that any region in the image is inside the part, and thus avoided the loss of key information due to the partition of key regions; this enhanced the extraction of representative characteristics of individual crabs. Corresponding to the non-overlapping partition strategies in <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6</bold>
</xref>, we designed the overlapping partition strategies for the abdomen texture of crabs, as shown in <xref ref-type="fig" rid="f8">
<bold>Figure&#xa0;8</bold>
</xref>. Specifically, we extended the edges of all the parts divided by the non-overlapping strategy illustrated in <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6</bold>
</xref> by one row outwards, which was a simple and efficient way to achieve overlapping partition without excessive regulations. <xref ref-type="fig" rid="f8">
<bold>Figure&#xa0;8A</bold>
</xref> shows the overlapping partition strategy corresponding to <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6A</bold>
</xref>, dividing the crab abdomen image equally into 2*2 parts; <xref ref-type="fig" rid="f8">
<bold>Figure&#xa0;8B</bold>
</xref> shows the overlapping partition corresponding to <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6B</bold>
</xref>, emphasizing the sulciform texture of the lower part of the abdomen of the crab; while <xref ref-type="fig" rid="f8">
<bold>Figure&#xa0;8C</bold>
</xref> shows the overlapping partitioning corresponding to <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6C</bold>
</xref>, emphasizing the edge texture of the abdomen.</p>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Difference between an overlapping and a non-overlapping partition. <bold>(A)</bold> non-overlap partition. <bold>(B)</bold> overlap partition.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g007.tif"/>
</fig>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>
<bold>(A&#x2013;C)</bold> Three overlapping partition strategies.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g008.tif"/>
</fig>
<p>Therefore, through the comparison of these three non-overlapping partition strategies and three overlapping partition strategies it was possible to explore the effect of emphasizing or not different textures and overlapping partitions on the effectiveness of the algorithm.</p>
</sec>
<sec id="s2_2_2_3">
<label>2.2.2.3</label>
<title>The attention mechanism and the architecture of PDN</title>
<p>Inspired by the human visual attention mechanism, we provided an attention mechanism in which different parts of the input data or feature map that have different focus intensities were given their attention weights to weigh different spatial regions. In the proposed overlapping and non-overlapping partition strategies, the amount of feature information of the texture in each part was different, therefore, it was necessary to focus on the part with high feature information for identifying individual crabs. The Part Attention (PA) block of PDN is shown in the upper right region in <xref ref-type="fig" rid="f9">
<bold>Figure&#xa0;9</bold>
</xref>. For <italic>h</italic>
<sub>
<italic>i</italic>
</sub>&#xa0;(<italic>i</italic>&#x2208;1,2,&#x2026;,<italic>p</italic> ) in the column vector <italic>h</italic>, the vector <italic>h<sub>i</sub>
</italic> is input to a fully connected layer <italic>F</italic>C1<italic>
<sub>i</sub>
</italic> and a sigmoid layer to output an attention weight <italic>&#x3b1;<sub>i</sub>
</italic>, which is multiplied by the corresponding vector <italic>h<sub>i</sub>
</italic> to obtain the weighted vector <italic>n<sub>i</sub>
</italic>. Unlike other spatial attention mechanism methods (shown in <xref ref-type="fig" rid="f10">
<bold>Figure&#xa0;10A</bold>
</xref>), which generate weights for feature map vectors (e.g., RGA-S (<xref ref-type="bibr" rid="B34">Zhang et&#xa0;al., 2020</xref>)), our PA block generates weights at the part-level (shown in <xref ref-type="fig" rid="f10">
<bold>Figure&#xa0;10B</bold>
</xref>), which significantly reduces the number of parameters in the algorithm.</p>
<fig id="f9" position="float">
<label>Figure&#xa0;9</label>
<caption>
<p>The architecture of the PDN.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g009.tif"/>
</fig>
<fig id="f10" position="float">
<label>Figure&#xa0;10</label>
<caption>
<p>Visualization of attention mechanisms. <bold>(A)</bold> visualization of RGA-S <bold>(B)</bold> visualization of our part attention (PA) block.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g010.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="f9">
<bold>Figure&#xa0;9</bold>
</xref> shows the architecture of the PDN. The input image was resized to 256*256 and fed into a backbone network (we used the ResNet50 (<xref ref-type="bibr" rid="B7">He et&#xa0;al., 2016</xref>) as the backbone) to obtain tensor <italic>T</italic>&#x2208;<italic>&#x211d;</italic>
<sup>2048&#xd7;16&#xd7;16</sup> . Then tensor <italic>T</italic> was divided into <italic>p</italic> parts by our partition strategy, in this paper <italic>p</italic> = 4. Each part was fed into an average pooling layer to obtain column vector <italic>g</italic>
<sub>
<italic>i</italic>
</sub>&#xa0;(<italic>i</italic>&#x2208;1,2,&#x2026;,<italic>p</italic> ). Then, using a 1*1 convolutional layer of 1024 filters (with stride=1, padding=0) to reduce dimension from 2048 to 1024, we obtained vector <italic>h<sub>i</sub>
</italic>, then <italic>h<sub>i</sub>
</italic> was fed into the Part Attention block to obtain the weighted vector <italic>n<sub>i</sub>
</italic>. In the inference phase, the <italic>p</italic> vectors <italic>n<sub>i</sub>
</italic> were concatenated to obtain the final descriptor <italic>H</italic>.</p>
<p>In the training phase, vector <italic>n<sub>i</sub>
</italic> was fed into a Classifier, which output the prediction <inline-formula>
<mml:math display="inline" id="im1">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> of the size of the training set categories through a fully connected layer <italic>F</italic>C1 and a Softmax layer. Then we calculated the cross-entropy loss of <inline-formula>
<mml:math display="inline" id="im2">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and the ground truth label <italic>y</italic>: <inline-formula>
<mml:math display="inline" id="im3">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mrow>
<mml:mi>C</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>To further learn the discriminative features of the crab abdomen image, we also used circle loss (<xref ref-type="bibr" rid="B19">Sun et&#xa0;al., 2020</xref>) to reduce the similarity between vector <italic>n<sub>i</sub>
</italic> of different categories and increase the similarity between vector <italic>n<sub>i</sub>
</italic> of the same category. The similarity <italic>S<sub>i</sub>
</italic> is the cosine similarity of two vectors <italic>n<sub>i</sub>
</italic> of different images, as shown in Equation (2), where the <italic>n<sub>i</sub>
</italic>
<sub>1</sub> and <italic>n<sub>i</sub>
</italic>
<sub>2</sub> are the vector <italic>n<sub>i</sub>
</italic> of two different images. The similarity between vectors <italic>n<sub>i</sub>
</italic> of the same category was called <inline-formula>
<mml:math display="inline" id="im4">
<mml:mrow>
<mml:msubsup>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>p</mml:mi>
</mml:mrow>
<mml:mi>u</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>u</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mn>2</mml:mn>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mi>M</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>, and the similarity between different ids was called <inline-formula>
<mml:math display="inline" id="im5">
<mml:mrow>
<mml:msubsup>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mi>u</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>u</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mn>2</mml:mn>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mi>L</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<disp-formula>
<label>(2)</label>
<mml:math display="block" id="M2">
<mml:mrow>
<mml:msub>
<mml:mi>S</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2016;</mml:mo>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2016;</mml:mo>
<mml:mo>&#x2016;</mml:mo>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2016;</mml:mo>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>The circle loss used in this paper was as follows:</p>
<disp-formula>
<label>(3)</label>
<mml:math display="block" id="M3">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mtext>log</mml:mtext>
<mml:mrow>
<mml:mo stretchy="false">[</mml:mo>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>+</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>L</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mtext>exp</mml:mtext>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
<mml:msubsup>
<mml:mi>&#x3b1;</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>j</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mi>j</mml:mi>
</mml:msubsup>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>&#x394;</mml:mi>
<mml:mi>n</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>M</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mtext>exp</mml:mtext>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:msubsup>
<mml:mi>&#x3b1;</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>k</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>p</mml:mi>
</mml:mrow>
<mml:mi>k</mml:mi>
</mml:msubsup>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>&#x394;</mml:mi>
<mml:mi>p</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">]</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(4)</label>
<mml:math display="block" id="M4">
<mml:mrow>
<mml:mrow>
<mml:mo>{</mml:mo>
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msubsup>
<mml:mi>&#x3b1;</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>k</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mi>m</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>x</mml:mi>
<mml:mrow>
<mml:mo>{</mml:mo>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>+</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:msubsup>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>p</mml:mi>
</mml:mrow>
<mml:mi>k</mml:mi>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mo>}</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msubsup>
<mml:mi>&#x3b1;</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>j</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mi>max</mml:mi>
<mml:mrow>
<mml:mo>{</mml:mo>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>+</mml:mo>
<mml:msubsup>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mi>j</mml:mi>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mo>}</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>&#x394;</mml:mi>
<mml:mi>p</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>&#x394;</mml:mi>
<mml:mi>n</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>The two hyperparameters <italic>&#x3b3;</italic> and <italic>m</italic> were set to 32 and 0.25, respectively.</p>
<p>The two aforementioned losses were then combined to obtain the final loss:</p>
<disp-formula>
<label>(5)</label>
<mml:math display="block" id="M5">
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>=</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>p</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mrow>
<mml:mi>C</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>e</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>We adopted the SGDM (Stochastic Gradient Descent with momentum) optimizer to train all models for 60 epochs with a learning rate of 5 &#xd7; 10<sup>&#x2212;2</sup>, a weight decay of 5 &#xd7; 10<sup>&#x2212;4</sup>, and a momentum of 0.9. The batch size was set to 32.</p>
</sec>
</sec>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Experiments and result</title>
<sec id="s3_1">
<label>3.1</label>
<title>Experimental setup</title>
<p>The experiments of this paper were conducted on a computer with GPU (OS: Ubuntu 18.04, GPU: NVIDIA GeForce RTX 3090, CUDA: CUDA-11.1), and all algorithms were implemented using Python 3.7.10 and Pytorch 1.8.1.</p>
<p>In this experiment, six partition strategies were implemented for dividing the crabs&#x2019; abdomen images, including three non-overlapping partition algorithms (PDN_NOS1, PDN_NOS2, and PDN_NOS3), as shown in <xref ref-type="fig" rid="f6">
<bold>Figure&#xa0;6</bold>
</xref>, and three overlapping partition algorithms (PDN_OS1, PDN_OS2, and PDN_OS3), as shown in <xref ref-type="fig" rid="f8">
<bold>Figure&#xa0;8</bold>
</xref>. PDN_NOS1 and PDN_OS1 were uniformly divided, PDN_NOS2 and PDN_OS2 emphasized the sulciform texture of the lower half of the abdomen, and PDN_NOS3 and PDN_OS3 emphasized the edge texture of the abdomen.</p>
<p>In the experiments, we studied the distribution of same-id similarity and different-id similarity obtained by the six partition strategies using Crab-201 in Section 3.2 and evaluated the Precision-recall curve of the six partition strategies under different confidence levels in Section 3.3. We also compared the Rank-1 (average accuracy rate) and mAP (mean average precision) of the optimal partition strategy with other algorithms on Crab-146 and visualized the classification ability of the algorithms in Section 3.4.</p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Study of descriptor distinguishability</title>
<p>The feature encoding descriptor was the basis for individual identification. A good algorithm can generate discriminative descriptors and thus distinguish individual crabs. So, we evaluated the distinguishability of the final descriptor <italic>H</italic> of the crabs to prove the identification effect of our PDN algorithm.</p>
<p>In this experiment, we compared the similarity between descriptors <italic>H</italic> of the same ID extracted by the algorithms. In the test phase, the higher the similarity of descriptors between the same ID in different conditions and the lower the similarity of descriptors between different ids, the more reliable the algorithm was.</p>
<p>Similar to Equation (2), the similarity between different descriptors <italic>H</italic> was calculated as follows:</p>
<disp-formula>
<label>(6)</label>
<mml:math display="block" id="M6">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2016;</mml:mo>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#x2016;</mml:mo>
<mml:mo>&#x2016;</mml:mo>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>&#x2016;</mml:mo>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <italic>H</italic>
<sub>1</sub> and <italic>H</italic>
<sub>2</sub> are the descriptor <italic>H</italic> of two different images. The similarity between descriptor <italic>H</italic> of the same category is called <italic>S<sub>p</sub>
</italic>, and the similarity between different ids is called <italic>S<sub>n</sub>
</italic>.</p>
<p>
<xref ref-type="fig" rid="f11">
<bold>Figure&#xa0;11</bold>
</xref> shows the distribution of similarity between descriptors <italic>H</italic> extracted by six partition strategies algorithms from the test set images, the pink dots represent <italic>S<sub>p</sub>
</italic> and the blue dots represent <italic>S<sub>n</sub>
</italic>, <xref ref-type="fig" rid="f11">
<bold>Figure&#xa0;11</bold>
</xref> also shows the boxplot of the distribution of the dots.</p>
<fig id="f11" position="float">
<label>Figure&#xa0;11</label>
<caption>
<p>The distribution of S<sub>p</sub> and S<sub>n</sub> of six partition strategies.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g011.tif"/>
</fig>
<p>The <italic>S<sub>p</sub>
</italic> needs to be distinguished from the <italic>S<sub>n</sub>
</italic> by a certain distance. The overall trend in <xref ref-type="fig" rid="f11">
<bold>Figure&#xa0;11</bold>
</xref> was that the overlapping strategy had a larger difference between the lower quartile of the similarity with the same ID and the upper quartile of the similarity with different IDs compared to the non-overlapping strategy (i.e., the overlapping partition algorithm had a greater differentiation distance for the same ID and different ID descriptors). It could be seen from <xref ref-type="fig" rid="f11">
<bold>Figure&#xa0;11</bold>
</xref> that the differentiation distance of strategy 2 was the smallest whether it was an overlapping partition or a non-overlapping partition. In the overlapping partition algorithm, strategy 3 (OS3) had the largest distinguishing distance, which means that OS3 was more suitable for partitioning the abdomen of the crab. From the data above, we also concluded that the edge texture of the abdomen emphasized in strategy 3 had more identifiable features than the sulciform texture of the lower part of the abdomen of the crab emphasized in strategy 2.
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Precision and Recall under different confidence levels</title>
<p>The algorithm was tested with different confidence levels/thresholds, and the Precision and Recall were calculated at different confidence levels. For each query, images in the gallery with similarity above the threshold were classified as positive and those below the threshold were classified as negative. Positive examples that were correctly classified were denoted as TP (true positive), negative examples that were correctly classified were denoted as TN (true negative); positive examples that were incorrectly classified as negative were denoted as FN (false negative) and negative examples that were incorrectly classified as positive were denoted as FP (false positive). The Precision and Recall rates were calculated as follows:</p>
<disp-formula>
<label>(7)</label>
<mml:math display="block" id="M7">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<label>(8)</label>
<mml:math display="block" id="M8">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>The Precision-recall curve was drawn with recall as the horizontal coordinate and precision as the vertical coordinate, and the results are shown in <xref ref-type="fig" rid="f12">
<bold>Figure&#xa0;12</bold>
</xref>. As can be seen from <xref ref-type="fig" rid="f12">
<bold>Figure&#xa0;12</bold>
</xref>, the performance of PDN_OS1 and PDN_OS2 was poor and the performance of PDN_OS3 was better compared to the other policies.</p>
<fig id="f12" position="float">
<label>Figure&#xa0;12</label>
<caption>
<p>Precision-recall curve under different confidence levels.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g012.tif"/>
</fig>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Robustness</title>
<p>To verify the suitability of the algorithm for real-life extension and the generality of individual identification of different species of crabs, we used a more complex multi-device abdomen dataset of mud crabs (Crab-146) to train and test our model.</p>
<p>The algorithm evaluation metrics used in this experiment were the Rank-1 (average accuracy rate) and the mAP (mean average precision), which were originally used as algorithm evaluation metrics for person re-identification. Rank-1 represented the average accuracy rate of whether the image with the highest similarity to the query matched the query correctly. The mAP of an algorithm was the mean of the average precision scores for each query of the test set. The higher the score of Rank-1 and mAP, the more accurate the model was in its predictions. Given a set of Query as <italic>Q</italic> = {<italic>q</italic>
<sub>1</sub>, <italic>q</italic>
<sub>2</sub>,&#x2026;,<italic>q<sub>i</sub>
</italic>,&#x2026;<italic>q<sub>N</sub>
</italic>} and a set of Gallery as <italic>G</italic> = {<italic>g</italic>
<sub>1</sub>, <italic>g</italic>
<sub>2</sub>,&#x2026;,<italic>g<sub>j</sub>
</italic>,&#x2026;<italic>g<sub>M</sub>
</italic>}, we assumed that the number of times the ID corresponding to <italic>q<sub>i</sub>
</italic> appeared in <italic>G</italic> was <italic>K</italic>
<sub>
<italic>q</italic>
<sub>
<italic>i</italic>
</sub>
</sub> . The Query process for each query image was given as follows. The algorithm extracted the descriptor <italic>H</italic>
<sub>
<italic>q</italic>
<sub>
<italic>i</italic>
</sub>
</sub> of <italic>q<sub>i</sub>
</italic>, compared it with the descriptor <italic>H</italic>
<sub>
<italic>g</italic>
<sub>
<italic>j</italic>
</sub>
</sub> of each image in <italic>G</italic> and calculated the similarity between descriptors <italic>H</italic>
<sub>
<italic>q</italic>
<sub>
<italic>i</italic>
</sub>
</sub> and <italic>H</italic>
<sub>
<italic>g</italic>
<sub>
<italic>j</italic>
</sub>
</sub> . Then, the Gallery was ranked in ascending order according to the similarity, the ranked gallery was noted as <italic>G</italic>
<sub>
<italic>q</italic>
<sub>
<italic>i</italic>
</sub>
</sub> , the set consisting of images hit by <italic>q<sub>i</sub>
</italic> was noted as <inline-formula>
<mml:math display="inline" id="im6">
<mml:mrow>
<mml:msubsup>
<mml:mi>G</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mrow>
<mml:mo>{</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mi>g</mml:mi>
<mml:mn>1</mml:mn>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mtext>&#xa0;</mml:mtext>
<mml:msubsup>
<mml:mi>g</mml:mi>
<mml:mn>2</mml:mn>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mtext>&#xa0;</mml:mtext>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:mtext>&#xa0;</mml:mtext>
<mml:msubsup>
<mml:mi>g</mml:mi>
<mml:mi>j</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:mtext>&#xa0;</mml:mtext>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:mtext>&#xa0;</mml:mtext>
<mml:msubsup>
<mml:mi>g</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>K</mml:mi>
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
</mml:mrow>
<mml:mo>}</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>, which was a subset of <italic>G</italic>
<sub>
<italic>q</italic>
<sub>
<italic>i</italic>
</sub>
</sub> . We assumed that <inline-formula>
<mml:math display="inline" id="im7">
<mml:mrow>
<mml:msubsup>
<mml:mi>g</mml:mi>
<mml:mi>j</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> was ranked as <inline-formula>
<mml:math display="inline" id="im8">
<mml:mrow>
<mml:msubsup>
<mml:mi>r</mml:mi>
<mml:mi>j</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:msubsup>
<mml:mtext>&#xa0;</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>in <italic>G</italic>
<sub>
<italic>q</italic>
<sub>
<italic>i</italic>
</sub>
</sub> , and <inline-formula>
<mml:math display="inline" id="im9">
<mml:mrow>
<mml:msubsup>
<mml:mi>r</mml:mi>
<mml:mi>j</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2032;</mml:mo>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> in <inline-formula>
<mml:math display="inline" id="im10">
<mml:mrow>
<mml:msubsup>
<mml:mi>G</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>, this process was repeated for all queries in Query, and the mAP was calculated by</p>
<disp-formula>
<label>(9)</label>
<mml:math display="block" id="M9">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>N</mml:mi>
</mml:mfrac>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:msubsup>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>K</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msubsup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mi>r</mml:mi>
<mml:mi>j</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2032;</mml:mo>
</mml:mrow>
</mml:msubsup>
<mml:mo stretchy="false">/</mml:mo>
<mml:msubsup>
<mml:mi>r</mml:mi>
<mml:mi>j</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>In Crab-146, <italic>K</italic>
<sub>
<italic>q</italic>
<sub>
<italic>i</italic>
</sub>
</sub>=1 . So, the mAP was calculated by:</p>
<disp-formula>
<label>(10)</label>
<mml:math display="block" id="M10">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>N</mml:mi>
</mml:mfrac>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msup>
<mml:mi>r</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2032;</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo stretchy="false">/</mml:mo>
<mml:msup>
<mml:mi>r</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>q</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>In <xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>, we evaluate the PA block of PDN_OS3. Through the ablation experiment of the PA block, it can be seen that without the PA block, the Rank-1 and mAP of the proposed PDN_OS3 algorithm decreased by 4% and 2.9%, respectively, which shows that the part attention mechanism of this paper was beneficial for the individual identification of the crab abdomen. We also compare the performance of PDN_OS3 using the PA block and the PDN_OS3 using RGA-S (<xref ref-type="bibr" rid="B34">Zhang et&#xa0;al., 2020</xref>). RGA-S is a relation-aware global attention block for spatial attention with a higher number of parameters compared to the PA block. The Rank-1 and mAP of the PDN_OS3 using RGA-S decreased by 1.6% and 1.1%, respectively. Using the PA block as the attention block of the PDN_OS3 led to a better performance with a smaller number of parameters.
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Comparison of the attention blocks.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Model</th>
<th valign="middle" align="center">Rank-1</th>
<th valign="middle" align="center">
<italic>mAP</italic>
</th>
<th valign="middle" align="center">Number of parameters of the attention block</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">PDN_OS3(w/o PA)</td>
<td valign="middle" align="center">0.894</td>
<td valign="middle" align="center">0.914</td>
<td valign="middle" align="center">&#x2013;</td>
</tr>
<tr>
<td valign="middle" align="left">PDN_OS3(w/o PA)+RGA-S</td>
<td valign="middle" align="center">0.918</td>
<td valign="middle" align="center">0.934</td>
<td valign="middle" align="center">1.59M</td>
</tr>
<tr>
<td valign="middle" align="left">PDN_OS3</td>
<td valign="middle" align="center">
<bold>0.934</bold>
</td>
<td valign="middle" align="center">
<bold>0.945</bold>
</td>
<td valign="middle" align="center">
<bold>4.1K</bold>
</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>The bold values represent the best value of the three.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>In <xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>, we compare the rank1 and mAP of our proposed PDN_OS3 with other counterpart algorithms. The <italic>i</italic> in PCB-i indicates that the image is divided horizontally into <italic>i</italic> parts. To generate the descriptors, we removed the final fully-connected layer from all the comparison algorithms, and the feature map output from the last convolutional layer was average pooled to obtain the descriptors. Without the parameter restriction of the fully connected layer, the input size of the algorithm can be changed as required. We uniformly resized the input size of all comparison algorithms to the same 256*256 as the PDN. Then the feature was fed into the average pooling layer after being output from the final convolution layer, where the output vector was the descriptor. In the training phase, the descriptors were fed into the classifier for classification, and the loss function was the final loss in section 3.1.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Performance comparison on the Crab-146 dataset.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Model</th>
<th valign="middle" align="center">Rank-1</th>
<th valign="middle" align="center">
<italic>mAP</italic>
</th>
<th valign="middle" align="center">Descriptor size</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">AlexNet (<xref ref-type="bibr" rid="B10">Krizhevsky et&#xa0;al., 2012</xref>)</td>
<td valign="middle" align="center">0.786</td>
<td valign="middle" align="center">0.822</td>
<td valign="middle" align="center">256</td>
</tr>
<tr>
<td valign="middle" align="left">VGG16 (<xref ref-type="bibr" rid="B17">Simonyan and Zisserman, 2014</xref>)</td>
<td valign="middle" align="center">0.889</td>
<td valign="middle" align="center">0.909</td>
<td valign="middle" align="center">512</td>
</tr>
<tr>
<td valign="middle" align="left">Googlenet (<xref ref-type="bibr" rid="B21">Szegedy et&#xa0;al., 2015</xref>)</td>
<td valign="middle" align="center">0.847</td>
<td valign="middle" align="center">0.872</td>
<td valign="middle" align="center">1024</td>
</tr>
<tr>
<td valign="middle" align="left">ResNet50 (<xref ref-type="bibr" rid="B7">He et&#xa0;al., 2016</xref>)</td>
<td valign="middle" align="center">0.875</td>
<td valign="middle" align="center">0.898</td>
<td valign="middle" align="center">2048</td>
</tr>
<tr>
<td valign="middle" align="left">ResNeXt50 (<xref ref-type="bibr" rid="B27">Xie et&#xa0;al., 2017</xref>)</td>
<td valign="middle" align="center">0.848</td>
<td valign="middle" align="center">0.875</td>
<td valign="middle" align="center">2048</td>
</tr>
<tr>
<td valign="middle" align="left">Densenet121 (<xref ref-type="bibr" rid="B9">Huang et&#xa0;al., 2017</xref>)</td>
<td valign="middle" align="center">0.884</td>
<td valign="middle" align="center">0.906</td>
<td valign="middle" align="center">1024</td>
</tr>
<tr>
<td valign="middle" align="left">PCB-6 (<xref ref-type="bibr" rid="B20">Sun et&#xa0;al., 2018</xref>)</td>
<td valign="middle" align="center">0.832</td>
<td valign="middle" align="center">0.857</td>
<td valign="middle" align="center">12288</td>
</tr>
<tr>
<td valign="middle" align="left">PCB-4 (<xref ref-type="bibr" rid="B20">Sun et&#xa0;al., 2018</xref>)</td>
<td valign="middle" align="center">0.840</td>
<td valign="middle" align="center">0.868</td>
<td valign="middle" align="center">8192</td>
</tr>
<tr>
<td valign="middle" align="left">PDN_OS3(w/o PA)</td>
<td valign="middle" align="center">0.894</td>
<td valign="middle" align="center">0.914</td>
<td valign="middle" align="center">4096</td>
</tr>
<tr>
<td valign="middle" align="left">PDN_OS3</td>
<td valign="middle" align="center">
<bold>0.934</bold>
</td>
<td valign="middle" align="center">
<bold>0.945</bold>
</td>
<td valign="middle" align="center">4096</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Bold indicates that the corresponding value is the best among the compared algorithms.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>As shown in <xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>, the proposed PDN_OS3 achieved the best performance in mAP and Rank-1, which indicates that our method can capture more discriminative features than other methods. The results also show that descriptor size is not a significant factor in the effectiveness of the algorithm. In PCBs, finer partitions can, on the contrary, reduce the performance of the algorithm.</p>
<p>We further evaluated our proposed PDN method using the T-SNE (T-distributed Stochastic Neighbor Embedding) to visualize the test set descriptors extracted by the algorithm. T-SNE is essentially an embedding model that maps data from a high-dimensional space to a low-dimensional space while preserving the local characteristics of the data. <xref ref-type="fig" rid="f13">
<bold>Figure&#xa0;13</bold>
</xref> visualizes the distribution of image descriptors from the test set extracted by PDN_OS3 and five counterparts (Googlenet, ResNet50, ResNeXt50, PCB-4, and PCB-6). Points of the same color represent descriptors of the same ID; the tighter the clustering of points of the same color, the tighter the intra-class distribution, and the greater the distance between points of different colors, the greater the inter-class spacing. We can see from <xref ref-type="fig" rid="f13">
<bold>Figure&#xa0;13</bold>
</xref> that PDN_OS3 generates descriptors with a greater gap between classes and closer intra-class distribution.</p>
<fig id="f13" position="float">
<label>Figure&#xa0;13</label>
<caption>
<p>
<bold>(A&#x2013;F)</bold> Visualization of the classification ability of algorithms.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmars-10-1093542-g013.tif"/>
</fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>This paper provided a reliable PDN method with six partition strategies to identify individual crabs from their abdomen images. The experiments on a swimming crab dataset (Crab-201) demonstrated that the proposed PDN algorithm can distinguish crabs&#x2019; abdomen images with different IDs. The overlapping partition strategy had a larger distinguishing distance and the overlapping partition strategy 3 (PDN_OS3) had the largest distinguishing distance, which means that the edge texture of the abdomen emphasized in the PDN_OS3 had the most identifiable features, and the PDN_OS3 was the most suitable for dividing the abdomen of the crab. Using the PA block as the attention block of the PDN_OS3 led to a better performance with a smaller number of parameters. In a more complex mud crab dataset (Crab-146), the PDN_OS3 also achieved a higher rank1 and mAP compared with seven counterpart algorithms (PCB, AlexNet, VGG16, Googlenet, ResNet50, ResNeXt50, and Densenet121) and showed the best classification ability by separating different classes while ensuring the closest intra-class distribution. In summary, the proposed PDN_OS3 algorithm achieved the best performance in individual identification using crabs&#x2019; abdomen images.</p>
</sec>
<sec id="s5" sec-type="conclusion">
<label>5</label>
<title>Conclusion</title>
<p>This paper proposes a new Part-based Deep Learning Network (PDN) to identify individual crabs from their abdomen images. In the PDN, we also develop six partition strategies (three non-overlapping strategies and three overlapping strategies) according to the abdomen texture features of crabs and demonstrate that the PDN_OS3 (strategy 3 using overlapping partition) achieves the best performance. Extensive experiments on two real-world farm crab datasets (the swimming crab dataset Crab-201 and the complex mud crab dataset Crab-146) demonstrate the accuracy and robustness of our PDN_OS3 algorithm for the identification of individual crabs. The experimental results show that the algorithm using overlapping partition strategies can distinguish different crabs&#x2019; abdomen images with a larger distinguishing distance, and the OS3 has the largest distinguishing distance. The edge texture of the abdomen emphasized in OS3 has the most identifiable features. The PDN_OS3 has the highest Rank1 and mAP compared with other algorithms and can achieve a distinction between the classes with the closest intra-class distribution. The results show that the PDN_OS3 is more suitable for the individual identification of crabs than the original PCB network. The four local descriptors <italic>h</italic> of the PDN_OS3 algorithm are connected to form a global descriptor <italic>H</italic> that can be used as the individual identity ID of the crab in the traceability system.</p>
</sec>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found below: <uri xlink:href="https://github.com/bergi111/crabreid">https://github.com/bergi111/crabreid</uri>.</p>
</sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>CW: Proposed research ideas, designed research protocols, performed experiments, drafted paper and completed final revision. ZX and KC: Revised the paper. SC and YY: Data collection, dataset production. YX: Proposed research ideas, dataset production, revised the paper. RZ: Revised the paper. GH: Revised the paper. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<sec id="s8" sec-type="funding-information">
<title>Funding</title>
<p>This work was supported by National Natural Science Foundation of China (Grant No. U20A20121); Ningbo public welfare project (Grant No. 202002N3109, 2022S094); Natural Science Foundation of Zhejiang Province (Grant No. LY21F020006); Science and Technology Innovation 2025 Major Project of Ningbo (Grant No. 2019B10125, 2019B10028, 2020Z016, 2021Z031, 2022Z074); Ningbo Fenghua District industrial chain key core technology "unveiled the commander" project (Grant No. 202106206).</p>
</sec>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Andrew</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Gao</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Mullan</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Campbell</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Dowsey</surname> <given-names>A. W.</given-names>
</name>
<name>
<surname>Burghardt</surname> <given-names>T.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Visual identification of individual Holstein-friesian cattle via deep metric learning</article-title>. <source>Comput. Electron. Agric.</source> <volume>185</volume>, <fpage>106133</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2021.106133</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Dai</surname> <given-names>A. Y.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>S. L.</given-names>
</name>
<name>
<surname>Song</surname> <given-names>Y. Z.</given-names>
</name>
</person-group> (<year>1986</year>). <source>Marine crabs in China Sea</source> (<publisher-loc>Beijing</publisher-loc>: <publisher-name>Marine Publishing Press</publisher-name>).</citation>
</ref>
<ref id="B3">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Deng</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Dong</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Socher</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>L. J.</given-names>
</name>
<name>
<surname>Kai</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>F.-F.</given-names>
</name>
</person-group> (<year>2009</year>). &#x201c;<article-title>ImageNet: A large-scale hierarchical image database</article-title>,&#x201d; <source>2009 IEEE Conference on Computer Vision and Pattern Recognition</source>, <publisher-loc>Miami, FL, USA</publisher-loc>, <publisher-name>IEEE</publisher-name>, <volume>2009</volume>, pp. <fpage>248</fpage>&#x2013;<lpage>255</lpage>, doi:&#xa0;<pub-id pub-id-type="doi">10.1109/CVPR.2009.5206848</pub-id>.</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Faggion</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Sanchez</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Vandeputte</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Clota</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Vergnet</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Blanc</surname> <given-names>M.-O.</given-names>
</name>
<etal/>
</person-group>. (<year>2020</year>). <article-title>Evaluation of a European sea bass (Dicentrarchus labrax l.) post-larval tagging method with ultra-small RFID tags</article-title>. <source>Aquaculture</source> <volume>520</volume>, <elocation-id>734945</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aquaculture.2020.734945</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Feddern</surname> <given-names>V.</given-names>
</name>
<name>
<surname>Mazzuco</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Fonseca</surname> <given-names>F. N.</given-names>
</name>
<name>
<surname>De Lima</surname> <given-names>G. J. M. M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A review on biogenic amines in food and feed: Toxicological aspects, impact on health and control measures</article-title>. <source>Anim. Production. Sci.</source> <volume>59</volume> (<issue>4</issue>), <fpage>608</fpage>&#x2013;<lpage>618</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1071/AN18076</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hamasaki</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Fukunaga</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Kitada</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Batch fecundity of the swimming crab portunus trituberculatus (Brachyura: Portunidae)</article-title>. <source>Aquaculture</source> <volume>253</volume> (<issue>1-4</issue>), <fpage>359</fpage>&#x2013;<lpage>365</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aquaculture.2005.08.002</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>He</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Ren</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>Deep residual learning for image recognition</article-title>,&#x201d; in <source>Proceedings of the IEEE conference on computer vision and pattern recognition</source>. <publisher-name>IEEE</publisher-name>: <publisher-loc>Las Vegas, NV, USA</publisher-loc>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>C.</given-names>
</name>
<etal/>
</person-group>. (<year>2021</year>). <article-title>Real-time detection of uneaten feed pellets in underwater images for aquaculture using an improved YOLO-V4 network</article-title>. <source>Comput. Electron. Agric.</source> <volume>185</volume>, <elocation-id>106135</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2021.106135</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Huang</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>van der Maaten</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Weinberger</surname> <given-names>K. Q.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Densely connected convolutional networks</article-title>,&#x201d; in <source>Proceedings of the IEEE conference on computer vision and pattern recognition</source>. <publisher-name>IEEE</publisher-name>: <publisher-loc>Honolulu, HI, USA</publisher-loc>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Krizhevsky</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Sutskever</surname> <given-names>I.</given-names>
</name>
<name>
<surname>Hinton</surname> <given-names>G. E.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Imagenet classification with deep convolutional neural networks</article-title>. <source>Commun. ACM</source> <volume>60</volume>, <fpage>84</fpage>&#x2013;<lpage>90</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1145/3065386</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kumar</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Pandey</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Satwik</surname> <given-names>K. S. R.</given-names>
</name>
<name>
<surname>Kumar</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Singh</surname> <given-names>S. K.</given-names>
</name>
<name>
<surname>Singh</surname> <given-names>A. K.</given-names>
</name>
<etal/>
</person-group>. (<year>2018</year>). <article-title>Deep learning framework for recognition of cattle using muzzle point image pattern</article-title>. <source>Measurement</source> <volume>116</volume>, <fpage>1</fpage>&#x2013;<lpage>17</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.measurement.2017.10.064</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>H.</given-names>
</name>
</person-group> (<year>2019</year>). <source>Research of anti-counterfeiting algorithm of eriocheir sinensis based on local features of images</source> (<publisher-loc>China</publisher-loc>: <publisher-name>Anhui University of Technology</publisher-name>).</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lipp</surname> <given-names>E. K.</given-names>
</name>
<name>
<surname>Rose</surname> <given-names>J. B.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>The role of seafood in foodborne diseases in the united states of America</article-title>. <source>Rev. Scientifique. Technique.</source> <volume>16</volume> (<issue>2</issue>), <fpage>620</fpage>&#x2013;<lpage>640</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.20506/rst.16.2.1048</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pandiyan</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Mahboob</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Govindarajan</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Al-Ghanim</surname> <given-names>K. A.</given-names>
</name>
<name>
<surname>Ahmed</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Al-Mulhm</surname> <given-names>N.</given-names>
</name>
<etal/>
</person-group>. (<year>2021</year>). <article-title>An assessment of level of heavy metals pollution in the water, sediment and aquatic organisms: A perspective of tackling environmental threats for food security</article-title>. <source>Saudi. J. Biol. Sci.</source> <volume>28</volume> (<issue>2</issue>), <fpage>1218</fpage>&#x2013;<lpage>1225</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.sjbs.2020.11.072</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Redmon</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Divvala</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Girshick</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Farhadi</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>You only look once: Unified, real-time object detection</article-title>,&#x201d; in <source>Proceedings of the IEEE conference on computer vision and pattern recognition</source>. <publisher-name>IEEE</publisher-name>: <publisher-loc>Las Vegas, NV, USA</publisher-loc>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shen</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Dai</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Wei</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Jiang</surname> <given-names>L.</given-names>
</name>
<etal/>
</person-group>. (<year>2019</year>). <article-title>Individual identification of dairy cows based on convolutional neural networks</article-title>. <source>Multimedia. Tools Appl.</source> <volume>79</volume>, <fpage>14711</fpage>&#x2013;<lpage>14724</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11042-019-7344-7</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Simonyan</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zisserman</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Very deep convolutional networks for large-scale image recognition</article-title>. <source>arXiv. preprint. arXiv:1409.1556</source>.</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Impacts of the sampling design on the abundance index estimation of portunus trituberculatus using bottom trawl</article-title>. <source>Acta Oceanol. Sin.</source> <volume>39</volume> (<issue>6</issue>), <fpage>48</fpage>&#x2013;<lpage>57</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s13131-020-1607-z</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Sun</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Cheng</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Zheng</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Z.</given-names>
</name>
<etal/>
</person-group>. (<year>2020</year>). &#x201c;<article-title>Circle loss: A unified perspective of pair similarity optimization</article-title>,&#x201d; in <source>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition</source>. <publisher-name>IEEE</publisher-name>: <publisher-loc>Seattle, WA, USA</publisher-loc>
</citation>
</ref>
<ref id="B20">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Sun</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zheng</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Beyond part models: Person retrieval with refined part pooling (and a strong convolutional baseline)</article-title>,&#x201d; in <source>Proceedings of the European conference on computer vision (ECCV)</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>
</citation>
</ref>
<ref id="B21">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Szegedy</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Jia</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Sermanet</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Reed</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Anguelov</surname> <given-names>D.</given-names>
</name>
<etal/>
</person-group>. (<year>2015</year>). &#x201c;<article-title>Going deeper with convolutions</article-title>,&#x201d; in <source>Proceedings of the IEEE conference on computer vision and pattern recognition</source>. <publisher-loc>Boston, MA</publisher-loc>: <publisher-name>IEEE</publisher-name>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tai</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Research on the feature recognition and algorithm of the carapace of eriocheir sinensis</article-title>. <source>Periodical. Ocean. Univ. China</source> <volume>51</volume> (<issue>1</issue>), <fpage>138</fpage>&#x2013;<lpage>146</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.16441/j.cnki.hdxb.20180416</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Yuan</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Learning discriminative features with multiple granularities for person re-identification</article-title>,&#x201d; in <source>Proceedings of the 26th ACM international conference on multimedia</source>. <publisher-name>Association for Computing Machinery</publisher-name>: <publisher-loc>New York, NY, USA</publisher-loc>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Worm</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Falkenberg</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Olesen</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Histamine and migraine revisited: Mechanisms and possible drug targets</article-title>. <source>J. Headache. Pain</source> <volume>20</volume> (<issue>1</issue>), <fpage>30</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s10194-019-0984-1</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Cheng</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zeng</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Cui</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Reproductive performance and offspring quality of the first and the second brood of female swimming crab, portunus trituberculatus</article-title>. <source>Aquaculture</source> <volume>303</volume> (<issue>1-4</issue>), <fpage>94</fpage>&#x2013;<lpage>100</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aquaculture.2010.03.006</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xiao</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Fu</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Peng</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>SMS-CQ: A quality and safety traceability system for aquatic products in cold-chain integrated WSN and QR code</article-title>. <source>J. Food Process. Eng.</source> <volume>40</volume> (<issue>1</issue>), <elocation-id>e12303</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/jfpe.12303</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Xie</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Girshick</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Doll&#xe1;r</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Tu</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>He</surname> <given-names>K.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Aggregated residual transformations for deep neural networks</article-title>,&#x201d; in <source>Proceedings of the IEEE conference on computer vision and pattern recognition</source>. <publisher-name>IEEE</publisher-name>: <publisher-loc>Honolulu, HI, USA</publisher-loc>
</citation>
</ref>
<ref id="B28">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Xi</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Individual identification method of leopard in multiple scenarios</article-title>,&#x201d; in <source>2021 the 4th international conference on image and graphics processing</source>. <publisher-name>Association for Computing Machinery</publisher-name>: <publisher-loc>New York, NY, USA</publisher-loc>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Shan</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Yu</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>H.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Heavy metal pollution and stable isotope ratios (&#x3b4;<sup>13</sup>C and &#x3b4;<sup>15</sup>N) in marine organisms from the northern Beibu Gulf, South China Sea</article-title>. <source>Mar. Pollut. Bull.</source> <volume>166</volume>, <fpage>112230</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.marpolbul.2021.112230</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Song</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Gao</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Dong</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Deep learning for smart fish farming: applications, opportunities and challenges</article-title>. <source>Rev. Aquacult.</source> <volume>13</volume>, <fpage>66</fpage>&#x2013;<lpage>90</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/raq.12464</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="book">
<person-group person-group-type="author">
<collab>China Fishery Statistical Yearbook</collab>
</person-group> (<year>2020</year>). <source>China Fishery statistical yearbook</source> Vol. <volume>2020</volume> (<publisher-loc>Beijing</publisher-loc>: <publisher-name>China Agriculture Press</publisher-name>), <fpage>24</fpage>&#x2013;<lpage>34</lpage>.</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yu</surname> <given-names>C. G.</given-names>
</name>
<name>
<surname>Song</surname> <given-names>H. T.</given-names>
</name>
<name>
<surname>Yao</surname> <given-names>G. Z.</given-names>
</name>
<name>
<surname>Lv</surname> <given-names>H. Q.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Composition and distribution of economic crab species in the East China Sea</article-title>. <source>Oceanol. Limnol. Sin.</source> <volume>37</volume> (<issue>1</issue>), <fpage>53</fpage>&#x2013;<lpage>60</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3321/j.issn:0029-814X.2006.01.009</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Xin</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Shi</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Xie</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Ren</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A pyramidal feature fusion model on swimming crab Portunus trituberculatus re-identification</article-title>. <source>Front. Mar. Sci.</source> <volume>9</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fmars.2022.845112</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Lan</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Zeng</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Jin</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Relation-aware global attention for person re-identification</article-title>,&#x201d; in <source>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition</source>. <publisher-name>IEEE</publisher-name>: <publisher-loc>Seattle, WA, USA</publisher-loc>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Xing</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Agricultural products quality and safety traceability system based on two-dimension barcode recognition of mobile phones</article-title>. <source>Nongye Jixie Xuebao/Transactions Chin. Soc. Agric. Machinery.</source> <volume>43</volume> (<issue>7</issue>), <fpage>124</fpage>&#x2013;<lpage>129</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.6041/j.issn.1000-1298.2012.07.023</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zheng</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Shen</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>Q.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>Scalable person re-identification: A benchmark</article-title>,&#x201d; in <source>Proceedings of the IEEE international conference on computer vision</source>. <publisher-name>IEEE</publisher-name>: <publisher-loc>Santiago, Chile</publisher-loc>
</citation>
</ref>
</ref-list>
</back>
</article>