<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2026.1745861</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Improved YOLOv11n-seg for impurity detection in mechanically harvested sugarcane</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>He</surname><given-names>Fengguang</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3249463/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhou</surname><given-names>Sili</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3050994/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Chen</surname><given-names>Pinlan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2975942/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Deng</surname><given-names>Ganran</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Feng</surname><given-names>Shaobo</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Guojie</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Cui</surname><given-names>Zhende</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zheng</surname><given-names>Shuang</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Ling</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Yan</surname><given-names>Bin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2819240/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Qin</surname><given-names>Shuangmei</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Wang</surname><given-names>Xilin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Dai</surname><given-names>Ye</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname><given-names>Zehua</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Agricultural Machinery Research Institute, Chinese Academy of Tropical Agricultural Sciences</institution>, <city>Zhanjiang</city>, <state>Guangdong</state>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>Key Laboratory of Tropical Agricultural Machinery, Ministry of Agriculture and Rural Affairs</institution>, <city>Zhanjiang</city>, <state>Guangdong</state>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff3"><label>3</label><institution>Guangxi Research Institute of Metrology &amp; Test</institution>, <city>Nanning</city>, <state>Guangxi</state>,&#xa0;<country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Pinlan Chen, <email xlink:href="mailto:pl1600875479@gmail.com">pl1600875479@gmail.com</email>; Shaobo Feng, <email xlink:href="mailto:fengshaob@126.com">fengshaob@126.com</email></corresp>
<fn fn-type="other" id="fn003">
<p>&#x2020;These authors share first authorship</p></fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-18">
<day>18</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1745861</elocation-id>
<history>
<date date-type="received">
<day>13</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>22</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 He, Zhou, Chen, Deng, Feng, Li, Cui, Zheng, Li, Yan, Qin, Wang, Dai and Liu.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>He, Zhou, Chen, Deng, Feng, Li, Cui, Zheng, Li, Yan, Qin, Wang, Dai and Liu</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-18">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>The content of impurities in mechanically harvested sugarcane is a critical factor for evaluating harvest quality and determining market price. To enable intelligent detection of impurities in mechanically harvested sugarcane, this study proposes an impurity detection method based on an improved YOLOv11n-seg model. The method integrates four enhancement modules into the original YOLOv11n-seg architecture. Firstly, a lightweight C2_Ghost module is introduced into the high-channel feature extraction stages of both the backbone and neck, thereby reducing computational complexity and feature redundancy. Subsequently, a C2_FSAS module is designed to perform frequency-domain relationship modelling, enhancing long-range semantic dependency representation. An Efficient Channel Attention (ECA) mechanism is then applied to deep high-level semantic features to adaptively reweight salient feature channels. Finally, the traditional fixed interpolation-based upsampling operation is replaced with a dynamic DySample upsampling strategy to recover fine-grained edge features. Experimental results indicate that Improved YOLOv11n-seg achieves segmentation performance of 97.0%, 98.1%, 99.2%, and 82.9% in terms of P, R, mAP<sub>0.5</sub>, and mAP<sub>0.5:0.95</sub>, respectively. Compared with the original YOLOv11n-seg, the proposed model achieves a 1.8% improvement in mAP<sub>0.5:0.95</sub>, a 10.2% reduction in parameter count, and maintains a real-time inference speed of 34.8 FPS on the Jetson Xavier NX under TensorRT acceleration. Ablation studies validate the effectiveness of the four-module synergistic design, with C2_FSAS and DySample contributing most significantly to the improvement in mAP. Moreover, the model exhibits enhanced edge delineation accuracy and inter-class discrimination capability. 
In summary, the Improved YOLOv11n-seg achieves a favourable balance between segmentation accuracy and real-time performance, enabling precise segmentation of sugarcane segments and diverse impurity types. The proposed method provides reliable technical support for intelligent impurity rate detection in mechanically harvested sugarcane and practical deployment on edge computing platforms.</p>
</abstract>
<kwd-group>
<kwd>sugarcane</kwd>
<kwd>impurity detection</kwd>
<kwd>instance segmentation</kwd>
<kwd>YOLOv11</kwd>
<kwd>deep learning</kwd>
<kwd>lightweight</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by Hainan Provincial Natural Science Foundation of China (grant number 522QN385 and 324MS095) and Science and Technology Project of Market Supervision Administration, Guangxi (GSJKJZC2024-3).</funding-statement>
</funding-group>
<counts>
<fig-count count="10"/>
<table-count count="3"/>
<equation-count count="8"/>
<ref-count count="32"/>
<page-count count="15"/>
<word-count count="7017"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Technical Advances in Plant Science</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>    
<p>As one of the world&#x2019;s most important sugar crops, sugarcane production is essential for safeguarding sugar security and supporting economic development (<xref ref-type="bibr" rid="B14">Liu et&#xa0;al., 2020</xref>). In 2023, China&#x2019;s sugarcane production exceeded 100 million tons (Food and Agriculture Organization of the United Nations, 2025). The State Council Document [2018] No. 42 specifies that China&#x2019;s sugarcane harvesting mechanization rate must reach 30% by 2025 (<xref ref-type="bibr" rid="B13">Liu et&#xa0;al., 2024</xref>; <xref ref-type="bibr" rid="B32">Zhou et&#xa0;al., 2024</xref>). However, mechanized harvesting inevitably introduces various impurities (<xref ref-type="bibr" rid="B20">Que et al., 2024</xref>)&#x2014;including sugarcane tops, leaves, roots, and soil. Mechanized sugarcane harvesting can reduce soil impurity intake through optimized cutting height adjustment (<xref ref-type="bibr" rid="B22">Shi et&#xa0;al., 2024</xref>) and by improving the conveying and cutting systems to remove sugarcane leaves and soil debris (<xref ref-type="bibr" rid="B1">Afsharnia et&#xa0;al., 2025</xref>; <xref ref-type="bibr" rid="B29">Zhou et&#xa0;al., 2025a</xref>). The cut sugarcane and associated impurities are conveyed into the impurity-removal fan channel. By leveraging the differences in suspension velocities between sugarcane segments and impurities, lighter components such as sugarcane tops and leaves are removed (<xref ref-type="bibr" rid="B30">Zhou et&#xa0;al., 2025b</xref>). However, excessively low fan speeds lead to impurity retention, whereas excessively high speeds may cause unintended removal of sugarcane segments. Therefore, the optimization of variable-speed fan control strategies can effectively reduce impurity content (<xref ref-type="bibr" rid="B25">Wu et&#xa0;al., 2024</xref>). 
Although mechanized sugarcane harvesting currently removes over 90% of impurities, residual impurities remain, and impurity levels cannot be monitored during post-fan collection. Due to limitations in current sugar production equipment and processing technologies in China, sugar mills require raw sugarcane impurity rates to be below 5% (<xref ref-type="bibr" rid="B11">Li et&#xa0;al., 2024</xref>; <xref ref-type="bibr" rid="B31">Zhou et&#xa0;al., 2025c</xref>; <xref ref-type="bibr" rid="B30">Zhou et&#xa0;al., 2025b</xref>). Impurities significantly reduce the purity of raw sugarcane, directly impacting sugar yield and quality, accelerating equipment wear, increasing energy consumption, and raising production costs (<xref ref-type="bibr" rid="B3">De Mello et&#xa0;al., 2022</xref>). As a result, impurity content has become a critical metric for assessing raw material quality, managing production costs, and determining purchase prices in sugar processing. Raw sugarcane undergoes secondary impurity removal procedures&#x2014;including unloading, feeding, impurity separation, and discharge&#x2014;at sugar mills. However, these procedures lack the capability for automatic detection of impurity content. Instead, traditional approaches rely on manual sampling and weighing, with impurity levels estimated from randomly selected small-scale samples. These methods are associated with high subjectivity, low efficiency, and considerable variability (<xref ref-type="bibr" rid="B12">Li et&#xa0;al., 2023</xref>; <xref ref-type="bibr" rid="B32">Zhou et&#xa0;al., 2024</xref>). Therefore, the development of rapid and objective devices for measuring sugarcane impurity levels is urgently required in the sugar industry, and intelligent impurity detection technologies are crucial for advancing such equipment.</p>
<p>In recent years, deep learning has achieved remarkable advancements, and scholars worldwide have conducted extensive research on its application in impurity detection (<xref ref-type="bibr" rid="B4">Ding et&#xa0;al., 2025a</xref>). <xref ref-type="bibr" rid="B8">Huang and Liang (2022)</xref> integrated a feed-forward convolutional attention mechanism, spatial pyramid pooling, and depthwise separable convolutions into a lightweight YOLOv5-based tea impurity detection model, achieving a multi-category mAP of 96.05%. <xref ref-type="bibr" rid="B24">Wang et&#xa0;al. (2024)</xref> proposed an improved YOLOv5 model for cotton surface impurity detection, incorporating MCA to enhance feature extraction for impurity targets, resulting in a mAP of 92.5%. <xref ref-type="bibr" rid="B27">Yu et&#xa0;al. (2023)</xref> replaced convolutional blocks with a Transformer-Encoder module to improve global feature representation in walnut kernel impurity detection, while integrating GhostNet reduced detection time by 10.4% and increased mAP to 88.9%. <xref ref-type="bibr" rid="B5">Ding et&#xa0;al. (2025b)</xref> improved detection performance and model lightweighting by modifying the loss function and employing lightweight convolutions along with model pruning, yielding a 2.6% improvement in mAP and a 37% reduction in inference time. However, bounding-box-based object detection methods are limited in accurately delineating object boundaries for both primary targets and impurities, resulting in suboptimal precision in impurity content estimation.</p>
<p>Image-based instance segmentation methods can identify objects of different categories at the pixel level, enabling precise localization of primary objects and impurity regions. <xref ref-type="bibr" rid="B2">Chen et&#xa0;al. (2023)</xref> developed a U-Net-based method for impurity rate detection in machine-harvested wheat. <xref ref-type="bibr" rid="B28">Zhao et&#xa0;al. (2024)</xref> enhanced the Mask R-CNN model by incorporating a CA attention mechanism and fully connected layers to detect impurities such as dried leaves, senescent vegetable leaves, and paper fragments in leafy vegetables, achieving a mAP of 98.55%, although the detection speed remained suboptimal. <xref ref-type="bibr" rid="B18">Pan et&#xa0;al. (2025)</xref> proposed a potato impurity detection model based on PLP-net, which incorporates an ECA mechanism to enhance feature extraction, achieving an mAP of 96.0%. <xref ref-type="bibr" rid="B19">Qi et&#xa0;al. (2024)</xref> introduced a DeepLab-EDA segmentation model for assessing wheat crushing rate and impurity rate, yielding an accuracy of 95.97%. <xref ref-type="bibr" rid="B12">Li et&#xa0;al. (2023)</xref> developed the MDSC-DeepLabv3+ model by integrating MobileNetv2, ASPP, depthwise separable convolutions, and CA attention mechanisms for the segmentation and detection of sugarcane impurities including sugarcane segments, tops, and leaves, achieving a mAP of 97.55%. These studies demonstrate that instance segmentation-based approaches enable pixel-level precision analysis, providing a solid technical foundation for subsequent high-precision impurity quantification.</p>
<p>Although mechanical screening and air separation methods can effectively remove most impurities during harvesting, these approaches primarily focus on physical separation processes rather than the perception of impurity composition information. Compared with traditional image processing methods, YOLO-based segmentation frameworks enable real-time, end-to-end detection, classification, and pixel-level segmentation of sugarcane impurities. This approach provides rapid, non-contact, and objective visual perception for assessing impurity levels in mechanically harvested sugarcane. Consequently, this study focuses on impurity rate detection during post-harvest transportation to sugar mills. This study proposes an Improved YOLOv11n-seg (<xref ref-type="bibr" rid="B9">Khanam and Hussain, 2024</xref>) method targeting three key biological impurities&#x2014;leaves, tops, and roots. The model integrates the Ghost module (<xref ref-type="bibr" rid="B6">Han et&#xa0;al., 2020</xref>), ECA mechanism (<xref ref-type="bibr" rid="B23">Wang et&#xa0;al., 2020</xref>), efficient frequency domain-based self-attention solver (FSAS) (<xref ref-type="bibr" rid="B10">Kong et&#xa0;al., 2023</xref>), and DySample upsample module (<xref ref-type="bibr" rid="B15">Liu et&#xa0;al., 2023</xref>) to enhance representational capability. This enhancement yields a more robust sugarcane impurity segmentation model, offering technical support for impurity-rate detection equipment in sugarcane processing.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Image acquisition</title>
<p>The sugarcane impurity image data used in this study were acquired in July 2025 at the Experimental Base of the Agricultural Machinery Research Institute, Chinese Academy of Tropical Agricultural Sciences in Zhanjiang, Guangdong Province (21.17&#xb0;N, 110.27&#xb0;E). <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref> shows the image acquisition device. The camera is equipped with a Lenovo WL24A autofocus lens, offering a maximum resolution of 2592&#xd7;1944 and a maximum image transmission rate of 30 FPS. It is positioned approximately 45 cm above the conveyor belt and oriented vertically downward. Two LED light sources were arranged on both sides of the camera to ensure consistent illumination. During image acquisition, sugarcane moves along the conveyor belt at a constant speed of approximately 17 cm/s. Simultaneously, the camera captures images at a rate of one frame per second using a Python-based acquisition program. The sugarcane variety used was &#x201c;Yuetang 94-128&#x201d; and the captured samples consisted of small segments, reflecting the typical condition of sugarcane following mechanical harvesting.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Image acquisition device.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g001.tif">
<alt-text content-type="machine-generated">A conveyor belt system with labeled components: a display, Jetson Xavier NX, LED brightness controller, camera, and LED light source. An inset shows an illuminance measurement device displaying 82 Lux.</alt-text>
</graphic></fig>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Preparation of sugarcane impurity dataset</title>
<p>Soil impurities can be substantially reduced by adjusting the height between the harvesting device and the ridge surface or through mechanical vibration-assisted cleaning methods (<xref ref-type="bibr" rid="B26">Xie et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B16">Martins and Ruiz, 2020</xref>). However, impurities such as sugarcane tops, leaves, and roots cannot be effectively removed during harvesting. Therefore, this study selected sugarcane tops, leaves, and roots as the primary targets for impurity detection. The collected images were manually annotated using X-AnyLabeling software and categorized into four types: sugarcane segments, sugarcane tops, sugarcane leaves, and sugarcane roots, with the corresponding label names cane_segment, cane_top, cane_leaves, and cane_root, and the annotations were in an instance segmentation format. The annotated images cover pure sugarcane segments, pure sugarcane tops, pure sugarcane leaves, pure sugarcane roots, and mixed-category scenarios under varying brightness conditions, as shown in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>. Illuminance levels were measured using a DELIXI DLK-LSK2304 illuminance meter under low-light (23 Lux) and well-lit (82 Lux) conditions. In total, 4,974 images were annotated. The aspect ratio of all images was uniformly preserved while resizing the longer side to 640 pixels. <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref> details the dataset composition across all scenarios. Finally, the JSON files produced during annotation were converted into YOLO format text files. To ensure stable model training and improve generalization, the dataset was divided into training, validation, and test sets at a ratio of 8:1:1.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Categories within the dataset. <bold>(a)</bold> cane_segment. <bold>(b)</bold> cane_top. <bold>(c)</bold> mixed_low-light. <bold>(d)</bold> cane_leaves. <bold>(e)</bold> cane_root. <bold>(f)</bold> mixed_well-lit.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g002.tif">
<alt-text content-type="machine-generated">Six images of sugarcane parts are arranged in two rows. (a) Only sugarcane segments (b) Only sugarcane tops (c) Mixed categories under low-light conditions (d) Only sugarcane leaves (e) Only sugarcane roots (f) Mixed categories under well-lit conditions.</alt-text>
</graphic></fig>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Composition of sugarcane impurity dataset.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g003.tif">
<alt-text content-type="machine-generated">Bar chart showing image quantities for five categories: cane_segment (777), cane_top (690), cane_leaves (807), cane_root (987), and mixed (1713). Mixed category has the highest quantity.</alt-text>
</graphic></fig>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Design of the sugarcane impurity detection model</title>
<p>The YOLOv11-seg backbone network extracts low-level edge and texture information along with high-level semantic features from input images through hierarchical convolutions and multi-scale feature extraction. The neck structure then performs multi-scale feature fusion and enhancement. Finally, the anchor-free detection head jointly outputs bounding boxes, classification confidence, and pixel-level segmentation masks, while non-maximum suppression (NMS) removes redundant predictions. Compared with other YOLO variants and segmentation models such as U-Net (<xref ref-type="bibr" rid="B21">Ronneberger et&#xa0;al., 2015</xref>), YOLOv11n-seg offers a better trade-off between detection speed and segmentation accuracy. Furthermore, unlike two-stage instance segmentation models such as Mask R-CNN (<xref ref-type="bibr" rid="B7">He et&#xa0;al., 2017</xref>), YOLOv11n-seg utilizes a single-stage end-to-end design that maintains high accuracy and robust real-time performance, making it particularly suitable for deployment on low-power edge devices to meet sugarcane impurity detection requirements.</p>
<p>To address the issue of feature confusion among sugarcane segments, tops, and leaves, as well as the blurry boundaries in sugarcane root segmentation, this study introduces targeted improvements to the YOLOv11n-seg architecture. First, a lightweight Ghost Block-based C2_Ghost module, combining the Ghost module with the C3K2 module from YOLOv11, is applied to the high-channel regions of both the backbone and neck networks, significantly reducing redundant computation while maintaining expressive feature representation. In deeper layers of the backbone, spatial resolution decreases whereas channel dimensions increase, enabling the final C2_Ghost output to aggregate rich high-level semantic information. However, excessive channels may lead to redundancy and imbalance, potentially weakening informative features and retaining irrelevant ones. Therefore, the ECA mechanism is introduced between the C2_Ghost and SPPE modules to enhance channel-wise weighting and improve attention to critical regions.</p>
<p>Next, the PSA module in the original C2PSA structure is replaced with a frequency-domain self-attention solver (FSAS) to construct the C2_FSAS semantic feature extraction module. By transforming feature maps into the frequency domain for global relationship modeling, this module effectively captures long-range dependencies and distinguishes categories with similar shape or color characteristics, further improving impurity recognition accuracy. In the neck network, the traditional UpSample module is replaced with the lightweight and dynamically adjustable DySample module, which adaptively adjusts interpolation weights to restore spatial resolution and reduce edge-blurring effects, thereby enhancing boundary segmentation precision for sugarcane segments and impurities. Additionally, ECA is integrated between the final C3K2 and Segment modules in the neck to emphasize salient channels and suppress redundant ones, improving classification performance for multiple impurity categories. The architecture of the Improved YOLOv11n-seg is illustrated in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Improved YOLOv11n-seg network structure. (Conv, C3K2, SPPE, and Segment are modules from the original YOLOv11n-seg, where Conv uses SiLU activation by default, and Concat refers to feature channel concatenation.).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g004.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a neural network architecture divided into sections: Backbone, Neck, and Head. The Backbone includes Conv, C3K2, and C2_Ghost layers. The Neck uses DySample and Concat processes, while the Head produces three features labeled as Segment. A small insert image in the Backbone area serves as input features.</alt-text>
</graphic></fig>
<sec id="s2_3_1">
<label>2.3.1</label>
<title>C2_ghost module</title>
<p>Convolutional operations may produce redundant feature maps during feature extraction, and these redundant representations can be generated via computationally efficient linear transformations (<xref ref-type="bibr" rid="B6">Han et&#xa0;al., 2020</xref>). In the YOLOv11n-seg network, the C3K module used within the C3K2 block is a composite structure consisting of multiple convolutional layers. As the number of feature channels increases, these operations tend to generate a greater number of redundant feature representations, introducing unnecessary computational overhead. To address this issue, the Ghost module efficiently derives redundant feature representations from primary intrinsic features through low-cost linear transformations. Specifically, the Ghost module first employs a standard 1&#xd7;1 convolution to produce representative intrinsic feature maps. Subsequently, it enhances these intrinsic features by generating additional feature maps through a 3&#xd7;3 depthwise convolution, which serves as a computationally inexpensive linear operation. Finally, concatenation is applied to maintain consistent input-output channel dimensions, enabling the expression of feature representations equivalent to those from standard convolutions but with significantly reduced computational cost. As shown in the Ghost module section of <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>, the process can be mathematically formulated as <xref ref-type="disp-formula" rid="eq1">Equation 1</xref>.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>C2_X module structure. (The X Block in the figure refers to either a Ghost Block or an FSAS Block, which correspond to the C2_Ghost and C2_FSAS modules respectively. Split refers to average channel partitioning, where C refers to the number of channels; DW Conv refers to depthwise convolution; &#x3a6; refers to low-cost linear operations; FFT and IFFT denote Fast Fourier Transform and Inverse Fast Fourier Transform; WBLN refers to With Bias Layer Norm; BN refers to Batch Norm; and Act refers to the activation function.).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g005.tif">
<alt-text content-type="machine-generated">Diagram illustrating a neural network architecture. It features ghost modules, FSAS blocks, and FFT operations. The process includes convolution layers, ghost blocks, FFN, and X block. Various data flows are highlighted with different colors, indicating operations like Conv, BN, and Act with annotations like Act=False and BN=False. The structure divides into sections with specific operations such as FFT, IFFT, and Split, showing intricate interactions and processes within the architecture.</alt-text>
</graphic></fig>
<disp-formula id="eq1"><label>(1)</label>
<mml:math display="block" id="M1"><mml:mrow><mml:mi>O</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mi>p</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>F</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>D</mml:mi><mml:mi>W</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mn>3</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>F</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math>
</disp-formula>
<p>Where <italic>F</italic> refers to the input feature map and <italic>Output</italic> refers to the output feature map. To achieve high real-time performance and lightweight inference in sugarcane impurity detection, this study replaces the original C3K structure in the C3K2 module with a Ghost Block constructed using Ghost modules, thereby forming the lightweight C2_Ghost feature extraction module. As illustrated in the C2_X structure in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>, C2_Ghost sequentially processes feature information via a 1&#xd7;1 convolution, channel splitting (Split), Ghost Block transformation, channel concatenation (Concat), and a 1&#xd7;1 convolution. This design retains strong feature representation capability while substantially reducing the number of parameters and computational overhead.</p>
</sec>
<sec id="s2_3_2">
<label>2.3.2</label>
<title>C2_FSAS module</title>
<p>The sugarcane impurity detection task, when conducted in a conveyor belt format, suffers from motion blur. Moreover, sugarcane segments, tops, and leaves share highly similar color, shape, and texture characteristics, making feature discrimination challenging. To enhance the extraction of high-level semantic information and improve category differentiation, the FSAS mechanism is integrated into the C2PSA architecture of YOLOv11n-seg, forming a more efficient feature extraction module that performs global dependency modeling in the frequency domain. Specifically, FSAS utilizes the FFT to convert input feature maps from the spatial domain to the frequency domain, where low-frequency components represent smooth background structures and global contours, whereas high-frequency components preserve fine-grained structural details such as edges and textures. By performing frequency-domain filtering, FSAS effectively captures long-range pixel dependencies with low computational complexity. Subsequently, the refined frequency-domain features are converted back to the spatial domain using the IFFT. As illustrated in the FSAS section of <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>, this procedure is mathematically formulated in <xref ref-type="disp-formula" rid="eq2">Equations 2</xref>, <xref ref-type="disp-formula" rid="eq3">3</xref>.</p>
<disp-formula id="eq2"><label>(2)</label>
<mml:math display="block" id="M2"><mml:mrow><mml:mi>Q</mml:mi><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>K</mml:mi><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>V</mml:mi><mml:mo>=</mml:mo><mml:mi>S</mml:mi><mml:mi>p</mml:mi><mml:mi>l</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>D</mml:mi><mml:mi>W</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mn>3</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>F</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq3"><label>(3)</label>
<mml:math display="block" id="M3"><mml:mrow><mml:mi>O</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mi>p</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mi>F</mml:mi><mml:mo>+</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>W</mml:mi><mml:mi>B</mml:mi><mml:mi>L</mml:mi><mml:mi>N</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>I</mml:mi><mml:mi>F</mml:mi><mml:mi>F</mml:mi><mml:mi>T</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>F</mml:mi><mml:mi>F</mml:mi><mml:mi>T</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>Q</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x2297;</mml:mo><mml:mi>F</mml:mi><mml:mi>F</mml:mi><mml:mi>T</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>K</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x2297;</mml:mo><mml:mi>V</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math>
</disp-formula>
<p>According to the convolution theorem, the convolution or correlation of two features in the spatial domain corresponds to their element-wise product in the frequency domain (<xref ref-type="bibr" rid="B10">Kong et&#xa0;al., 2023</xref>). FSAS therefore extracts discriminative features through element-wise multiplication in the frequency domain, enhancing or suppressing specific frequency characteristics of sugarcane target objects. This mitigates detail loss resulting from target motion and yields more accurate category boundary representations. For impurities whose color and texture closely resemble those of sugarcane segments, their spectral distributions still present fine-grained distinctions in the frequency domain. Accordingly, FSAS can more effectively detect inter-class variations in sugarcane impurity detection. Following FSAS, a Feed-Forward Network (FFN) module is introduced to perform both non-linear transformations and inter-channel feature fusion, thus enhancing the model&#x2019;s representational capacity and classification ability and ultimately forming the FSAS Block. As illustrated in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref> (C2_X), C2_FSAS processes features sequentially via 1&#xd7;1 Conv, channel splitting (Split), the FSAS Block, channel concatenation (Concat), and a final 1&#xd7;1 Conv.</p>
</sec>
<sec id="s2_3_3">
<label>2.3.3</label>
<title>ECA module</title>
<p>In the sugarcane impurity detection task, image data include background regions and multiple impurity categories. The contribution of each feature channel to identifying sugarcane segments, leaves, tops, and roots differs significantly. With progressive decreases in spatial resolution, the number of feature channels increases substantially, causing deeper layers to aggregate richer semantic information while also introducing substantial feature redundancy. As a result, the discriminative channels associated with specific impurity categories may be suppressed by irrelevant or noisy channels, ultimately degrading detection accuracy. Compared with the SE attention mechanism, ECA eliminates the negative influence of dimensionality reduction on channel attention and achieves higher computational efficiency (<xref ref-type="bibr" rid="B23">Wang et&#xa0;al., 2020</xref>). Therefore, this study introduces the ECA mechanism after the C2_Ghost module, which possesses the lowest spatial resolution and the highest channel dimensionality, to reduce the influence of redundant and noisy channels. As illustrated in <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>, the ECA mechanism is mathematically formulated by <xref ref-type="disp-formula" rid="eq4">Equation 4</xref>.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>ECA module. (GAP refers to Global Average Pooling; 1D Conv denotes one-dimensional convolution; and expand refers to dimension expansion.).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g006.tif">
<alt-text content-type="machine-generated">Diagram of a neural network model process. It shows the flow from an input tensor labeled F, which has dimensions W, H, and C. The input goes through a section labeled GAP and 1D Conv with kernel size 3, producing a 1x1xC tensor. The tensor is passed through SiLU activation, expanded, and combined with the original input before outputting a tensor of dimensions W, H, C.</alt-text>
</graphic></fig>
<disp-formula id="eq4"><label>(4)</label>
<mml:math display="block" id="M4"><mml:mrow><mml:mi>O</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mi>p</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mi>F</mml:mi><mml:mo>&#x2297;</mml:mo><mml:mi>S</mml:mi><mml:mi>i</mml:mi><mml:mi>L</mml:mi><mml:mi>U</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mi>D</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>v</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>G</mml:mi><mml:mi>A</mml:mi><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>F</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math>
</disp-formula>
<p>The ECA mechanism first computes channel-wise average statistics of the input feature map using Global Average Pooling (GAP). A 1D convolution with a kernel size of 3 is then applied to the resulting 1&#xd7;1&#xd7;C descriptor to generate channel attention weights with minimal computational overhead. The obtained weights are subsequently broadcast to match the spatial dimensions of the input feature map. Finally, the recalibrated weights are multiplied with the original feature map to enable lightweight local cross-channel interaction.</p>
</sec>
<sec id="s2_3_4">
<label>2.3.4</label>
<title>DySample module</title>
<p>Upsampling is essential for reconstructing accurate pixel-level masks from low-resolution, high-level semantic feature maps. However, impurity boundaries in sugarcane commonly exhibit irregular shapes, and the contours of sugarcane segments and tops are highly similar. Traditional interpolation-based upsampling uses fixed interpolation kernels, which can easily result in feature loss and blurred boundaries when processing complex shapes. This reduces localization and segmentation accuracy between sugarcane segments and impurities. To overcome these limitations, the DySample module is incorporated into the neck network to replace the original UpSample operator. DySample utilizes learnable dynamic weights to adaptively refine sampling positions and interpolation weights based on local feature content. This enables differentiated upsampling for spatially diverse regions, allowing better preservation of structural details during high-resolution feature reconstruction. Furthermore, DySample is lightweight and does not require custom CUDA implementations, achieving excellent efficiency in terms of parameter count, FLOPs, and inference latency. As illustrated in <xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7</bold></xref>, DySample generates the upsampled feature X<sub>o</sub> by feeding the input feature X<sub>I</sub> and sampling set &#x3c2;, obtained from the sampling point generator, into the grid_sample function, as formulated in <xref ref-type="disp-formula" rid="eq5">Equation 5</xref>.</p>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>DySample module. (X<sub>I</sub>, X<sub>o</sub>, O, and G denote the input features, upsampled features, offset, and original sampling grid, respectively. g refers to the grouping number, s refers to the upsampling ratio, and &#x3c2; refers to the sampling set.).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g007.tif">
<alt-text content-type="machine-generated">Diagram illustrating sampling point generation in DySample. The top part shows “Sampling based dynamic upsampling” with a flow from input \(X_I\) through a sampling point generator, a sampling set \( \zeta \), to output \(X_O\). The bottom section details static and dynamic scope factors. Static uses a constant multiplier and pixel shuffle, while dynamic involves two linear functions combining outputs for adaptive scaling, followed by pixel shuffle.</alt-text>
</graphic></fig>
<disp-formula id="eq5"><label>(5)</label>
<mml:math display="block" id="M5"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>o</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>g</mml:mi><mml:mi>r</mml:mi><mml:mi>i</mml:mi><mml:mi>d</mml:mi><mml:mo>_</mml:mo><mml:mi>s</mml:mi><mml:mi>a</mml:mi><mml:mi>m</mml:mi><mml:mi>p</mml:mi><mml:mi>l</mml:mi><mml:mi>e</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mi>I</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:mi>&#x3c2;</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math>
</disp-formula>  
<p>The input features are processed through a linear layer to produce a 2gs<sup>2</sup>&#xd7;H&#xd7;W offset, which is subsequently converted via pixel shuffle into a 2g&#xd7;sH&#xd7;sW offset O. The static and dynamic offsets are mathematically formulated in <xref ref-type="disp-formula" rid="eq6">Equations 6</xref> and <xref ref-type="disp-formula" rid="eq7">7</xref>, respectively. The sampling set &#x3c2; is then defined as the sum of offset O and the original sampling grid G, as formulated in <xref ref-type="disp-formula" rid="eq8"><bold>Equation 8</bold></xref>.</p>
<disp-formula id="eq6"><label>(6)</label>
<mml:math display="block" id="M6"><mml:mrow><mml:mi>O</mml:mi><mml:mo>=</mml:mo><mml:mi>p</mml:mi><mml:mi>i</mml:mi><mml:mi>x</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mi>s</mml:mi><mml:mi>h</mml:mi><mml:mi>u</mml:mi><mml:mi>f</mml:mi><mml:mi>f</mml:mi><mml:mi>l</mml:mi><mml:mi>e</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mn>0.25</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mi>l</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mi>I</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq7"><label>(7)</label>
<mml:math display="block" id="M7"><mml:mrow><mml:mi>O</mml:mi><mml:mo>=</mml:mo><mml:mi>p</mml:mi><mml:mi>i</mml:mi><mml:mi>x</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mi>s</mml:mi><mml:mi>h</mml:mi><mml:mi>u</mml:mi><mml:mi>f</mml:mi><mml:mi>f</mml:mi><mml:mi>l</mml:mi><mml:mi>e</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mn>0.5</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>g</mml:mi><mml:mi>m</mml:mi><mml:mi>o</mml:mi><mml:mi>i</mml:mi><mml:mi>d</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>l</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:msub><mml:mi>r</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mi>I</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mo>+</mml:mo><mml:mi>l</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:msub><mml:mi>r</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mi>I</mml:mi></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq8"><label>(8)</label>
<mml:math display="block" id="M8"><mml:mrow><mml:mi>&#x3c2;</mml:mi><mml:mo>=</mml:mo><mml:mi>G</mml:mi><mml:mo>+</mml:mo><mml:mi>O</mml:mi></mml:mrow></mml:math>
</disp-formula>
</sec>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Model training configuration</title>
<p>The hardware configuration for model training in this study consisted of an Intel<sup>&#xae;</sup> Xeon<sup>&#xae;</sup> Gold 6256 CPU @ 3.60GHz and an NVIDIA RTX A6000 GPU with 48 GB. The software environment was based on Ubuntu 20.04, using CUDA 11.8 and cuDNN 8.6.0 to accelerate deep learning computations. A dedicated Anaconda virtual environment was created for model training, with key dependencies including PyTorch 2.0.1 and OpenCV 4.10, and Python 3.10 as the programming language. During training, the number of epochs was set to 200, the batch size was 32, and the initial learning rate was 0.01. The AdamW optimizer was employed, and 4 data loader workers were used.</p>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Evaluation metrics</title>
<p>Model testing was conducted on a Jetson Xavier NX edge device running JetPack 5.1, configured with TensorRT 8.5.2.2, CUDA 11.4, and cuDNN 8.6.0, and equipped with 16 GB of GPU memory. To objectively evaluate the performance of the Improved YOLOv11n-seg model, the following evaluation metrics were utilized: Precision (P), Recall (R), Mean Average Precision (mAP), parameter count, and inference speed (FPS). P measures the proportion of correctly predicted positive samples, indicating the model&#x2019;s ability to control false positives. R measures the proportion of actual positive samples correctly identified, reflecting its effectiveness in minimizing false negatives. mAP provides a comprehensive assessment of detection and segmentation performance based on the mean accuracy over all categories. The number of parameters reflects model complexity, where fewer parameters indicate greater suitability for deployment on resource-limited devices. FPS measures the number of frames processed per second, with higher FPS indicating faster inference and improved efficiency in real-time applications.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results and analysis</title>
<sec id="s3_1">
<label>3.1</label>
<title>Comparison of different object detection models</title>
<p>This study utilized five YOLO-based instance segmentation models, all trained using an identical dataset. Each model was trained for 200 epochs, and the best-performing weights were selected as the final model parameters. The performance comparison of all models on the test set is presented in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Performance comparison of different YOLO-based instance segmentation models on the test set.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Model</th>
<th valign="middle" align="center">P</th>
<th valign="middle" align="center">R</th>
<th valign="middle" align="center">mAP<sub>0.5</sub></th>
<th valign="middle" align="center">mAP<sub>0.5:0.95</sub></th>
<th valign="middle" align="center">Parameters(&#xd7;10<sup>5</sup>)</th>
<th valign="middle" align="center">Speed(FPS)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">YOLOv5n-seg</td>
<td valign="middle" align="center">96.3</td>
<td valign="middle" align="center">95.7</td>
<td valign="middle" align="center">98.4</td>
<td valign="middle" align="center">80.2</td>
<td valign="middle" align="center">27.6</td>
<td valign="middle" align="center">19.6</td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv8n-seg</td>
<td valign="middle" align="center">96.1</td>
<td valign="middle" align="center">96.2</td>
<td valign="middle" align="center">98.5</td>
<td valign="middle" align="center">81.8</td>
<td valign="middle" align="center">32.6</td>
<td valign="middle" align="center">19.4</td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv9t-seg</td>
<td valign="middle" align="center">96.6</td>
<td valign="middle" align="center">95.7</td>
<td valign="middle" align="center">98.6</td>
<td valign="middle" align="center">81.1</td>
<td valign="middle" align="center"><bold>23.8</bold></td>
<td valign="middle" align="center">10.0</td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv10n-seg</td>
<td valign="middle" align="center">96.6</td>
<td valign="middle" align="center">96.3</td>
<td valign="middle" align="center">98.7</td>
<td valign="middle" align="center">81.2</td>
<td valign="middle" align="center">25.2</td>
<td valign="middle" align="center"><bold>20.0</bold></td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv11n-seg</td>
<td valign="middle" align="center">95.6</td>
<td valign="middle" align="center">96.3</td>
<td valign="middle" align="center">98.3</td>
<td valign="middle" align="center">81.1</td>
<td valign="middle" align="center">28.4</td>
<td valign="middle" align="center">18.0</td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv12n-seg</td>
<td valign="middle" align="center">95.4</td>
<td valign="middle" align="center">96.8</td>
<td valign="middle" align="center">98.3</td>
<td valign="middle" align="center">81.1</td>
<td valign="middle" align="center">28.1</td>
<td valign="middle" align="center">14.2</td>
</tr>
<tr>
<td valign="middle" align="center">Improved YOLOv11n-seg</td>
<td valign="middle" align="center"><bold>97.0</bold></td>
<td valign="middle" align="center"><bold>98.1</bold></td>
<td valign="middle" align="center"><bold>99.2</bold></td>
<td valign="middle" align="center"><bold>82.9</bold></td>
<td valign="middle" align="center">25.5</td>
<td valign="middle" align="center">18.0</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Bold text indicates optimal values.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>The results demonstrate that the proposed Improved YOLOv11n-seg model exhibits superior performance across key metrics, including P, R, and mAP, confirming the effectiveness and soundness of the model enhancement strategy. Regarding detection accuracy, Improved YOLOv11n-seg achieves a P of 97.0%, the highest among the compared models, with improved discriminative performance for sugarcane segmentation targets and a reduced false positive rate. Concurrently, the improved model achieved an R of 98.1%, surpassing all compared models. It exceeded the original YOLOv11n-seg by 1.8% and outperformed YOLOv5n-seg and YOLOv9t-seg by 2.4%. This demonstrates improved target sensitivity, thereby reducing false negatives. Regarding segmentation accuracy, Improved YOLOv11n-seg achieves mAP<sub>0.5</sub> and mAP<sub>0.5:0.95</sub> of 99.2% and 82.9%, respectively, outperforming all comparison models. Notably, mAP<sub>0.5:0.95</sub> demonstrates a 1.8% improvement over the original YOLOv11n-seg, indicating improved robustness and localization accuracy for segmentation targets across different scales. Enhancing segmentation performance at higher IoU thresholds is of greater practical significance for accurate sugarcane impurity rate detection.</p>
<p>Regarding model parameter count and inference speed, Improved YOLOv11n-seg has 2,550,000 parameters, which is only marginally higher than that of YOLOv10n-seg. Compared with YOLOv8n-seg, YOLOv11n-seg, and YOLOv12n-seg, it achieves parameter reductions of 21.8%, 10.2%, and 9.3%, respectively. Its detection speed of 18 FPS is equivalent to YOLOv11n-seg, while substantially surpassing both YOLOv9t-seg and YOLOv12n-seg. The model thus balances real-time efficiency and detection accuracy.</p>
<p><xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref> presents the detection results for four sugarcane categories on the test set, including a comparison between the original and Improved YOLOv11n-seg models. As shown, Improved YOLOv11n-seg demonstrates varying degrees of improvement across all performance metrics for each category, indicating consistently enhanced detection performance. The enhanced model provides more precise segmentation boundaries. Among the categories, cane_root exhibits the most significant improvement, with R and AP<sub>0.5:0.95</sub> improving by 2.8% and 3.5%, respectively. Furthermore, cane_segment and cane_top show relatively high performance metrics, whereas cane_leaves and cane_root show the lowest, primarily due to higher intra-class variability in color, shape, and texture.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Test sets for each category before and after model improvement.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" rowspan="2" align="center">Class</th>
<th valign="middle" colspan="4" align="center">YOLOv11n-seg</th>
<th valign="middle" colspan="4" align="center">Improved YOLOv11n-seg</th>
</tr>
<tr>
<th valign="middle" align="center">P</th>
<th valign="middle" align="center">R</th>
<th valign="middle" align="center">AP<sub>0.5</sub></th>
<th valign="middle" align="center">AP<sub>0.5:0.95</sub></th>
<th valign="middle" align="center">P</th>
<th valign="middle" align="center">R</th>
<th valign="middle" align="center">AP<sub>0.5</sub></th>
<th valign="middle" align="center">AP<sub>0.5:0.95</sub></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">cane_segment</td>
<td valign="middle" align="center">97.0</td>
<td valign="middle" align="center">98.7</td>
<td valign="middle" align="center">99.3</td>
<td valign="middle" align="center">86.7</td>
<td valign="middle" align="center">98.8</td>
<td valign="middle" align="center">98.7</td>
<td valign="middle" align="center">99.4</td>
<td valign="middle" align="center">87.7</td>
</tr>
<tr>
<td valign="middle" align="center">cane_top</td>
<td valign="middle" align="center">96.2</td>
<td valign="middle" align="center">97.4</td>
<td valign="middle" align="center">98.7</td>
<td valign="middle" align="center">87.9</td>
<td valign="middle" align="center">97.8</td>
<td valign="middle" align="center">98.5</td>
<td valign="middle" align="center">99.4</td>
<td valign="middle" align="center">88.6</td>
</tr>
<tr>
<td valign="middle" align="center">cane_leaves</td>
<td valign="middle" align="center">93.5</td>
<td valign="middle" align="center">93.3</td>
<td valign="middle" align="center">96.7</td>
<td valign="middle" align="center">74.7</td>
<td valign="middle" align="center">94.4</td>
<td valign="middle" align="center">96.5</td>
<td valign="middle" align="center">98.6</td>
<td valign="middle" align="center">76.6</td>
</tr>
<tr>
<td valign="middle" align="center">cane_root</td>
<td valign="middle" align="center">95.8</td>
<td valign="middle" align="center">95.8</td>
<td valign="middle" align="center">98.7</td>
<td valign="middle" align="center">75.2</td>
<td valign="middle" align="center">97.0</td>
<td valign="middle" align="center">98.6</td>
<td valign="middle" align="center">99.4</td>
<td valign="middle" align="center">78.7</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In summary, a comprehensive analysis of multiple performance metrics indicates that Improved YOLOv11n-seg achieves a well-balanced trade-off between accuracy and efficiency, making it more suitable for sugarcane impurity segmentation tasks.</p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Ablation study results</title>
<p>To evaluate the effectiveness of the Improved YOLOv11n-seg model for sugarcane object detection, YOLOv11n-seg was incrementally enhanced with the C2_Ghost, C2_FSAS, ECA, and DySample modules. Model performance was evaluated through sequential module integration. The ablation test results for each configuration on the test set are summarized in <xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Ablation study.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" colspan="4" align="center">Module</th>
<th valign="middle" rowspan="2" align="center">P</th>
<th valign="middle" rowspan="2" align="center">R</th>
<th valign="middle" rowspan="2" align="center">mAP<sub>0.5</sub></th>
<th valign="middle" rowspan="2" align="center">mAP<sub>0.5:0.95</sub></th>
<th valign="middle" rowspan="2" align="center">Parameters (&#xd7;10<sup>5</sup>)</th>
<th valign="middle" rowspan="2" align="center">Speed (FPS)</th>
</tr>
<tr>
<th valign="middle" align="left">C2_Ghost</th>
<th valign="middle" align="left">C2_FSAS</th>
<th valign="middle" align="left">ECA</th>
<th valign="middle" align="left">DySample</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">95.6</td>
<td valign="middle" align="center">96.3</td>
<td valign="middle" align="center">98.3</td>
<td valign="middle" align="center">80.8</td>
<td valign="middle" align="center"><bold>24.5</bold></td>
<td valign="middle" align="center"><bold>19.3</bold></td>
</tr>
<tr>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">96.3</td>
<td valign="middle" align="center">97.4</td>
<td valign="middle" align="center">98.9</td>
<td valign="middle" align="center">82.5</td>
<td valign="middle" align="center">29.3</td>
<td valign="middle" align="center">17.4</td>
</tr>
<tr>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">97.4</td>
<td valign="middle" align="center">95.7</td>
<td valign="middle" align="center">98.8</td>
<td valign="middle" align="center">81.8</td>
<td valign="middle" align="center">28.4</td>
<td valign="middle" align="center">17.9</td>
</tr>
<tr>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">96.7</td>
<td valign="middle" align="center">97.4</td>
<td valign="middle" align="center">98.8</td>
<td valign="middle" align="center">82.0</td>
<td valign="middle" align="center">28.5</td>
<td valign="middle" align="center">18.2</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">96.4</td>
<td valign="middle" align="center">96.4</td>
<td valign="middle" align="center">98.8</td>
<td valign="middle" align="center">81.4</td>
<td valign="middle" align="center">24.5</td>
<td valign="middle" align="center">19.2</td>
</tr>
<tr>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">97.2</td>
<td valign="middle" align="center">97.8</td>
<td valign="middle" align="center">99.1</td>
<td valign="middle" align="center"><bold>82.9</bold></td>
<td valign="middle" align="center">29.4</td>
<td valign="middle" align="center">17.6</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"><bold>97.4</bold></td>
<td valign="middle" align="center">96.6</td>
<td valign="middle" align="center">98.9</td>
<td valign="middle" align="center">82.5</td>
<td valign="middle" align="center">25.4</td>
<td valign="middle" align="center">18.6</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">96.8</td>
<td valign="middle" align="center">96.6</td>
<td valign="middle" align="center">99.0</td>
<td valign="middle" align="center">81.8</td>
<td valign="middle" align="center">24.6</td>
<td valign="middle" align="center">18.9</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">97.0</td>
<td valign="middle" align="center"><bold>98.1</bold></td>
<td valign="middle" align="center"><bold>99.2</bold></td>
<td valign="middle" align="center"><bold>82.9</bold></td>
<td valign="middle" align="center">25.5</td>
<td valign="middle" align="center">18.0</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Bold text indicates optimal values.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>The results indicate that progressively incorporating the C2_Ghost, C2_FSAS, ECA, and DySample modules consistently improves detection performance for sugarcane impurities, although the contribution of each module differs in magnitude. Overall, the Improved YOLOv11n-seg model incorporating all four modules attained the best overall performance in key metrics, namely P, R, mAP<sub>0.5</sub>, and mAP<sub>0.5:0.95</sub>.</p>
<p>Examining single-module contributions, the introduction of the C2_Ghost module reduced the model&#x2019;s parameter count by 13.7% and increased detection speed by 7.2%, indicating its efficacy in reducing model complexity. However, incorporating C2_Ghost led to a slight reduction in mAP<sub>0.5:0.95</sub>, indicating a minor decrease in the model&#x2019;s ability to accurately segment target regions. The incorporation of the ECA module increased P to a peak of 97.4%, highlighting the effectiveness of the channel attention mechanism in enhancing feature representation and suppressing false positives. Meanwhile, adding either the C2_FSAS or DySample module increased R, suggesting that these modules improve target coverage and reduce false negatives. Compared to the baseline model, the addition of any single module (except C2_Ghost) led to incremental improvements in mAP<sub>0.5:0.95</sub>, with C2_FSAS achieving the greatest gain of 1.4%, underscoring its crucial role in extracting semantic features for sugarcane objects. Regarding detection metrics, the model maintains high segmentation precision on the test set after integrating the C2_FSAS and ECA modules. This demonstrates that these modules do not induce overfitting under varying illumination conditions, indicating that the model exhibits strong generalization capability.</p>
<p>Among dual-module combinations, C2_FSAS+DySample delivered the most pronounced overall performance gains, with mAP<sub>0.5</sub> and mAP<sub>0.5:0.95</sub> values approaching those of the four-module configuration. The C2_Ghost+ECA pairing maintained a low parameter count while increasing inference speed to 19.2 FPS, outperforming YOLOv11n-seg across all other metrics. For the three-module combinations, the model achieved a more balanced performance; however, compared to the C2_FSAS+DySample combination, the R value decreased. After integrating all four modules, the Improved YOLOv11n-seg attained the highest R, mAP<sub>0.5</sub>, and mAP<sub>0.5:0.95</sub> values of 98.1%, 99.2%, and 82.9%, respectively. Compared to models employing a single module, this indicates that multi-module fusion synergistically enhances feature extraction, contextual modelling, and boundary refinement, significantly improving the model&#x2019;s segmentation capability for sugarcane objects.</p>
<p>In summary, ablation studies indicate that the enhanced model preserves high-precision object segmentation performance on the test set, reflecting robust generalization ability. Through the synergistic integration of its four modules, Improved YOLOv11n-seg achieves the highest overall performance in sugarcane impurity detection, substantiating the effectiveness of the proposed enhancement strategy.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Comparison of model segmentation results</title>
<p>To validate the detection performance of our model against six other YOLO variants on sugarcane segmentation targets, each model was evaluated on the same set of sugarcane segmentation images. The segmentation results are shown in <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8</bold></xref>. These results indicate that all seven models achieve satisfactory segmentation for the four target categories in most instances, with the Improved YOLOv11n-seg producing the most accurate and complete segmentation results. In Image1, Improved YOLOv11n-seg exhibited superior robustness in edge segmentation and category differentiation, whereas the other six models demonstrated incomplete segmentation and segmentation errors. YOLOv5n-seg, YOLOv8n-seg, YOLOv9t-seg, and YOLOv10n-seg all display missed detections, as illustrated in Image2. YOLOv9t-seg, YOLOv10n-seg, and YOLOv11n-seg generate false positives, as depicted in Image3. In Image4, YOLOv5n-seg, YOLOv8n-seg, YOLOv9t-seg, and YOLOv11n-seg exhibit missed detections, whereas YOLOv10n-seg, YOLOv11n-seg, and YOLOv12n-seg produce false detections. Overall, false negatives and false positives in the comparison models primarily occur when two segmentation targets are adjacent. When targets are isolated, feature extraction is more effective, resulting in accurate segmentation of most complete target regions. These results demonstrate that the proposed Improved YOLOv11n-seg outperforms the comparison models in edge segmentation precision and feature discrimination between categories, thereby demonstrating superior impurity recognition and regional segmentation accuracy.</p>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>Detection results of different segmentation models. (Red rectangles indicate areas with significant segmentation discrepancies; each color mask corresponds to a distinct category).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g008.tif">
<alt-text content-type="machine-generated">Comparative analysis of different YOLO version algorithms for image segmentation of impurities in sugarcane. Rows depict the results from “Original image” to “Improved YOLOv11n-seg”. Columns show four different images labeled as Image1 to Image4. Each segment showcases variations in segmentation efficacy and precision.</alt-text>
</graphic></fig>
<p>The proposed model accurately separates sugarcane segments from various impurity categories, effectively reducing category confusion. It maintains precise segmentation boundaries even in transitional edge regions, making it highly suitable for sugarcane impurity detection tasks.</p>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Feature output visualization</title>
<p>To evaluate the feature extraction capability of the models, feature extraction visualizations were performed for Improved YOLOv11n-seg and six comparative models. The visualized feature maps correspond to the final outputs from the Neck module of each model. Utilizing Image3 from <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8</bold></xref>, the results are shown in <xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9</bold></xref>. In Feature 1, the feature attention distribution across categories for YOLOv10n-seg and YOLOv11n-seg is significantly lower than in the other models. Conversely, Improved YOLOv11n-seg displays the highest overall feature attention, with more accurate spatial localization of attention regions. Its feature attention intensity gradually diminishes from the interior regions of the segmented object towards the peripheral regions. Compared to the comparative models, the feature distribution within individual segmented objects is more uniform in the proposed model. For instance, YOLOv5n-seg, YOLOv8n-seg, YOLOv9t-seg and YOLOv12n-seg exhibit the highest feature intensity at the center or ends of segmented objects, with lower intensity in other main body regions. Uniform feature distribution within a single segmented object aids the model in expressing the object&#x2019;s overall semantic structure, thus enhancing segmentation boundary integrity. Conversely, features concentrated solely at central or localized areas may reduce the model&#x2019;s capacity to precisely delineate the true object boundaries.</p>
<fig id="f9" position="float">
<label>Figure&#xa0;9</label>
<caption>
<p>Visualization of feature outputs from different segmentation models. (Darker colors indicate higher levels of attention).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g009.tif">
<alt-text content-type="machine-generated">Heatmaps display feature activations from different YOLO segmentation models. Each row represents a model version, and columns show feature variations. The improved YOLOv11n-seg at the bottom row indicates enhanced feature detection. Colors range from blue (low activation) to red (high activation).</alt-text>
</graphic></fig>
<p>In Feature 2, the spatial distribution of features remains largely consistent, although their shapes differ significantly. For example, YOLOv11n-seg displays more circular features, whereas other models predominantly maintain strip-like features aligned with the object&#x2019;s orientation. YOLOv5n-seg exhibits higher feature richness; however, its image edges contain redundant features that are not effectively filtered by the model. This can impede the accurate representation of category-specific features along image edges during subsequent feature extraction. In Feature 3, although YOLOv5n-seg demonstrates stronger feature expression in Feature 2 for segmentation targets, its overall feature representation in Feature 3 remains weaker than that of other comparative models. Furthermore, YOLOv8n-seg exhibits markedly lower attention to segmentation target features compared to Improved YOLOv11n-seg. The proposed model displays the strongest feature responses at the center of the segmented object, gradually diminishing towards non-segmented regions. Concurrently, non-segmented regions across different objects demonstrate specific inter-object feature correlations. As demonstrated by the detection results in Image3 for YOLOv8n-seg, YOLOv12n-seg, and Improved YOLOv11n-seg, this feature distribution not only enhances holistic perception of primary object regions but also facilitates discrimination between adjacent objects via inter-object feature correlations.</p>
<p>In summary, the visualization results in <xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9</bold></xref> further demonstrate that Improved YOLOv11n-seg achieves more accurate segmentation of edge information in sugarcane detection tasks and more effectively differentiates between segmented objects, thereby validating the contribution of each enhancement module to feature representation.</p>
</sec>
<sec id="s3_5">
<label>3.5</label>
<title>Model acceleration based on TensorRT</title>
<p>To further enhance real-time performance, half-precision (FP16) quantization using TensorRT was applied to reduce model size and accelerate inference. The detection speeds after acceleration for each model are presented in <xref ref-type="fig" rid="f10"><bold>Figure&#xa0;10</bold></xref>. As shown in the figure, all models exhibit a substantial increase in inference speed after TensorRT-based FP16 quantization. Specifically, YOLOv11n-seg attained 36.5 FPS post-acceleration, suggesting that its architecture is particularly well-suited for accelerated execution on the Jetson Xavier NX platform relative to other models. Improved YOLOv11n-seg achieves 34.8 FPS after acceleration, corresponding to a 93.3% improvement in inference speed. Although its speed remains slightly lower than that of YOLOv11n-seg, it still outperforms all other comparison models. After TensorRT optimization, Improved YOLOv11n-seg consistently maintains a processing speed above 30 FPS, fully meeting real-time video-level detection requirements.</p>
<fig id="f10" position="float">
<label>Figure&#xa0;10</label>
<caption>
<p>Comparison of model detection speed after TensorRT acceleration.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1745861-g010.tif">
<alt-text content-type="machine-generated">Bar chart comparing frames per second (FPS) performance across different models. YOLOv5n-seg has 35.6 FPS, YOLOv8n-seg 35.4 FPS, YOLOv9t-seg 28.7 FPS, YOLOv10n-seg 33.9 FPS, YOLOv11n-seg 36.5 FPS, YOLOv12n-seg 32.4 FPS, and Improved YOLOv11n-seg 34.8 FPS.</alt-text>
</graphic></fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>In this study, edge devices are defined as embedded platforms featuring TensorRT acceleration. These platforms support FP16 and INT8 inference, thereby significantly reducing computational costs and memory usage while preserving detection accuracy and improving inference speed. This study proposes a lightweight segmentation model, Improved YOLOv11n-seg, for impurity detection in mechanically harvested sugarcane. Experimental results show that the model significantly improves segmentation accuracy while maintaining real-time performance on edge devices. The results validate the proposed enhancements: C2_Ghost reduces computational redundancy; C2_FSAS strengthens long-range frequency-domain feature modeling; ECA optimizes attention allocation among information channels; and DySample enables refined reconstruction of complex boundary details. Furthermore, the findings indicate that attention mechanisms and frequency-domain feature learning provide notable benefits in addressing challenges such as motion blur and texture similarity.</p>
<p>In practical sugarcane impurity rate monitoring, precise segmentation constitutes the foundation for accurate perception, whereas weight-based impurity detection necessitates establishing a mapping between visual features and weight. Subsequent research will investigate the relationships among area, geometric characteristics, and weight of different segmentation targets, facilitating the construction of a weight mapping model for each detection category. Converting segmentation outcomes into impurity weight estimates enables practical application within sugar mill processing workflows. Sensor installation during unloading, feeding, and secondary impurity removal stages enables continuous monitoring of sugarcane impurity rates. Detected impurity rates can provide guidance for raw material quality evaluation and pricing decisions. Alternatively, the system may be integrated into secondary impurity removal mechanisms to accurately detect and localize impurities, such as cane tops and roots, thereby facilitating precise removal. Moreover, deployment on sugarcane harvesters allows continuous estimation of impurity rates to optimize fan speed and conveyor parameters, enhancing impurity removal efficiency while minimizing crop loss.</p>
<p>To address a broader range of impurity types, future research will expand the dataset to incorporate inorganic impurities, thereby enabling more comprehensive sugarcane impurity detection.</p>
</sec>
<sec id="s5" sec-type="conclusions">
<label>5</label>
<title>Conclusion</title>
<list list-type="order">
<list-item>
<p>The proposed Improved YOLOv11n-seg demonstrates superior overall performance in sugarcane impurity detection. Compared with the original YOLOv11n-seg, it achieves improvements in P, R, mAP<sub>0.5</sub>, and mAP<sub>0.5:0.95</sub>, reaching 97.0%, 98.1%, 99.2%, and 82.9%, respectively. This effectively reduces the risk of false positives and false negatives. With TensorRT acceleration, the model achieves a detection speed of 34.8 FPS on the Jetson Xavier NX, maintaining strong real-time performance. The enhanced model synergistically improves feature extraction and category discrimination through four modules, while reducing the total number of parameters by 10.2%. It combines high accuracy with a lightweight architecture, demonstrating the effectiveness of the proposed improvement strategy.</p></list-item>
<list-item>
<p>Ablation studies confirmed the efficacy of each enhancement module. Results indicate that the C2_Ghost, C2_FSAS, ECA, and DySample modules demonstrate complementary and synergistic contributions to feature representation. Specifically, C2_Ghost significantly contributes to reducing model parameters, C2_FSAS and DySample significantly improve mAP, while ECA substantially enhances precision. Thus, the collaborative design of multi-attention modules provides substantial improvements for sugarcane impurity detection tasks.</p></list-item>
<list-item>
<p>Segmentation results and feature visualization demonstrate that the proposed model exhibits enhanced representational capacity in semantic extraction and feature modelling. Compared to baseline models, it more effectively preserves segmentation boundary integrity and suppresses confusion between sugarcane segments and impurity categories. Overall, the proposed model combines high accuracy, lightweight architecture, and real-time performance, offering a reliable technical solution for detecting impurity rates in mechanically harvested sugarcane.</p></list-item>
</list>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material. Further inquiries can be directed to the corresponding authors.</p></sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>FH: Conceptualization, Funding acquisition, Methodology, Software, Validation, Writing &#x2013; original draft. SLZ: Conceptualization, Methodology, Software, Validation, Writing &#x2013; original draft. PC:&#xa0;Conceptualization, Methodology, Software, Writing &#x2013; original draft. GD: Conceptualization, Funding acquisition, Methodology, Supervision, Writing &#x2013; review &amp; editing. SF: Conceptualization, Funding acquisition, Methodology, Writing &#x2013; review &amp; editing. GL:&#xa0;Conceptualization, Methodology, Project administration, Supervision, Writing &#x2013; review &amp; editing. ZC: Conceptualization, Writing &#x2013; original draft, Writing &#x2013; review &amp; editing. SAZ: Conceptualization, Writing &#x2013; review &amp; editing. LL: Conceptualization, Data curation, Writing &#x2013; original draft. BY: Conceptualization, Validation, Writing &#x2013; original draft. SQ: Funding acquisition, Project administration, Writing &#x2013; review &amp; editing. XW: Data curation, Formal Analysis, Writing &#x2013; original draft. YD: Data curation, Investigation, Writing &#x2013; original draft. ZL: Data curation, Writing &#x2013; original draft.</p></sec>
<ack>
<title>Acknowledgments</title>
<p>We gratefully acknowledge the foundations for their financial support of this study, our supervisors for their invaluable guidance and continuous support throughout the research, and our researchers and collaborators for their valuable contributions to this work.</p>
</ack>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s10" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Afsharnia</surname> <given-names>F.</given-names></name>
<name><surname>Marzban</surname> <given-names>A.</given-names></name>
<name><surname>Asoodar</surname> <given-names>M. A.</given-names></name>
<name><surname>Abdeshahi</surname> <given-names>A.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Sugarcane green and sustainable harvest by demand management of the sugarcane harvester machine&#x2019;s spare parts</article-title>. <source>Sustain. Futures</source> <volume>100920</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.sftr.2025.100920</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>M.</given-names></name>
<name><surname>Jin</surname> <given-names>C.</given-names></name>
<name><surname>Mo</surname> <given-names>G.</given-names></name>
<name><surname>Liu</surname> <given-names>S.</given-names></name>
<name><surname>Xu</surname> <given-names>J.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Online detection method of impurity rate in wheat mechanized harvesting based on improved U-net model</article-title>. <source>Trans. Chin. Soc. Agric. Machinery.</source> <volume>54</volume>, <fpage>73</fpage>&#x2013;<lpage>82</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.6041/j.issn.1000-1298.2023.02.007</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>De Mello</surname> <given-names>M. L.</given-names></name>
<name><surname>Barros</surname> <given-names>N. Z.</given-names></name>
<name><surname>Speran&#xe7;a</surname> <given-names>M. A.</given-names></name>
<name><surname>Pereira</surname> <given-names>F. M. V.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Impurities in raw sugarcane before and after biorefinery processing</article-title>. <source>Food Analytical Methods</source> <volume>15</volume>, <fpage>96</fpage>&#x2013;<lpage>103</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s12161-021-02105-1</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ding</surname> <given-names>Z.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Hu</surname> <given-names>B.</given-names></name>
<name><surname>Chen</surname> <given-names>Z.</given-names></name>
<name><surname>Jia</surname> <given-names>H.</given-names></name>
<name><surname>Shi</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>a). 
<article-title>Itd-yolo: an improved yolo model for impurities in premium green tea detection</article-title>. <source>Foods.</source> <volume>14</volume>, <fpage>1554</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/foods14091554</pub-id>, PMID: <pub-id pub-id-type="pmid">40361636</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ding</surname> <given-names>Z.</given-names></name>
<name><surname>Wang</surname> <given-names>M.</given-names></name>
<name><surname>Hu</surname> <given-names>B.</given-names></name>
<name><surname>Chen</surname> <given-names>Z.</given-names></name>
<name><surname>Dong</surname> <given-names>C.</given-names></name>
</person-group> (<year>2025</year>b). 
<article-title>Impurity detection of premium green tea based on improved lightweight deep learning model</article-title>. <source>Food Res. Int.</source> <volume>200</volume>, <fpage>115516</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.foodres.2024.115516</pub-id>, PMID: <pub-id pub-id-type="pmid">39779147</pub-id>
</mixed-citation>
</ref>
<ref id="B501">
<mixed-citation publication-type="web">
<person-group person-group-type="author"><collab>Food and Agriculture Organization of the United Nations</collab>
</person-group>. (<year>2025</year>). <source>Sugar cane production</source>. Available online at: <uri xlink:href="https://ourworldindata.org/grapher/sugar-cane-production">https://ourworldindata.org/grapher/sugar-cane-production</uri> (Accessed <date-in-citation content-type="access-date">November 1, 2025</date-in-citation>).
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Han</surname> <given-names>K.</given-names></name>
<name><surname>Wang</surname> <given-names>Y.</given-names></name>
<name><surname>Tian</surname> <given-names>Q.</given-names></name>
<name><surname>Guo</surname> <given-names>J.</given-names></name>
<name><surname>Xu</surname> <given-names>C.</given-names></name>
<name><surname>Xu</surname> <given-names>C.</given-names></name>
</person-group> (<year>2020</year>). &#x201c;
<article-title>Ghostnet: More features from cheap operations</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR)</conf-name>. (<publisher-loc>Piscataway, NJ</publisher-loc>: 
<publisher-name>IEEE Computer Society</publisher-name>), <fpage>1580</fpage>&#x2013;<lpage>1589</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.1911.11907</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>He</surname> <given-names>K.</given-names></name>
<name><surname>Gkioxari</surname> <given-names>G.</given-names></name>
<name><surname>Dollar</surname> <given-names>P.</given-names></name>
<name><surname>Girshick</surname> <given-names>R.</given-names></name>
</person-group> (<year>2017</year>). &#x201c;
<article-title>Mask r-cnn</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR)</conf-name>. (<publisher-loc>Piscataway, NJ</publisher-loc>: 
<publisher-name>IEEE Computer Society</publisher-name>), <fpage>2961</fpage>&#x2013;<lpage>2969</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.1703.06870</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Huang</surname> <given-names>S.</given-names></name>
<name><surname>Liang</surname> <given-names>X.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Detecting the impurities in tea using an improved YOLOv5 model</article-title>. <source>Trans. CSAE.</source> <volume>38</volume>, <fpage>329</fpage>&#x2013;<lpage>336</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.11975/j.issn.1002-6819.2022.17.036</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Khanam</surname> <given-names>R.</given-names></name>
<name><surname>Hussain</surname> <given-names>M.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Yolov11: an overview of the key architectural enhancements</article-title>. <source>Arxiv Preprint Arxiv:2410.17725</source>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2410.17725</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Kong</surname> <given-names>L.</given-names></name>
<name><surname>Dong</surname> <given-names>J.</given-names></name>
<name><surname>Ge</surname> <given-names>J.</given-names></name>
<name><surname>Li</surname> <given-names>M.</given-names></name>
<name><surname>Pan</surname> <given-names>J.</given-names></name>
</person-group> (<year>2023</year>). &#x201c;
<article-title>Efficient frequency domain-based transformers for high-quality image deblurring</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR)</conf-name>. (<publisher-loc>Piscataway, NJ</publisher-loc>:
<publisher-name>IEEE Computer Society</publisher-name>), <fpage>5886</fpage>&#x2013;<lpage>5895</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2211.12250</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>W.</given-names></name>
<name><surname>Ma</surname> <given-names>S.</given-names></name>
<name><surname>Zhou</surname> <given-names>B.</given-names></name>
<name><surname>Li</surname> <given-names>W.</given-names></name>
<name><surname>Huo</surname> <given-names>P.</given-names></name>
<name><surname>Qian</surname> <given-names>J.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Core technologies of sugarcane chopper harvester extractor: a critical review</article-title>. <source>Agriculture</source> <volume>14</volume>, <fpage>1730</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agriculture14101730</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>X.</given-names></name>
<name><surname>Zhang</surname> <given-names>Z.</given-names></name>
<name><surname>Lv</surname> <given-names>S.</given-names></name>
<name><surname>Liang</surname> <given-names>T.</given-names></name>
<name><surname>Zou</surname> <given-names>J.</given-names></name>
<name><surname>Ning</surname> <given-names>T.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). 
<article-title>Detection of breakage and impurity ratios for raw sugarcane based on estimation model and mdsc-deeplabv3+</article-title>. <source>Front. Plant Sci.</source> <volume>14</volume>, <elocation-id>1283230</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2023.1283230</pub-id>, PMID: <pub-id pub-id-type="pmid">38023873</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>Q.</given-names></name>
<name><surname>Huang</surname> <given-names>Y.</given-names></name>
<name><surname>Jang</surname> <given-names>J.</given-names></name>
<name><surname>Wu</surname> <given-names>T.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Technology and equipment of sugarcane harvesting mechanization review</article-title>. <source>Trans. Chin. Soc. Agric. Machinery.</source> <volume>55</volume>, <fpage>1</fpage>&#x2013;<lpage>21</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.6041/j.issn.1000-1298.2024.12.001</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>Q.</given-names></name>
<name><surname>Liu</surname> <given-names>X.</given-names></name>
<name><surname>Wu</surname> <given-names>T.</given-names></name>
<name><surname>Qu</surname> <given-names>Y.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>The 14th five-year plan of sugarcane mechanization and sustainable development of sugarcane industry in China</article-title>. <source>Modern Agric. Equipment.</source> <volume>41</volume>, <fpage>2</fpage>&#x2013;<lpage>9</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3969/j.issn.1673-2154.2020.06.001</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>W.</given-names></name>
<name><surname>Lu</surname> <given-names>H.</given-names></name>
<name><surname>Fu</surname> <given-names>H.</given-names></name>
<name><surname>Cao</surname> <given-names>Z.</given-names></name>
</person-group> (<year>2023</year>). &#x201c;
<article-title>Learning to upsample by learning to sample</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE/CVF international conference on computer vision (ICCV)</conf-name>. (<publisher-loc>Piscataway, NJ</publisher-loc>: 
<publisher-name>IEEE Computer Society</publisher-name>), <fpage>6027</fpage>&#x2013;<lpage>6037</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2308.15085</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Martins</surname> <given-names>M. B.</given-names></name>
<name><surname>Ruiz</surname> <given-names>D.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>Influence of operational conditions of mechanized harvesting on sugarcane losses and impurities</article-title>. <source>Engenharia Agr&#xed;cola</source> <volume>40</volume>, <fpage>352</fpage>&#x2013;<lpage>355</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1590/1809-4430-eng.agric.v40n3p352-355/2020</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pan</surname> <given-names>Z.</given-names></name>
<name><surname>Qiu</surname> <given-names>B.</given-names></name>
<name><surname>Yang</surname> <given-names>R.</given-names></name>
<name><surname>Zhang</surname> <given-names>H.</given-names></name>
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Potato pickup harvesting impurity detection method based on PLP-net lightweight model</article-title>. <source>Trans. CSAE.</source> <volume>41</volume>, <fpage>208</fpage>&#x2013;<lpage>218</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.11975/j.issn.1002-6819.202412222</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Qi</surname> <given-names>Z.</given-names></name>
<name><surname>Ling</surname> <given-names>W.</given-names></name>
<name><surname>Xindong</surname> <given-names>N. I.</given-names></name>
<name><surname>Faming</surname> <given-names>W.</given-names></name>
<name><surname>Du</surname> <given-names>C.</given-names></name>
<name><surname>Shumao</surname> <given-names>W.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Research on wheat broken rate and impurity rate detection method based on deeplab-eda model and system construction</article-title>. <source>Comput. Electron. Agric.</source> <volume>226</volume>, <fpage>109375</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2024.109375</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Que</surname> <given-names>Y.</given-names></name>
<name><surname>Wu</surname> <given-names>Q.</given-names></name>
<name><surname>Zhang</surname> <given-names>H.</given-names></name>
<name><surname>Luo</surname> <given-names>J.</given-names></name>
<name><surname>Zhang</surname> <given-names>Y.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Developing new sugarcane varieties suitable for mechanized production in China: principles, strategies and prospects</article-title>. <source>Front. Plant Sci.</source> <volume>14</volume>, <elocation-id>1337144</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2023.1337144</pub-id>, PMID: <pub-id pub-id-type="pmid">38259907</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Ronneberger</surname> <given-names>O.</given-names></name>
<name><surname>Fischer</surname> <given-names>P.</given-names></name>
<name><surname>Brox</surname> <given-names>T.</given-names></name>
</person-group> (<year>2015</year>). &#x201c;
<article-title>U-Net: Convolutional Networks for Biomedical Image Segmentation</article-title>,&#x201d; in <conf-name>International Conference on Medical Image Computing and Computer-Assisted Intervention</conf-name>. (<publisher-loc>Cham, Switzerland</publisher-loc>: 
<publisher-name>Springer</publisher-name>), <fpage>234</fpage>&#x2013;<lpage>241</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/978-3-319-24574-4_28</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Shi</surname> <given-names>M.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Pan</surname> <given-names>Y.</given-names></name>
<name><surname>Lu</surname> <given-names>L.</given-names></name>
<name><surname>Wei</surname> <given-names>J.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Design of an adaptive height control system for sugarcane harvester header</article-title>. <source>Agronomy</source> <volume>14</volume>, <fpage>1644</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy14081644</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>Z.</given-names></name>
<name><surname>Wu</surname> <given-names>Z.</given-names></name>
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
<name><surname>Abudurexiti</surname> <given-names>M.</given-names></name>
<name><surname>Zhang</surname> <given-names>Q.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Cotton impurity detection based on ZC-YOLO</article-title>. <source>Wool Textile J.</source> <volume>52</volume>, <fpage>95</fpage>&#x2013;<lpage>101</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.19333/j.mfkj.20240505707</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>Q.</given-names></name>
<name><surname>Wu</surname> <given-names>B.</given-names></name>
<name><surname>Zhu</surname> <given-names>P.</given-names></name>
<name><surname>Li</surname> <given-names>P.</given-names></name>
<name><surname>Zuo</surname> <given-names>W.</given-names></name>
<name><surname>Hu</surname> <given-names>Q.</given-names></name>
</person-group> (<year>2020</year>). &#x201c;
<article-title>ECA-net:Efficient channel attention for deep convolutional neural networks</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR)</conf-name>. (<publisher-loc>Piscataway, NJ</publisher-loc>: 
<publisher-name>IEEE Computer Society</publisher-name>), <fpage>11534</fpage>&#x2013;<lpage>11542</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/CVPR42600.2020.01155</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wu</surname> <given-names>T.</given-names></name>
<name><surname>Li</surname> <given-names>F.</given-names></name>
<name><surname>Liu</surname> <given-names>Q.</given-names></name>
<name><surname>Ren</surname> <given-names>J.</given-names></name>
<name><surname>Huang</surname> <given-names>J.</given-names></name>
<name><surname>Qin</surname> <given-names>Z.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Numerical simulation and analysis of the impurity removal process of a sugarcane chopper harvester based on a cfd&#x2013;dem model</article-title>. <source>Agriculture</source> <volume>14</volume>, <fpage>1392</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agriculture14081392</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Xie</surname> <given-names>L.</given-names></name>
<name><surname>Wang</surname> <given-names>J.</given-names></name>
<name><surname>Cheng</surname> <given-names>S.</given-names></name>
<name><surname>Zeng</surname> <given-names>B.</given-names></name>
<name><surname>Yang</surname> <given-names>Z.</given-names></name>
</person-group> (<year>2018</year>). 
<article-title>Optimisation and finite element simulation of the chopping process for chopper sugarcane harvesting</article-title>. <source>Biosyst. Eng.</source> <volume>175</volume>, <fpage>16</fpage>&#x2013;<lpage>26</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.biosystemseng.2018.08.004</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yu</surname> <given-names>L.</given-names></name>
<name><surname>Qian</surname> <given-names>M.</given-names></name>
<name><surname>Chen</surname> <given-names>Q.</given-names></name>
<name><surname>Sun</surname> <given-names>F.</given-names></name>
<name><surname>Pan</surname> <given-names>J.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>An improved yolov5 model: application to mixed impurities detection for walnut kernels</article-title>. <source>Foods</source> <volume>12</volume>, <fpage>624</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/foods12030624</pub-id>, PMID: <pub-id pub-id-type="pmid">36766152</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhao</surname> <given-names>S.</given-names></name>
<name><surname>Yu</surname> <given-names>Y.</given-names></name>
<name><surname>Miao</surname> <given-names>Y.</given-names></name>
<name><surname>Liu</surname> <given-names>K.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Research of impurity detection of green vegetable based on improved Mask R-CNN</article-title>. <source>J. Chin. Agric. Mechanization</source> <volume>45</volume>, <fpage>77</fpage>&#x2013;<lpage>82</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.13733/j.jcam.issn.2095-5553.2024.09.012</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhou</surname> <given-names>S.</given-names></name>
<name><surname>Deng</surname> <given-names>G.</given-names></name>
<name><surname>He</surname> <given-names>F.</given-names></name>
<name><surname>Li</surname> <given-names>G.</given-names></name>
<name><surname>Cui</surname> <given-names>Z.</given-names></name>
<name><surname>Dai</surname> <given-names>Y.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Research progress on detection technology of impurity rate in mechanized sugarcane harvest</article-title>. <source>Modern Agric. Equip.</source> <volume>45</volume>, <fpage>2</fpage>&#x2013;<lpage>5</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3969/j.issn.1673-2154.2024.04.001</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhou</surname> <given-names>B.</given-names></name>
<name><surname>Ma</surname> <given-names>S.</given-names></name>
<name><surname>Li</surname> <given-names>W.</given-names></name>
<name><surname>Qian</surname> <given-names>J.</given-names></name>
<name><surname>Li</surname> <given-names>W.</given-names></name>
<name><surname>Yang</surname> <given-names>S.</given-names></name>
</person-group> (<year>2025</year>a). 
<article-title>Design and experiment of monitoring system for feed rate on sugarcane chopper harvester</article-title>. <source>Comput. Electron. Agric.</source> <volume>228</volume>, <fpage>109695</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2024.109695</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhou</surname> <given-names>B.</given-names></name>
<name><surname>Ma</surname> <given-names>S.</given-names></name>
<name><surname>Li</surname> <given-names>W.</given-names></name>
<name><surname>Wu</surname> <given-names>Z.</given-names></name>
<name><surname>Qian</surname> <given-names>J.</given-names></name>
<name><surname>Huo</surname> <given-names>P.</given-names></name>
<etal/>
</person-group> (<year>2025</year>b). 
<article-title>CFD-DEM coupling simulation and parameter optimization of sugarcane harvester extractor</article-title>. <source>Biosystems Engineering</source> <volume>250</volume>, <fpage>80</fpage>&#x2013;<lpage>93</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.biosystemseng.2024.12.003</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhou</surname> <given-names>B.</given-names></name>
<name><surname>Han</surname> <given-names>J.</given-names></name>
<name><surname>Wu</surname> <given-names>Z.</given-names></name>
<name><surname>Feng</surname> <given-names>F.</given-names></name>
<name><surname>Ma</surname> <given-names>S.</given-names></name>
</person-group> (<year>2025</year>c). 
<article-title>Optimal structure design of a sugarcane harvester extractor based on CFD</article-title>. <source>Int. J. Agric. Biol. Eng.</source> <volume>18</volume>, <fpage>117</fpage>&#x2013;<lpage>127</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.25165/j.ijabe.20251804.9323</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/55589">Milind B. Ratnaparkhe</ext-link>, ICAR Indian Institute of Soybean Research, India</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3310424">Manisha Tapale</ext-link>, KLE Technological University, India</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3338326">Li Bing</ext-link>, Anhui Agricultural University, China</p></fn>
</fn-group>
</back>
</article>