<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2026.1770912</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Oblique-view video tracking and density-based counting: accurate counting of late-stage rapeseed seedlings for breeding assessment</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Luo</surname><given-names>Bowen</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3298017/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Yang</surname><given-names>Yuang</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhang</surname><given-names>Kuanyan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Lv</surname><given-names>Xuan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname><given-names>Yujie</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Yang</surname><given-names>Yicheng</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhang</surname><given-names>Fugui</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1602474/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname><given-names>Lu</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2252635/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhang</surname><given-names>Gang</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Wang</surname><given-names>Xiaole</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Wu</surname><given-names>Zhenchao</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3323785/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>School of Engineering, Anhui Agricultural University</institution>, <city>Hefei</city>, <state>Anhui</state>, <country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>School of Agriculture, Anhui Agricultural University</institution>, <city>Hefei</city>, <state>Anhui</state>, <country country="CN">China</country></aff>
<aff id="aff3"><label>3</label><institution>Anhui Agricultural University New Countryside Development Research Institute Wandong Comprehensive Test Station</institution>, <city>Chuzhou</city>, <state>Anhui</state>, <country country="CN">China</country></aff>
<aff id="aff4"><label>4</label><institution>Anhui Provincial Engineering Research Center for Intelligent Agricultural Machinery Equipment</institution>, <city>Hefei</city>, <state>Anhui</state>, <country country="CN">China</country></aff>
<aff id="aff5"><label>5</label><institution>School of Intelligent Manufacturing, Anhui Science and Technology University</institution>, <city>Fengyang</city>, <state>Anhui</state>, <country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Xiaole Wang, <email xlink:href="mailto:wangxiaole@ahau.edu.cn">wangxiaole@ahau.edu.cn</email>; Zhenchao Wu, <email xlink:href="mailto:wuzhenchao@ahau.edu.cn">wuzhenchao@ahau.edu.cn</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-17">
<day>17</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1770912</elocation-id>
<history>
<date date-type="received">
<day>19</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>27</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>30</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Luo, Yang, Zhang, Lv, Liu, Yang, Zhang, Liu, Zhang, Wang and Wu.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Luo, Yang, Zhang, Lv, Liu, Yang, Zhang, Liu, Zhang, Wang and Wu</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-17">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Accurate counting of late-stage rapeseed seedlings is critical for yield estimation and field management, while traditional manual counting is inefficient and labor-intensive, calling for an automated counting method. A novel video tracking and counting method (CropTriangulator) was proposed, which uses smartphone-captured videos to achieve row-based accurate counting based on oblique view and target density distribution. It integrates three core components: YOLOv11n was selected for its balanced detection accuracy and inference speed after model comparison; an adaptive DBSCAN (AdapDBSCAN) algorithm was designed to eliminate non-target seedlings by dynamically adjusting parameters to address perspective distortion; the SORT algorithm was adopted for tracking and counting, with permanent ID marking to ensure uniqueness when seedlings cross frame boundaries. Experiments on 20 test videos (10 for 45&#xb0; oblique view, 10 for 90&#xb0; vertical view) showed that CropTriangulator achieved an average counting accuracy of 97.13% at 45&#xb0; (14% higher than 90&#xb0;), with the R-squared of 45&#xb0; row-based counts reaching 0.917. AdapDBSCAN reduced over-counting compared with fixed-parameter DBSCAN, and SORT had a much lower ID switch rate (8.47%) than DeepSORT (36.05%). The 45&#xb0; oblique view is proven optimal for rapeseed seedling counting. The proposed CropTriangulator provides a low-cost and efficient solution for automated row-based counting in complex field environments, supporting precise yield estimation and scientific field management decisions. The video comparing the effects of the CropTriangulator method is available at: <ext-link ext-link-type="uri" xlink:href="https://github.com/Possibility007/Comparison-of-counting-results.git">https://github.com/Possibility007/Comparison-of-counting-results.git</ext-link></p>
</abstract>
<kwd-group>
<kwd>adaptive DBSCAN</kwd>
<kwd>field-based phenotyping</kwd>
<kwd>rapeseed seedlings counting</kwd>
<kwd>tracking</kwd>
<kwd>YOLO</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>National Natural Science Foundation of China</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100001809</institution-id>
</institution-wrap>
</funding-source>
<award-id rid="sp1">32372003</award-id>
</award-group>
<award-group id="gs2">
<funding-source id="sp2">
<institution-wrap>
<institution>Anhui Provincial Department of Education</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100010814</institution-id>
</institution-wrap>
</funding-source>
<award-id rid="sp2">2022AH030090</award-id>
</award-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by National Natural Science Foundation of China (grant number 32372003), Anhui Provincial Department of Education (grant number 2022AH030090), and Scientific Research Start-up Foundation of Anhui Agricultural University (grant number rc412405).</funding-statement>
</funding-group>
<counts>
<fig-count count="15"/>
<table-count count="5"/>
<equation-count count="10"/>
<ref-count count="46"/>
<page-count count="19"/>
<word-count count="10060"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Sustainable and Intelligent Phytoprotection</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Seedling emergence rate is one of the main bases for rapeseed breeding and field management, which traditionally relies on time-consuming and labor-intensive manual field observation and counting, thus highlighting the urgent need for automated methods. Breeding superior varieties has become a high-priority demand in the agricultural market, and achieving uniform seedling emergence is key to crop breeding (<xref ref-type="bibr" rid="B25">Paparella et&#xa0;al., 2015</xref>; <xref ref-type="bibr" rid="B30">Sun et&#xa0;al., 2023</xref>). As a critical indicator of crop phenotypic traits, seedling emergence rate is usually calculated by counting the number of germinated seeds and the total number of sown seeds. Traditionally, these data are obtained through manual counting, which is inefficient and costly (<xref ref-type="bibr" rid="B46">Zhuang et al., 2024</xref>). By acquiring images of rapeseed seedlings and processing them using deep learning techniques, automatic target detection and quantity statistics of rapeseed seedlings can be realized, providing strong support for seed selection, breeding, and optimization of field management measures.</p>
<p>Deep learning techniques have been widely applied to automate target detection and quantity statistics, showing potential in identifying rapeseed seedlings in complex scenarios from images (<xref ref-type="bibr" rid="B44">Zhao et&#xa0;al., 2018</xref>). To address the effective identification of different parts of tomato plants, <xref ref-type="bibr" rid="B3">Cardellicchio et&#xa0;al. (2023)</xref> proposed a model based on YOLOv5 single-stage detectors (both standalone and ensemble detectors) to automatically identify and extract key phenotypic traits from tomato images under various stress conditions. <xref ref-type="bibr" rid="B27">Rong et&#xa0;al. (2019)</xref> applied two different convolutional neural network structures to walnut images to solve the problem of rapid detection of foreign objects in walnuts, achieving an accuracy of 99.5% in automatically segmenting images and detecting natural foreign objects of different sizes. <xref ref-type="bibr" rid="B13">Khaki et&#xa0;al. (2022)</xref> proposed a new deep learning framework, WheatNet, which can accurately and efficiently count wheat ears in the field, collecting real-time data for farmers to make scientific and reasonable wheat planting and management decisions. <xref ref-type="bibr" rid="B23">Misra et&#xa0;al. (2020)</xref> proposed an object detection-based method to automatically identify and count wheat ears from images. Additionally, a wheat ear counting method based on frequency domain decomposition was proposed (<xref ref-type="bibr" rid="B1">Bao et&#xa0;al., 2023</xref>), significantly improving the accuracy of wheat ear counting in images to 91.5%. For single images, deep learning techniques perform well in crop target detection and counting, but they cannot cover all objects in a single field.</p>
<p>The identification of rapeseed seedlings in a single image usually fails to reflect the total number of targets in a single field, which can be addressed using video tracking. To solve the low efficiency of traditional manual monitoring of peanut seedling emergence rate in fields, <xref ref-type="bibr" rid="B18">Lin et&#xa0;al. (2022)</xref> proposed a real-time peanut video counting model (combining improved YOLOv5s and DeepSort), achieving a counting ability close to that of humans with an accuracy of 98.08%. <xref ref-type="bibr" rid="B31">Tan et&#xa0;al. (2022)</xref> improved the cotton seedling tracking method by combining a one-stage target detection deep neural network with optical flow, providing an automatic and near-real-time video tracking method that achieves high-precision seedling detection under high occlusion, image blur, complex backgrounds, and extreme lighting conditions, with an average precision of 99.12%. <xref ref-type="bibr" rid="B32">Tan et&#xa0;al. (2023)</xref> developed a plant seedling and flower counting method using an anchor-free deep convolutional neural network-based tracking approach, conducting experiments on 75 cotton seedling videos and 50 cotton flower videos collected in fields, with average relative errors of 5.5% and 10.8%, respectively. <xref ref-type="bibr" rid="B28">Rong et&#xa0;al. (2023)</xref> proposed an improved tomato cluster counting method combining target detection, multi-object tracking, and counting in specific tracking regions, addressing the challenges of automated tomato yield estimation in practical applications and realizing tomato cluster yield estimation in greenhouse scenarios with an accuracy of 97.9%. <xref ref-type="bibr" rid="B2">Barreto et&#xa0;al. (2021)</xref> successfully achieved fully automated counting of sugar beets, maize, and strawberries by combining a UAV-based camera system with deep learning algorithms, with errors below 4.6%. 
A major advantage of crop detection using image data is the ease of implementing application algorithms, while using video data for crop monitoring is more conducive to practical field applications (<xref ref-type="bibr" rid="B18">Lin et&#xa0;al., 2022</xref>). For field crops, video data are usually acquired from a top-down overhead view, which struggles to accurately capture crop features in complex scenarios.</p>
<p>Video tracking from an overhead view typically achieves good results in the early stages of crop growth. However, during rapeseed breeding, different varieties exhibit varying growth statuses: even when sown simultaneously, some varieties (e.g., mustard-type rapeseed) remain in the early seedling stage, while others (e.g., cabbage-type rapeseed) may enter the late seedling stage, where stems and leaves overlap as they grow, posing challenges for rapeseed counting. <xref ref-type="bibr" rid="B5">Chen et&#xa0;al. (2024)</xref> proposed a regression deep learning-based visual model, HOB-CNNv2, to segment tree branches under extreme occlusion using data acquired from the side of fruit trees. <xref ref-type="bibr" rid="B4">Chen et&#xa0;al. (2023)</xref> proposed a lightweight multi-class occluded target detection method for Camellia oleifera fruits, testing data acquired from different oblique angles and improving detection accuracy loss caused by multiple occlusion types, with an average precision of 94.1%. <xref ref-type="bibr" rid="B45">Zheng et&#xa0;al. (2022)</xref> optimized YOLOv4 to address the impact of leaf occlusion on tomato detection accuracy for picking robots, achieving an average detection accuracy of 94.44% when the camera angle is 90&#xb0; relative to the ground. From an overhead view, most bottom leaves are covered or obscured, hindering comprehensive analysis of the entire crop. From an oblique view, the entire plant structure is visible, enabling quantification of leaves, plant height, and branch area (<xref ref-type="bibr" rid="B41">Zhang et&#xa0;al., 2019</xref>). Therefore, data acquired from different oblique views is expected to distinguish individual rapeseed plants for accurate counting of rapeseed seedlings.</p>
<p>Even though rapeseed plants can be distinguished from an oblique view, due to linear perspective, crop morphology in videos acquired from oblique angles usually exhibits distortion (smaller in the distance and larger in the foreground), affecting clustering accuracy and thus counting accuracy. To address perspective issues, <xref ref-type="bibr" rid="B8">Dolata et&#xa0;al. (2021)</xref> proposed an adaptive nonlinear regression model that adaptively adjusts parameters to match the morphological characteristics of different plants, predicting the contour of each plant in online-acquired images with an accuracy of 86.9%. <xref ref-type="bibr" rid="B21">Liu A. et&#xa0;al. (2025)</xref> proposed a threshing gap adaptive adjustment system based on feed rate monitoring and established a feed rate monitoring model, with an average precision of 90.8% for the system. <xref ref-type="bibr" rid="B42">Zhang et&#xa0;al. (2023)</xref> corrected distorted images from oblique views by adaptively adjusting perspective transformation, solving the vanishing point problem commonly occurring at the top of parallel crop rows. Adaptively adjusting the clustering radius is expected to address minor distortion in videos caused by linear perspective, enabling accurate counting of crops in fields.</p>
<p>In summary, the aforementioned studies on static multi-view data acquisition and adaptive adjustment methods provide various insights for target counting. However, existing methods still have room for optimization in counting at the late seedling stage. In particular, rapeseed seedlings exhibit complex growth states in actual field scenarios, requiring counting methods to balance efficiency and accuracy. Therefore, integrating current research results with practical application needs, this study proposes a video tracking and counting method for rapeseed seedlings at the late seedling stage based on oblique view and target density distribution, using easily operable smartphones to acquire videos of late-stage rapeseed seedlings, realizing accurate counting of rapeseed seedlings in target regions. Firstly, YOLOv11 was utilized to detect rapeseed seedlings from 45&#xb0; oblique and 90&#xb0; vertical views. Secondly, rapeseed seedlings in target regions were extracted based on target density distribution. Then, extracted rapeseed seedlings were assigned IDs and counted. Finally, target detection and counting performance from the two views are compared, and the superior view was selected for the counting method in this study.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<p>An automatic rapeseed seedling counting method for calculating the seedling emergence rate in modern rapeseed fields is proposed in this study. The method workflow is shown in <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref>. Firstly, videos of rapeseed seedlings at different oblique angles were collected in modern rapeseed fields to construct a dataset. Secondly, different scales of YOLOv11 models were trained and compared to achieve accurate counting of rapeseed seedlings. Thirdly, based on YOLOv11n detection of rapeseed seedlings in videos, the AdapDBSCAN algorithm was proposed to eliminate rapeseed seedlings in non-target regions. Fourthly, after eliminating rapeseed seedlings using the AdapDBSCAN algorithm, the SORT algorithm was utilized to assign IDs to the extracted rapeseed seedlings and count them. Finally, a rapeseed seedling video counting method, CropTriangulator, was proposed, integrating three core modules: YOLOv11 target detection, adaptive DBSCAN clustering, and SORT counting algorithm, forming a complete computer vision processing pipeline. Detailed information outlining each step is provided below.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Workflow of the method, including detection, clustering, tracking, and counting.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g001.tif">
<alt-text content-type="machine-generated">Flowchart illustrating the CropTriangulator system for crop seedling detection, clustering, and counting in video frames. The process starts with input images, proceeds through a neural network for object detection, adjusts for perspective distortion with dynamic clustering radius, eliminates noise points, and outputs the number of seedlings in counting boxes for each video frame. Contains color-coded annotations for detection and clustering stages.</alt-text>
</graphic></fig>
<sec id="s2_1">
<label>2.1</label>
<title>Dataset</title>
<sec id="s2_1_1">
<label>2.1.1</label>
<title>Study area</title>
<p>The study area is located at the Teaching and Demonstration Base of Anhui Agricultural University in Hefei, Anhui Province, China (N: 31&#xb0;29&#x2032;4.36&#x2033;, E: 117&#xb0;13&#x2032;23.97&#x2033;, altitude 47 m), belonging to the northern subtropical humid monsoon climate zone, as shown in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>. A total of 108 rapeseed varieties, including Brassica rapa, Brassica juncea, and Brassica napus, were selected. The spacing between each rapeseed plant is 0.05 m, and furrows are dug on both sides of the planting area for irrigation and waterlogging drainage (i.e., row spacing of 0.3 m). Each variety is planted in plots (1.2 m in width and 14.4 m in length).</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Data collection area. The red solid line area represents the rapeseed planting area, the red dashed line area represents a complete data acquisition area, and the red arrow area represents the width of the drainage ditch.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g002.tif">
<alt-text content-type="machine-generated">Composite graphic showing the location and layout of an experimental planting area. In the upper left, a map highlights Anhui Province, with Hefei shaded and Luyang District in yellow. The upper right inset depicts a satellite view of rectangular fields, with two red rectangles marking planting areas. At the bottom, a photograph shows crop rows bordered by dashed red lines indicating a target area measuring zero point three meters by fourteen point four meters. A legend clarifies map colors and labels the furrow and target area.</alt-text>
</graphic></fig>
</sec>
<sec id="s2_1_2">
<label>2.1.2</label>
<title>Data acquisition</title>
<p>The boundary of the rapeseed seedling population between two adjacent furrows is defined as a row unit, from the beginning to the end within the planting area. Staff held an iPhone 14 Pro Max (Apple Inc., California, USA) fixed on a DJI Osmo Mobile SE gimbal (DJI, Shenzhen, China), walking at a constant speed along the furrow from the beginning of each plot to acquire complete video data of each row of rapeseed seedlings (image resolution: 1920 &#xd7; 1080 pixels, frame rate: 30 frames per second, number of videos: 28), with the tester&#x2019;s walking speed of approximately 0.8 m/s, as shown in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3a</bold></xref>. During data collection, the weather was initially clear before transitioning to partly cloudy conditions. The iPhone 14 Pro Max camera was adjusted to ultra-wide-angle mode, and the DJI Osmo Mobile SE gimbal was set to pitch-lock mode, with video shooting angles divided into a 45&#xb0; angle (referred to as the 45&#xb0; oblique view, abbreviated as 45&#xb0; view) and a 90&#xb0; angle (referred to as the 90&#xb0; vertical view, abbreviated as 90&#xb0; view) between the phone and the ground, as shown in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3b</bold></xref>, at a height of approximately 0.5 m from the ground. After data collection, the staff manually counted the number of rapeseed seedlings in each row and recorded it as the actual number of rapeseed seedlings in each row, which was called ground truth (<italic>GT</italic>) and was utilized to analyze and discuss the tracking and counting performance.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p><bold>(a)</bold> Staff holding a smartphone fixed on a DJI gimbal walking along the furrow to acquire data, where the light yellow area represents the maximum horizontal angle range that can be captured, and the yellow area represents the target row captured by the camera; <bold>(b)</bold> example images of rapeseed plants under 45&#xb0; oblique view and 90&#xb0; vertical view set by the gimbal.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g003.tif">
<alt-text content-type="machine-generated">Panel (a) shows a person in a field holding a camera or sensor device over a row of crops, with yellow and red graphics indicating the target row for data collection. Panel (b) illustrates two camera angles, forty-five and ninety degrees, directed at crop rows, accompanied by corresponding images displaying the ground and plants from each angle.</alt-text>
</graphic></fig>
</sec>
<sec id="s2_1_3">
<label>2.1.3</label>
<title>Dataset preparation and data processing</title>
<p>From 28 collected videos on November 15, 2024 (14 for 45&#xb0; view and 14 for 90&#xb0; view), 8 videos (4 per view) were selected, with each video having a fixed frame rate of 30 fps, duration of 20 seconds, and original resolution of 1920&#xd7;1080 pixels. A sampling strategy of extracting 1 frame from every 6 consecutive frames (5-frame interval) was adopted, which generated 800 images (400 per view). This approach avoids redundant overlapping frames (which would reduce diversity if the sampling interval were reduced), while ensuring the richness of the data, covering the complete video sequence, 108 rapeseed varieties with diverse phenotypes, and complex field conditions. The remaining 20 videos were employed to validate the performance of the rapeseed counting model. The dataset was randomly divided into training, validation, and test sets in an 8:1:1 ratio for training the detection model. Detailed dataset information is shown in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>. Secondly, the image dataset was manually annotated using LabelImg, following the method of <xref ref-type="bibr" rid="B20">Liu D. et&#xa0;al. (2025)</xref>. It is worth noting that a special strategy was adopted for dataset annotation: only valid regions within target rows of images were annotated, as shown in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>(iii). The area formed by annotated rapeseed seedlings in an image is referred to as the valid region under the special annotation strategy. For rows where rapeseed seedlings can be clearly displayed in full, all seedlings were annotated; for rows where more than half of the rapeseed seedlings are blurred or incompletely displayed, the entire row was not annotated. The total annotation time was approximately 200 hours. Rectangular bounding boxes indicate the positions of seedlings. 
After manual annotation, TXT files containing target types and coordinate information were generated for training on the dataset. The total data processing workflow is shown in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>The detailed information of data acquisition.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Collection date</th>
<th valign="middle" align="left">Collection views</th>
<th valign="middle" align="left">Number of videos</th>
<th valign="middle" align="left">Initial image resolution</th>
<th valign="middle" align="left">Number of images</th>
<th valign="middle" align="left">Train dataset</th>
<th valign="middle" align="left">Test dataset</th>
<th valign="middle" align="left">Acquisition time</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Nov 15, 2024</td>
<td valign="middle" align="left">45&#xb0; view</td>
<td valign="middle" align="left">14</td>
<td valign="middle" align="left">1920&#xd7;1080</td>
<td valign="middle" align="left">400</td>
<td valign="middle" align="left">320</td>
<td valign="middle" align="left">80</td>
<td valign="middle" align="left">8:00~13:00</td>
</tr>
<tr>
<td valign="middle" align="left">Nov 15, 2024</td>
<td valign="middle" align="left">90&#xb0; view</td>
<td valign="middle" align="left">14</td>
<td valign="middle" align="left">1920&#xd7;1080</td>
<td valign="middle" align="left">400</td>
<td valign="middle" align="left">320</td>
<td valign="middle" align="left">80</td>
<td valign="middle" align="left">13:00~17:00</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Data processing workflow. <bold>(A)</bold> Data acquisition; <bold>(B)</bold> dataset creation; <bold>(C)</bold> special annotation strategy, where the area framed by the yellow dashed line represents the region meeting the annotation strategy, the area outside the frame represents the non-annotated region, and the red rectangular box represents the annotation box from LabelImg; <bold>(D)</bold> dataset training.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g004.tif">
<alt-text content-type="machine-generated">Flowchart illustrates a dataset creation and object detection pipeline for Brassica plants in a field, showing steps from video acquisition with a handheld device, frame extraction, annotation, and training object detection models, with labeled sample images.</alt-text>
</graphic></fig>
<p>Rapeseed seedling detection provides a basis for counting. Rapeseed seedling detection faces unique challenges due to its classification as dense object detection. As a representative of one-stage target detection algorithms, YOLOv11 performs excellently in small-scale dense target detection, combining high precision and speed (<xref ref-type="bibr" rid="B11">Huang et&#xa0;al., 2025</xref>). The performance of different scales of YOLOv11 models was compared and analyzed, and the optimal model was selected for rapeseed seedling detection.</p>
<p>In this study, network training was implemented on a desktop computer equipped with an Intel Core i9-12900K (3.19 GHz) CPU, NVIDIA GeForce RTX 3090 GPU, 16 GB RAM, and 64-bit Windows 10. Specific experimental configurations are shown in <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>. The training batch size was 16, epochs were 500, and image size was 960 (<xref ref-type="bibr" rid="B7">Daniels et&#xa0;al., 2021</xref>). The original resolution of the images in the dataset is 1920&#xd7;1080 pixels. Due to the large number of pixels in the original images, the model requires excessive computational resources during training, resulting in slow training speed. Moreover, a resolution of 960 pixels is sufficient to retain the features of crop seedlings without causing accuracy loss due to scaling. To meet the computational efficiency and input size requirements of YOLOv11 model training, a long-side scaling strategy is adopted during training to uniformly adjust the images to a long-side length of 960 pixels. Five models with different performances of YOLOv11 were trained on datasets from 45&#xb0; and 90&#xb0; views, respectively. Then, the models with the highest precision (<italic>AP</italic>) and optimal inference time were selected for video tracking from 45&#xb0; and 90&#xb0; views, respectively. All models were trained on the constructed dataset with the same predefined parameters to ensure consistency and comparability.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>The specifications details of hardware and software.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Configuration</th>
<th valign="middle" align="left">Parameter</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">CPU</td>
<td valign="middle" align="left">Intel Core i9-12900K (3.19 GHz)</td>
</tr>
<tr>
<td valign="middle" align="left">GPU</td>
<td valign="middle" align="left">NVIDIA GeForce RTX 3090</td>
</tr>
<tr>
<td valign="middle" align="left">Operating system</td>
<td valign="middle" align="left">Windows 10</td>
</tr>
<tr>
<td valign="middle" align="left">Accelerated environment</td>
<td valign="middle" align="left">CUDA 12.6, cuDNN 8.9.7</td>
</tr>
<tr>
<td valign="middle" align="left">Development environment</td>
<td valign="middle" align="left">PyCharm 2023</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Rapeseed seedling density distribution clustering method based on perspective adaptive adjustment (AdapDBSCAN)</title>
<p>Due to linear perspective effects, the acquired video data in this study exhibits distortion, with rapeseed seedling density appearing denser in the distance and sparser in the foreground along the shooting direction, and the pixel area occupied by rapeseed seedlings appearing smaller in the distance and larger in the foreground, as shown in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5a</bold></xref>.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p><bold>(A)</bold> Perspective-induced &#x201c;small in distance and large in foreground&#x201d; of rapeseed seedlings; <bold>(B)</bold> adaptively adjusting clustering radius; <bold>(C)</bold> workflow of the AdapDBSCAN algorithm. The area surrounded by brown lines represents the drainage ditch, red points represent samples, yellow points represent core points, blue crosses represent noise points, and dashed circles of different colors represent different clustering radii.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g005.tif">
<alt-text content-type="machine-generated">Diagram illustrates an adaptive clustering algorithm for plant detection, showing camera perspective effects, adaptive parameter adjustment with dynamic clustering radii, core point determination via a flowchart, and clustering process steps including field image sampling, clustering, and marking core or noise points.</alt-text>
</graphic></fig>
<p>DBSCAN is a classic density-based unsupervised clustering algorithm that enables data-driven clustering via neighborhood density thresholds (i.e., clustering radius eps and minimum sample count min_samples) (<xref ref-type="bibr" rid="B9">Ester et&#xa0;al., 1996</xref>; <xref ref-type="bibr" rid="B6">Cheng et&#xa0;al., 2024</xref>). However, the two core parameters of the original DBSCAN algorithm are fixed and necessitate manual tuning, which renders it inadequate to accommodate the &#x201c;small in distance and large in foreground&#x201d; perspective distortion of rapeseed seedlings in oblique-view videos, resulting in erroneous clustering in areas with adjacent density variations. Existing adaptive DBSCAN variants (e.g., <xref ref-type="bibr" rid="B14">Khan et&#xa0;al., 2018</xref>) are predominantly optimized for general-purpose scenarios and do not account for the unique density distortion characteristics of crops under field oblique viewing conditions. To address the perspective-induced &#x201c;small in distance and large in foreground&#x201d; challenge, an adaptive DBSCAN parameter-tuning method (AdapDBSCAN) is proposed. A parameter optimization method based on local density was introduced in this study, constructing a nonlinear mapping relationship between the eps parameter and regional target density, while establishing a dynamic calculation model for the min_samples parameter. The center point of the detection box is taken as the position of each detected rapeseed seedling, with each center point treated as a sample point representing a rapeseed seedling. Firstly, a nonlinear functional relationship between the eps parameter and local regional target density is constructed to dynamically adjust the eps: when the number of samples in a region is dense, the eps value decreases; when the number of samples is sparse, eps increases. 
Secondly, the number of samples within the eps is counted to dynamically calculate the min_samples parameter: when the number of samples in a region is denser, the number of core point min_samples is larger; when the number of samples is sparser, the number of core point min_samples is smaller. Finally, based on this functional relationship, starting from any point <italic>x</italic>, if the number of samples within the eps of point <italic>x</italic> is greater than or equal to min_samples, point <italic>x</italic> is marked as a core point; otherwise, it is marked as a noise point. Core points form clusters, and noise points are marked as candidate elimination set <italic>N</italic> and eliminated during clustering, as shown in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5b</bold></xref>. Through dynamic determination of parameters twice, rapeseed seedlings in non-target regions (i.e., sparse seedlings detected on both sides) are eliminated. Specific formulas are shown in <xref ref-type="disp-formula" rid="eq1">Equations 1</xref>, <xref ref-type="disp-formula" rid="eq2">2</xref>. The process is shown in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5c</bold></xref>, with the red area representing the target region.</p>
<disp-formula id="eq1"><label>(1)</label>
<mml:math display="block" id="M1"><mml:mrow><mml:mi>e</mml:mi><mml:mi>p</mml:mi><mml:mi>s</mml:mi><mml:mo>=</mml:mo><mml:mi>max</mml:mi><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mfrac><mml:mrow><mml:mi>&#x394;</mml:mi><mml:mi>v</mml:mi></mml:mrow><mml:mi>&#x3b1;</mml:mi></mml:mfrac><mml:mo>)</mml:mo><mml:mo>&#xd7;</mml:mo><mml:mi>m</mml:mi><mml:mi>e</mml:mi><mml:mi>d</mml:mi><mml:mi>i</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>D</mml:mi><mml:mrow><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mi>l</mml:mi><mml:mi>t</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq2"><label>(2)</label>
<mml:math display="block" id="M2"><mml:mrow><mml:mtext>min</mml:mtext><mml:mo>_</mml:mo><mml:mi>s</mml:mi><mml:mi>a</mml:mi><mml:mi>m</mml:mi><mml:mi>p</mml:mi><mml:mi>l</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>k</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mo>[</mml:mo><mml:mfrac><mml:mrow><mml:mstyle displaystyle="true"><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>k</mml:mi></mml:munderover><mml:mrow><mml:msub><mml:mi>&#x3c1;</mml:mi><mml:mi>x</mml:mi></mml:msub></mml:mrow></mml:mstyle></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#xb7;</mml:mo><mml:mi>&#x3b4;</mml:mi></mml:mrow></mml:mfrac><mml:mo>]</mml:mo></mml:mrow></mml:math>
</disp-formula>
<p>where <italic>&#x394;v</italic> is the vertical span, <italic>&#x3b1;</italic> is a normalization constant utilized to map the vertical span <italic>&#x394;v</italic> to a scaling factor; <italic>median(D<sub>filtered</sub>)</italic> is the median of the filtered horizontal spacings of rapeseed seedlings (i.e., after filtering out outliers such as excessively large spacings) to avoid interference of extreme values on the reference distance. <italic>&#x3c1;<sub>x</sub></italic> is the neighborhood density of the <italic>x</italic> sample within the eps, and <italic>&#x3b4;</italic> is the density decay factor.</p>
<p>In addition, rapeseed seedlings in non-target regions have small imaging sizes and sparse distribution in images. If not controlled, DBSCAN is forced to continuously increase eps until covering the entire image to meet the min_samples threshold, leading to clustering failure (all points are classified into the same cluster or noise). In this study, the image size is 1920&#xd7;1080 pixels. Considering that metric coordinates in images are depth-dependent, a global pixel-to-actual-distance mapping was not adopted. Instead, based on camera calibration at a fixed shooting height (0.5 m) and actual measurement of the target region, the average pixel-to-distance ratio in the central area of the image (where target seedlings are concentrated) was approximately 800 pixels per meter. When the maximum threshold of eps exceeds the width of the drainage ditch, a broader non-target area is included. Here, the spacing between the rapeseed seedlings has further increased. Even if the eps is further expanded, it is difficult to gather min_samples samples within the neighborhood. Eventually, it falls into a cycle of infinitely increasing the eps to meet the threshold. Based on this, according to the image spatial scale and the actual width of the drainage ditch (0.3 meters), the maximum threshold of eps was preset to 250 pixels. The corresponding actual space is approximately 0.3125 meters, which has exceeded the width of the drainage ditch. And a dual-threshold collaborative constraint mechanism was constructed with the filtered horizontal spacing of rapeseed seedlings (<italic>median(D<sub>filtered</sub>)</italic>) to effectively suppress unlimited expansion of the eps parameter.</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Rapeseed seedling counting method: CropTriangulator</title>
<p>Representative algorithms were compared via controlled experiments. The SORT algorithm was ultimately selected.</p>
<p>Integrating previous research, the rapeseed seedling counting method integrated YOLOv11, AdapDBSCAN, and SORT to realize the counting of rapeseed seedlings in target regions. To better count rapeseed seedlings, when the geometric center of a counting box (for counting seedlings) crosses the lower edge of the video frame, the counting box is automatically removed, and its unique identifier (ID) assigned by the SORT algorithm is permanently marked to ensure no reuse in subsequent frame processing, thus ensuring ID uniqueness (<xref ref-type="bibr" rid="B37">Wu et&#xa0;al., 2023b</xref>). This counting method eliminates non-target rows through the AdapDBSCAN algorithm, only counts target regions, and displays the current number of detected rapeseed seedlings and frame number in the upper left corner of the video.</p>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Application development of CropTriangulator</title>
<p>The mobile client implementation of the CropTriangulator system adopts a three-tier architectural framework comprising: (1) a user interface (UI) layer built with native UI components for video upload and visualization; (2) an interaction layer managing file selection and processing requests through native event handlers; and (3) a presentation layer generating dynamic result displays. The system incorporates mobile-optimized UI design with screen-adaptive layouts, file picker functionality providing visual feedback, and animated processing indicators. The user operation interfaces are illustrated in <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>The interfaces of the client application. <bold>(a)</bold> The home interface of the client application. <bold>(b)</bold> The functional interface of the client application. <bold>(c)</bold> The output interface for row-based rapeseed seedling counting results.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g006.tif">
<alt-text content-type="machine-generated">Panel (a) displays a mobile app screen with a vegetable icon and a &#x201c;Home&#x201d; button. Panel (b) shows the same app with a large plus sign to select a media video and an &#x201c;Input&#x201d; button. Panel (c) features a counted result screen displaying a small video of plants with a count of six hundred seventy-nine and a &#x201c;Result&#x201d; button.</alt-text>
</graphic></fig>
<p>The operational workflow begins when users upload field-captured videos through the functional interface. These video files are transmitted to the backend server via HTTP protocol, where they are queued for processing. The server executes the complete CropTriangulator analysis pipeline, including YOLOv11-based seedling detection, AdapDBSCAN clustering for target region identification, and SORT algorithm for seedling tracking and counting. Upon completion, the processed results are returned to the client application. Users can view the rapeseed seedling count calculated by the CropTriangulator method on the counting results interface, as shown in <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6c</bold></xref>. The entire client application is implemented with native mobile development technologies to ensure platform compatibility and maintainability.</p>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Evaluation indicators</title>
<sec id="s2_5_1">
<label>2.5.1</label>
<title>Target detection performance indicators</title>
<p>In this study, to correctly evaluate the accuracy of target detection, commonly used indicators in the deep learning field were adopted: precision (<italic>P</italic>) and recall (<italic>R</italic>), with calculation formulas shown in <xref ref-type="disp-formula" rid="eq3">Equations 3</xref>, <xref ref-type="disp-formula" rid="eq4">4</xref>. Average precision (<italic>AP</italic>), an indicator reflecting the performance of target detection models (<xref ref-type="bibr" rid="B39">Yang J. et&#xa0;al., 2025</xref>; <xref ref-type="bibr" rid="B26">Qin et&#xa0;al., 2025</xref>), is defined by <italic>P</italic> and <italic>R</italic>, as shown in <xref ref-type="disp-formula" rid="eq5">Equation 5</xref>.</p>
<disp-formula id="eq3"><label>(3)</label>
<mml:math display="block" id="M3"><mml:mrow><mml:mi>P</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq4"><label>(4)</label>
<mml:math display="block" id="M4"><mml:mrow><mml:mi>R</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq5"><label>(5)</label>
<mml:math display="block" id="M5"><mml:mrow><mml:mi>A</mml:mi><mml:mi>P</mml:mi><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mtext>i</mml:mtext></mml:msub><mml:mo>&#xb7;</mml:mo><mml:mtext>&#x394;</mml:mtext><mml:msub><mml:mi>R</mml:mi><mml:mtext>i</mml:mtext></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mstyle></mml:mrow></mml:math>
</disp-formula>
<p>where <italic>TP</italic> (True Positives) is the number of correctly predicted positive instances; <italic>FP</italic> (False Positives) is the number of instances predicted as positive but actually negative; <italic>FN</italic> (False Negatives) is the number of positive instances not identified by the model; <italic>AP</italic> is the average precision.</p>
</sec>
<sec id="s2_5_2">
<label>2.5.2</label>
<title>Tracking performance and counting performance indicators</title>
<p>Three commonly used indicators in the tracking field were utilized to evaluate the performance of rapeseed tracking in videos: ID switch rate (<italic>W<sub>ID</sub></italic>), target tracking accuracy (<italic>P<sub>tr</sub></italic>), and target tracking precision (<italic>P<sub>mt</sub></italic>) (<xref ref-type="bibr" rid="B36">Wu et&#xa0;al., 2023a</xref>). The calculation formulas for these indicators are shown in <xref ref-type="disp-formula" rid="eq6">Equations 6</xref>-<xref ref-type="disp-formula" rid="eq8">8</xref>.</p>
<disp-formula id="eq6"><label>(6)</label>
<mml:math display="block" id="M6"><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>I</mml:mi><mml:mi>D</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>Q</mml:mi><mml:mrow><mml:mtext>sh</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mi>S</mml:mi></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq7"><label>(7)</label>
<mml:math display="block" id="M7"><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mtext>tr</mml:mtext></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mtext>mat</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mi>S</mml:mi></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq8"><label>(8)</label>
<mml:math display="block" id="M8"><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mtext>mt</mml:mtext></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mtext>mat</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mtext>mat</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>where <italic>Q<sub>sh</sub></italic> is the number of rapeseed seedlings with ID switches during counting in the experimental video; <italic>S</italic> is the number of rapeseed seedlings counted by the SORT algorithm in the experimental video, with each <italic>S</italic> corresponding to a uniquely determined video; <italic>M<sub>mat</sub></italic> and <italic>T<sub>mat</sub></italic> are the number of correctly tracked rapeseed seedlings and the total number of matched rapeseed seedlings in the experimental video, respectively.</p>
<p>The performance of the rapeseed seedling counting model was evaluated using the accuracy (<italic>Acc</italic>) metric, as defined in <xref ref-type="disp-formula" rid="eq9">Equation 9</xref>, which is utilized to evaluate the performance of the CropTriangulator method and different oblique angles. This indicator measures the ratio of correctly counted rapeseed seedlings to the total number of rapeseed seedlings in the row across 20 test videos from 45&#xb0; oblique and 90&#xb0; vertical views. According to the shooting order and angle of the videos, the test videos are denoted as Video <italic>i</italic> (<italic>i</italic> = 1, 2,&#x2026;, 20), where Videos 1 to 10 represent videos shot from 45&#xb0; view, and Videos 11 to 20 represent videos shot from 90&#xb0; view. Videos 1 to 10 and Videos 11 to 20 form pairwise counterparts, each pair capturing the same row from different views.</p>
<disp-formula id="eq9"><label>(9)</label>
<mml:math display="block" id="M9"><mml:mrow><mml:mi>A</mml:mi><mml:mtext>cc</mml:mtext><mml:mo>=</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mrow><mml:mo>|</mml:mo><mml:mover accent="true"><mml:mi>y</mml:mi><mml:mo>^</mml:mo></mml:mover><mml:mo>&#x2212;</mml:mo><mml:mi>y</mml:mi><mml:mo>|</mml:mo></mml:mrow><mml:mi>y</mml:mi></mml:mfrac><mml:mo stretchy="false">)</mml:mo><mml:mo>&#xd7;</mml:mo><mml:mn>100</mml:mn><mml:mo>%</mml:mo></mml:mrow></mml:math>
</disp-formula>
<p>where <italic>y</italic> represents the actual number of rapeseed seedlings in the target row of the video (<italic>GT</italic>), and <inline-formula>
<mml:math display="inline" id="im1"><mml:mover accent="true"><mml:mi>y</mml:mi><mml:mo>^</mml:mo></mml:mover></mml:math></inline-formula> represents the number of rapeseed seedlings counted by the algorithm. The R-square (<italic>R</italic><sup>2</sup>), which is defined by <xref ref-type="disp-formula" rid="eq10">Equation 10</xref>, is employed to assess the overall error between the counting of rapeseed seedlings and <italic>GT</italic>.</p>
<disp-formula id="eq10">
<label>(10)</label>
<mml:math display="block" id="M10">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:mrow>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mtext>i</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>10</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:mrow>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mtext>i</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>10</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <italic>y<sub>i</sub></italic> represents the number of rapeseed seedlings in Video <italic>i</italic>, and <inline-formula>
<mml:math display="inline" id="im2"><mml:mrow><mml:mover accent="true"><mml:mi>y</mml:mi><mml:mo>&#xaf;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> represents the average number of rapeseed seedlings per row at 45&#xb0; or 90&#xb0; shooting angles.</p>
</sec>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results and discussion</title>
<sec id="s3_1">
<label>3.1</label>
<title>Performance of object detection model</title>
<p>Different scales of YOLOv11 models all performed well in rapeseed seedling detection, as they balanced <italic>AP</italic> and inference time. The <italic>AP</italic> and inference time of different scales of YOLOv11 models from different angles are summarized in <xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>, with the best scores highlighted in bold. As the number of parameters in the feature extraction network increased, <italic>AP</italic><sub>0.5</sub> and <italic>AP</italic><sub>0.5:0.95</sub> for both 45&#xb0; and 90&#xb0; views showed a downward trend, and inference time also increased. The nano-scale and medium-scale models for the 45&#xb0; view had the same <italic>AP</italic><sub>0.5</sub> and <italic>AP</italic><sub>0.5:0.95</sub> values, but the medium-scale model had an inference time 115% slower than the nano-scale model, while the nano-scale model achieved the fastest inference time and required the smallest model size. For the 90&#xb0; view, the nano-scale model had the highest <italic>AP</italic><sub>0.5</sub> and <italic>AP</italic><sub>0.5:0.95</sub> and the shortest inference time. <xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7</bold></xref> shows visualization heatmaps generated by Grad-CAM (<xref ref-type="bibr" rid="B38">Yang Y. et&#xa0;al., 2025</xref>), intuitively presenting the regions focused on by target detection models of different scales. The brightness of each region indicates its importance to the model, with higher brightness representing stronger attention. Results showed that the nano-scale model performed best for the 45&#xb0; view, and the small-scale model performed best for the 90&#xb0; view, but with little difference from the nano-scale model. YOLOv11n outperforms larger-scale YOLOv11 models due to its superior adaptation to late-stage rapeseed seedling detection scenarios, rather than being constrained by dataset scale. 
Late-stage rapeseed seedlings exhibit relatively uniform morphological and spectral features compared to general targets, leading larger models with excessive parameters to overfit trivial background noise (e.g., soil texture variations, isolated weed pixels) in field images. In contrast, the streamlined architecture of YOLOv11n (incorporating Focus and C3k2 modules) prioritizes the extraction of core seedling features (e.g., leaf contours, stem-root connections), enabling it to achieve the same <italic>AP</italic><sub>0.5</sub> and <italic>AP</italic><sub>0.5:0.95</sub> values as the medium-scale model. By comprehensively considering detection accuracy, computational efficiency, and heatmap results, the nano-scale model has advantages in detecting small-scale targets, so it was selected for target detection in both 45&#xb0; and 90&#xb0; views.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Results of YOLOv11 models in different scales.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" rowspan="2" align="left">Model scale</th>
<th valign="middle" colspan="2" align="left"><italic>P</italic></th>
<th valign="middle" colspan="2" align="left"><italic>R</italic></th>
<th valign="middle" colspan="2" align="left"><italic>AP</italic><sub>0.5</sub></th>
<th valign="middle" colspan="2" align="left"><italic>AP</italic><sub>0.5:0.95</sub></th>
<th valign="middle" colspan="2" align="left">Inference time/ms</th>
</tr>
<tr>
<th valign="middle" align="left">45&#xb0;</th>
<th valign="middle" align="left">90&#xb0;</th>
<th valign="middle" align="left">45&#xb0;</th>
<th valign="middle" align="left">90&#xb0;</th>
<th valign="middle" align="left">45&#xb0;</th>
<th valign="middle" align="left">90&#xb0;</th>
<th valign="middle" align="left">45&#xb0;</th>
<th valign="middle" align="left">90&#xb0;</th>
<th valign="middle" align="left">45&#xb0;</th>
<th valign="middle" align="left">90&#xb0;</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Nano</td>
<td valign="middle" align="left"><bold>0.908</bold></td>
<td valign="middle" align="left"><bold>0.775</bold></td>
<td valign="middle" align="left">0.850</td>
<td valign="middle" align="left"><bold>0.802</bold></td>
<td valign="middle" align="left"><bold>0.942</bold></td>
<td valign="middle" align="left"><bold>0.856</bold></td>
<td valign="middle" align="left"><bold>0.811</bold></td>
<td valign="middle" align="left"><bold>0.488</bold></td>
<td valign="middle" align="left"><bold>4.6</bold></td>
<td valign="middle" align="left"><bold>3.9</bold></td>
</tr>
<tr>
<td valign="middle" align="left">Small</td>
<td valign="middle" align="left">0.887</td>
<td valign="middle" align="left">0.774</td>
<td valign="middle" align="left">0.876</td>
<td valign="middle" align="left">0.819</td>
<td valign="middle" align="left">0.941</td>
<td valign="middle" align="left">0.850</td>
<td valign="middle" align="left">0.805</td>
<td valign="middle" align="left">0.474</td>
<td valign="middle" align="left">6.0</td>
<td valign="middle" align="left">5.4</td>
</tr>
<tr>
<td valign="middle" align="left">Medium</td>
<td valign="middle" align="left">0.869</td>
<td valign="middle" align="left">0.764</td>
<td valign="middle" align="left"><bold>0.887</bold></td>
<td valign="middle" align="left">0.796</td>
<td valign="middle" align="left"><bold>0.942</bold></td>
<td valign="middle" align="left">0.826</td>
<td valign="middle" align="left"><bold>0.811</bold></td>
<td valign="middle" align="left">0.461</td>
<td valign="middle" align="left">9.9</td>
<td valign="middle" align="left">3.3</td>
</tr>
<tr>
<td valign="middle" align="left">Large</td>
<td valign="middle" align="left">0.892</td>
<td valign="middle" align="left">0.751</td>
<td valign="middle" align="left">0.865</td>
<td valign="middle" align="left">0.748</td>
<td valign="middle" align="left">0.940</td>
<td valign="middle" align="left">0.777</td>
<td valign="middle" align="left">0.807</td>
<td valign="middle" align="left">0.440</td>
<td valign="middle" align="left">13.4</td>
<td valign="middle" align="left">12.4</td>
</tr>
<tr>
<td valign="middle" align="left">Xlarge</td>
<td valign="middle" align="left">0.855</td>
<td valign="middle" align="left">0.728</td>
<td valign="middle" align="left">0.832</td>
<td valign="middle" align="left">0.701</td>
<td valign="middle" align="left">0.932</td>
<td valign="middle" align="left">0.751</td>
<td valign="middle" align="left">0.792</td>
<td valign="middle" align="left">0.415</td>
<td valign="middle" align="left">23.6</td>
<td valign="middle" align="left">19.8</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>The optimal values for each model under different conditions are displayed in bold.</p></fn>
</table-wrap-foot>
</table-wrap>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Heatmaps from different angles. Yellow boxes represent randomly selected comparison regions, and red regions represent areas with more attention in the model.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g007.tif">
<alt-text content-type="machine-generated">Two photographs of crop fields taken at forty-five and ninety-degree angles are shown on the left, each with a yellow rectangle highlighting a plant cluster. Enlarged sections from each angle are presented on the right, accompanied by six colored heatmaps labeled Nano, Small, Medium, Original, Large, and Xlarge, illustrating visual or computational analysis results for the selected regions.</alt-text>
</graphic></fig>
<p>However, the superior performance of YOLOv11 is affected to some extent, leading to detection boxes in unannotated regions. For example, when the model scans complex scenes, its strong feature extraction capability captures elements in unannotated regions that are similar to target features, as shown in <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8a</bold></xref>. This not only generates false detection results in unmarked regions but also complicates subsequent result screening, as shown in <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8b</bold></xref>. Additionally, YOLOv11&#x2019;s excellent noise reduction and detail enhancement capabilities may parse the fuzzy pixel clusters in unannotated regions into recognizable target forms, causing detection boxes in non-target regions, which interfere with the detection accuracy of the model.</p>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p><bold>(A)</bold> Rapeseed seedlings in non-target regions detected; <bold>(B)</bold> impact of detected rapeseed seedlings in non-target regions on counting.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g008.tif">
<alt-text content-type="machine-generated">Side-by-side comparison of two agricultural field images showing young plants marked with bounding boxes: the left image uses blue boxes and numeric labels, while the right uses green boxes and numeric IDs with red text for plant counting.</alt-text>
</graphic></fig>
<p>Although some studies have improved model detection performance using multi-source data such as depth information and infrared imaging (<xref ref-type="bibr" rid="B29">Stumpe et&#xa0;al., 2025</xref>; <xref ref-type="bibr" rid="B38">Yang Y. et&#xa0;al., 2025</xref>), most current smartphones (especially mid-to-low-end models accounting for the majority of the market) do not have dedicated hardware depth sensors (e.g., ToF, LiDAR). While depth can be inferred from stereo cameras based on multi-view geometry, this method is constrained by environmental interference, high computational complexity, and poor adaptability to dynamic field shooting. By focusing on smartphone RGB cameras, convenient data collection has ultimately been achieved for users; as more users upload RGB video data to the cloud, the dataset can be continuously enriched to boost model generalization. To ensure the universality, real-time performance, and scalability of the method, the counting pipeline has been optimized based solely on RGB video. Therefore, with the continuous improvement of future phone performance, smartphones are expected to break through existing hardware limitations and become terminal devices capable of directly acquiring, processing multi-source data, and outputting results.</p>
<p>Among different scales of YOLOv11 models, target detection results from the 45&#xb0; view were significantly better than those from the 90&#xb0; view, with <italic>P</italic>, <italic>AP</italic><sub>0.5</sub>, and <italic>AP</italic><sub>0.5:0.95</sub> for the 45&#xb0; view significantly higher than those for the 90&#xb0; view. In particular, for <italic>AP</italic><sub>0.5:0.95</sub>, the selected nano-scale model for the 45&#xb0; view improved by more than 30% compared to the 90&#xb0; view, with little difference in response time, providing guidance for tracking and counting performance from different views. As shown in <xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>, the precision of the nano target detection model selected for the two oblique views in rapeseed scenarios was 90.8% and 77.5%, respectively, with recall rates of 85% and 80.2%. High precision and stable recall rates provide a basis for reducing target loss during tracking and counting, avoiding overestimation of counting results due to misjudgment of targets.</p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Performance of rapeseed tracking and counting method</title>
<p>The AdapDBSCAN clustering method (adaptive method) based on SORT and DeepSORT exhibited excellent counting performance in processing rapeseed seedling videos captured by smartphones. In contrast, the counting method using fixed-parameter DBSCAN clustering based on SORT and DeepSORT (conventional method) showed considerable drawbacks. As shown in <xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9</bold></xref>, counting results using the adaptive method for Videos 1 to 20 were closer to the <italic>GT</italic> values, while the conventional method had larger deviations compared to the adaptive method. Taking Video 4 (<italic>GT</italic> = 680) as an example, the counting result using the CropTriangulator method was 679, with an accuracy of 99.85%, and the result using the AdapDBSCAN-DeepSORT method was 554, with an accuracy of 81.47%. Compared to the DBSCAN-SORT and DBSCAN-DeepSORT methods, the accuracy of the CropTriangulator method increased by 3.53% and 26.32%, respectively. A comparison of randomly selected frames from Video 4 using the DBSCAN-SORT method and the CropTriangulator method is shown in <xref ref-type="fig" rid="f10"><bold>Figure&#xa0;10</bold></xref>. The adaptive method effectively filtered out detected rapeseed seedlings in non-target rows, improving counting accuracy. These results demonstrate the excellent performance and feasibility of the adaptive method.</p>
<fig id="f9" position="float">
<label>Figure&#xa0;9</label>
<caption>
<p>Counting results using different methods. The red dashed line represents the <italic>GT</italic> value of the video. Videos 1 to 10 represent videos shot in order from a 45&#xb0; view, and Videos 11 to 20 represent videos shot in order from a 90&#xb0; view.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g009.tif">
<alt-text content-type="machine-generated">Grouped bar chart comparing five detection methods&#x2014;DBSCAN-SORT, DBSCAN-DeepSORT, AdaptiveDBSCAN-DeepSORT, GT, and CropTriangulator&#x2014;across counts for twenty videos. GT bars reach or approach the highest count line, indicating ground truth reference. Other methods vary, showing deviations from GT across different videos.</alt-text>
</graphic></fig>
<fig id="f10" position="float">
<label>Figure&#xa0;10</label>
<caption>
<p><bold>(A)</bold> Counting result of the 539th frame of Video 4 using the DBSCAN-SORT method; <bold>(B)</bold> counting result of the 539th frame of Video 4 using the CropTriangulator method. Yellow circles indicate regions where detected rapeseed seedlings in non-target rows were effectively filtered out.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g010.tif">
<alt-text content-type="machine-generated">Two side-by-side images labeled (a) and (b) show a crop field with green plants, each plant identified by a green bounding box and red identifier text. Both images include a yellow oval highlighting a specific plant cluster on the left, and a garden sign labeled &#x201c;Tioats&#x201d; near the center left.</alt-text>
</graphic></fig>
<p>In a certain frame of the video, due to the geometric characteristics of perspective projection, the actual spatial distribution of distant rapeseed seedlings is compressed in the image, which is manifested as an increase in the number of seedlings per unit image area (i.e., visually increasing density); at the same time, distant rapeseed seedlings occupy fewer pixels in the image, reflecting the impact of projection scaling on target imaging size (<xref ref-type="bibr" rid="B35">Wang Z. et&#xa0;al., 2022</xref>). In contrast, due to the short distance from the camera, foreground rapeseed seedlings are less affected by perspective projection scaling, showing sparse spatial distribution in the image, fewer seedlings per unit image area, and more pixels occupied by individual seedlings. This imaging feature of &#x201c;denser in distance and sparser in foreground&#x201d; leads to uneven density distribution of acquired rapeseed seedling data (<xref ref-type="bibr" rid="B12">Islam et&#xa0;al., 2024</xref>). The conventional method cannot adapt to local density differences, resulting in incorrect clustering and failure to filter out detection results from unmarked regions in the target detection stage, which are instead classified as target row rapeseed seedlings, leading to lower accuracy of the conventional method compared to the adaptive method.</p>
<p>The proposed adaptive method (AdapDBSCAN) fully leverages local target density information and a dual-threshold constraint mechanism to address these inherent limitations: it constructs a nonlinear mapping between the clustering radius (eps) and regional seedling density, dynamically shrinking eps in dense distant regions to maintain precise clustering and expanding eps in sparse foreground regions to preserve valid seedlings, while synchronously adjusting the min_samples parameter based on local sample counts. This scenario-specific optimization enables the adaptive method to accurately identify and separate target rows from non-target areas (e.g., drainage ditches and adjacent rows) even under perspective-induced density variations, effectively filtering out misdetected seedlings in unmarked regions that the conventional method fails to exclude. This approach significantly reduces erroneous clustering associated with fixed parameters, enhances the precision of non-target seedling elimination on both sides of the target rows, and ultimately achieves higher counting accuracy and stability compared to traditional approaches, fully demonstrating its superiority in handling the uneven density distribution of rapeseed seedlings in oblique-view videos.</p>
<p>Perspective distortion introduces three key challenges in practical applications: in the 45&#xb0; view, the foreground (0&#x2013;2m from the camera) has a seedling density of 2&#x2013;3 plants per m&#xb2; (sparse), while the background (8&#x2013;14m) reaches 8&#x2013;10 plants per m&#xb2; (dense), and fixed-parameter clustering (e.g., traditional DBSCAN) either over-clusters dense regions (merging adjacent seedlings) or under-clusters sparse regions (classifying valid seedlings as noise), leading to &#xb1;15% counting error; distant seedlings (background) occupy 3&#x2013;8 pixels, while foreground seedlings occupy 30&#x2013;50 pixels, and YOLOv11n&#x2019;s detection confidence for small targets (less than 10 pixels) drops by 35%, increasing <italic>FN</italic>; the transition zone between target rows and drainage ditches (depth 12&#x2013;14m) suffers from blurred edges due to perspective compression, leading to 10&#x2013;12% of <italic>FP</italic> from misclassifying ditch edges as seedlings.</p>
<p>By applying AdapDBSCAN for algorithm optimization, these problems can be solved. The dual-threshold constraint (eps_max=250 pixels and <italic>median<sub>(</sub>D<sub>filtered)</sub></italic>) dynamically adjusts clustering parameters based on local density. For dense background regions, eps is reduced to 50&#x2013;80 pixels to avoid over-clustering, and for sparse foreground regions, eps is increased to 150&#x2013;200 pixels to retain valid seedlings which reduces perspective-induced error by 42% compared to fixed-parameter DBSCAN.</p>
<p>Furthermore, to compare counting accuracy from different oblique angles, this study systematically evaluated performance differences between 45&#xb0; oblique view and 90&#xb0; vertical view. Given the significant advantages of target detection from the 45&#xb0; view over the 90&#xb0; view in performance indicators, it is reasonable to infer that superior performance is also expected in target tracking and final result output. Detection from the 45&#xb0; view can more accurately capture target features, reducing error accumulation in key links such as feature matching and motion prediction, ultimately improving overall tracking performance. Experimental results are shown in <xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9</bold></xref>, indicating that for the 45&#xb0; view, the average accuracy of the DBSCAN-SORT method (94.44%) was approximately 19% higher than that for the 90&#xb0; view (75.59%); the average accuracy of the CropTriangulator method (97.13%) was more than 14% higher than that for the 90&#xb0; view (82.94%). Additionally, as shown in <xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9</bold></xref>, for the 45&#xb0; oblique view using the CropTriangulator method, 5 out of 10 videos had an accuracy of over 98%, with the highest at 99.85%, closely matching manual counting results and far exceeding the counting accuracy of the 90&#xb0; vertical view using the CropTriangulator method. Moreover, linear regression analysis was applied between counting results obtained using the CropTriangulator method and <italic>GT</italic>. As shown in <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11</bold></xref>, the <italic>R</italic><sup>2</sup> of counting results from the 45&#xb0; oblique view was 0.917. These results highlight the excellent counting performance of the 45&#xb0; oblique view and its strong stability, fully verifying that the 45&#xb0; oblique view is superior for counting tasks.</p>
<fig id="f11" position="float">
<label>Figure&#xa0;11</label>
<caption>
<p>The result of 45&#xb0; oblique view rapeseed seedling counts calculated based on the CropTriangulator method. The blue scatter represents the counts based on the CropTriangulator method, while the blue solid line represents its corresponding linear regression. The black dashed line represents the ideal counting result.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g011.tif">
<alt-text content-type="machine-generated">Scatter plot comparing algorithm predictions and ground truth counts for ten videos, with a dashed ideal prediction line, a solid algorithm prediction line, and regression equation y equals 1.3318x minus 263.3, R squared equals 0.917, in the lower right corner.</alt-text>
</graphic></fig>
<p>Differences in counting performance between angles may stem from three key factors: firstly, the overlap rate of seedlings increases from the vertical view, with foreground seedlings occluding background seedlings; secondly, perspective distortion is enhanced from the 90&#xb0; vertical view, increasing distortion; finally, rapeseed seedlings from the 45&#xb0; oblique view contain both root and leaf features, enabling more efficient handling of complex scene interference and accurate counting by the SORT algorithm, while the 90&#xb0; vertical view contains highly similar crop root features, causing false detections that severely affect target detection and counting performance (<xref ref-type="bibr" rid="B15">Kumar et&#xa0;al., 2014</xref>; <xref ref-type="bibr" rid="B19">Liu et&#xa0;al., 2024</xref>).</p>
<p>Therefore, future research should focus on constructing more complex datasets containing more detailed and variable video acquisition views. In the future, we will adopt an image distortion correction model to correct the position of each pixel in the image through geometric transformation, restoring spatial relationships between pixels and improving data density consistency (<xref ref-type="bibr" rid="B17">Li et&#xa0;al., 2019</xref>). Additionally, efforts should be made to improve the efficiency and flexibility of counting methods, especially in environments with uneven density distribution.</p>
<p>Having established the superiority of the adaptive method and the 45&#xb0; viewing angle for the framework, the subsequent step involved selecting the most suitable tracking component. For this purpose, control experiments were conducted on various tracking algorithms under the same experimental conditions. Experimental results indicated that BotTrack achieved an average <italic>W<sub>ID</sub></italic> of 42.56% and a counting accuracy of 59.70% for the 45&#xb0; view, while ByteTrack exhibited a <italic>W<sub>ID</sub></italic> of 28.35% and an accuracy of 80.25% for the same view. Both algorithms showed higher <italic>W<sub>ID</sub></italic> and lower counting accuracy compared to SORT. The <italic>W<sub>ID</sub></italic> of ByteTrack, although lower than that of DeepSORT, remained substantially higher than the benchmark of 8.47% set by SORT, as shown in <xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>. The <italic>W<sub>ID</sub></italic> (8.47%) of SORT was 19.88% lower than that of ByteTrack and 34.10% lower than that of BotTrack. Furthermore, its counting accuracy (97.13%) was 16.88% and 37.43% higher than those of ByteTrack and BotTrack, respectively.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Performance comparison of tracking algorithms based on adaptive methods.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Algorithms</th>
<th valign="middle" align="left"><italic>W<sub>ID</sub></italic> (%)</th>
<th valign="middle" align="left"><italic>P<sub>tr</sub></italic> (%)</th>
<th valign="middle" align="left"><italic>P<sub>mt</sub></italic> (%)</th>
<th valign="middle" align="left">45&#xb0; View <italic>Acc</italic> (%)</th>
<th valign="middle" align="left">Inference time(ms/frame)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">SORT</td>
<td valign="middle" align="left">8.47</td>
<td valign="middle" align="left">87.53</td>
<td valign="middle" align="left">89.75</td>
<td valign="middle" align="left">97.13</td>
<td valign="middle" align="left">4.6</td>
</tr>
<tr>
<td valign="middle" align="left">DeepSORT</td>
<td valign="middle" align="left">36.05</td>
<td valign="middle" align="left">71.50</td>
<td valign="middle" align="left">54.72</td>
<td valign="middle" align="left">81.47</td>
<td valign="middle" align="left">12.8</td>
</tr>
<tr>
<td valign="middle" align="left">BotTrack</td>
<td valign="middle" align="left">42.56</td>
<td valign="middle" align="left">61.30</td>
<td valign="middle" align="left">47.88</td>
<td valign="middle" align="left">59.70</td>
<td valign="middle" align="left">12.4</td>
</tr>
<tr>
<td valign="middle" align="left">ByteTrack</td>
<td valign="middle" align="left">28.35</td>
<td valign="middle" align="left">78.62</td>
<td valign="middle" align="left">73.90</td>
<td valign="middle" align="left">80.25</td>
<td valign="middle" align="left">8.7</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The inferior tracking stability of BotTrack and ByteTrack in this study can be attributed to two scenario-specific factors. First, despite its plant-targeted design, the feature matching module in BotTrack struggles to distinguish late-stage rapeseed seedlings with highly uniform morphology, leading to frequent ID misassignments and the highest <italic>W<sub>ID</sub></italic> (42.56%) among all tested algorithms. Its <italic>P<sub>tr</sub></italic> (61.30%) and <italic>P<sub>mt</sub></italic> (47.88%) are also the lowest, reflecting poor target association and matching performance under high-similarity conditions. Second, the strategy employed by ByteTrack for associating low-confidence detections, while beneficial in general crowded scenes, becomes counterproductive here. Since the preprocessing stage has already filtered out most noise, the remaining low-confidence boxes largely correspond to ambiguous patches or background clutter. Attempting to associate them introduces unnecessary computational overhead and increases the risk of ID switches, as reflected in its elevated <italic>W<sub>ID</sub></italic> (28.35%). Moreover, their inference speeds (BotTrack: 12.4 ms per frame; ByteTrack: 8.7 ms per frame) remain significantly slower than that of SORT (4.6 ms per frame), a critical disadvantage for real-time processing in practical deployment.</p>
<p>To focus on balancing efficiency and stability in practical applications and to avoid increasing computational burden and the risk of ID confusion due to the introduction of overly complex feature matching mechanisms, the subsequent comparison of tracking algorithms in this study was concentrated on SORT and DeepSORT. Although algorithms such as BotTrack and ByteTrack excel in general object tracking, their complex feature matching mechanisms were primarily designed for targets with significant appearance differences or variable motion patterns. Under the specific conditions where seedling features were nearly identical and motion was solely induced by camera movement, these sophisticated mechanisms not only struggled to provide effective discrimination but also could increase the risk of ID switches and computational overhead by relying on easily confusable appearance features. This approach aimed to more clearly demonstrate the advantages of a streamlined and efficient tracking framework (SORT) over deep trackers dependent on appearance features (DeepSORT) in the high-similarity scenario of rapeseed seedlings.</p>
<p>The SORT tracking algorithm exhibited good tracking performance in rapeseed fields. As shown in <xref ref-type="fig" rid="f12"><bold>Figure&#xa0;12</bold></xref>, for 20 videos using the adaptive counting method based on SORT, the average ID switch rate (<italic>W<sub>ID</sub></italic>) was 8.47%, meaning most rapeseed seedlings (even those with severe mutual occlusion) maintained their IDs from appearance to disappearance, as shown in <xref ref-type="fig" rid="f13"><bold>Figures&#xa0;13a, c, e</bold></xref>. In contrast, for the same videos with three consecutive frames using the adaptive counting method based on DeepSORT, IDs of rapeseed seedlings in dense regions changed significantly, as shown in <xref ref-type="fig" rid="f13"><bold>Figures&#xa0;13b, d, f</bold></xref>. For the counting method based on SORT, the average target tracking accuracy (<italic>P<sub>tr</sub></italic>) and average target tracking precision (<italic>P<sub>mt</sub></italic>) were 87.53% and 89.75%, respectively. For videos using the counting method based on DeepSORT, the <italic>W<sub>ID</sub></italic>, <italic>P<sub>tr</sub></italic>, and <italic>P<sub>mt</sub></italic> were 36.05%, 71.50%, and 54.72%, respectively. The <italic>P<sub>mt</sub></italic> of the SORT-based counting method was 35.03% higher than that of DeepSORT, indicating that SORT is far superior to DeepSORT in terms of trajectory continuity and stability. Additionally, two consecutive frames randomly selected from a video using the DeepSORT tracking algorithm are shown in <xref ref-type="fig" rid="f14"><bold>Figure&#xa0;14</bold></xref>. As can be seen from <xref ref-type="fig" rid="f13"><bold>Figures&#xa0;13b, d</bold></xref> and <xref ref-type="fig" rid="f14"><bold>14</bold></xref>, DeepSORT exhibits large fluctuations in tracking stability, which is possibly due to its poor robustness in dense or occluded scenarios in this study.</p>
<fig id="f12" position="float">
<label>Figure&#xa0;12</label>
<caption>
<p>Tracking results of rapeseed seedlings using adaptive counting methods based on SORT and DeepSORT. Results of the adaptive counting method based on SORT are marked in blue; results of the adaptive counting method based on DeepSORT are marked in red; colored filled bars represent ID switch rate (<italic>W<sub>ID</sub></italic>) results; diagonal bars represent target tracking accuracy (<italic>P<sub>tr</sub></italic>) results; horizontal bars represent target tracking precision (<italic>P<sub>mt</sub></italic>) results.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g012.tif">
<alt-text content-type="machine-generated">Grouped bar charts display performance percentages of rapeseed seedlings across twenty videos, with each video featuring four blue and four red bars of varying patterns, allowing performance comparison among multiple groups or conditions.</alt-text>
</graphic></fig>
<fig id="f13" position="float">
<label>Figure&#xa0;13</label>
<caption>
<p><bold>(A, C, E)</bold> Examples of three consecutive frames starting from the 219th frame of Video 4 using the SORT-based adaptive method, where most rapeseed seedlings maintained their IDs; <bold>(B, D, F)</bold> examples of three consecutive frames starting from the 219th frame of Video 4 using the DeepSORT-based adaptive method, where IDs of rapeseed seedlings in dense regions changed significantly. Yellow circles indicate regions with changed counting boxes.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g013.tif">
<alt-text content-type="machine-generated">Six photographic panels labeled a through f display a vegetable field with green plants arranged in rows. Green rectangular boxes with red numerical annotations mark detected objects in all panels; panels b, d, and f contain additional yellow circles highlighting specific plant clusters, indicating areas of interest for comparison.</alt-text>
</graphic></fig>
<fig id="f14" position="float">
<label>Figure&#xa0;14</label>
<caption>
<p><bold>(A, B)</bold> Two consecutive frames randomly selected from Video 14, illustrating large fluctuations in the tracking stability of DeepSORT. Yellow circles indicate regions with changed counting boxes.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g014.tif">
<alt-text content-type="machine-generated">Side-by-side comparison of two annotated field images labeled (a) and (b), each showing a vegetable crop with bounding boxes and numerical data highlighting detected objects, with a yellow circle emphasizing a specific region amid a cityscape background.</alt-text>
</graphic></fig>
<p>Compared to the DeepSORT-based counting method, the SORT-based counting method was more accurate. As shown in <xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9</bold></xref>, counting results of the conventional and adaptive methods based on DeepSORT for Videos 1 to 10 were 0.55 and 0.79 times the <italic>GT</italic> value, respectively, while those for Videos 11 to 20 were only 0.4 and 0.45 times the <italic>GT</italic> value, far lower than the corresponding results of the SORT-based counting method. This may be because the DeepSORT-based counting method introduces a deep appearance feature extractor (<xref ref-type="bibr" rid="B43">Zhang et&#xa0;al., 2024</xref>), which struggles to learn subtle features that sufficiently distinguish highly similar plants. In the same video, IDs already assigned to rapeseed seedlings may be reassigned to new rapeseed seedlings, as shown in <xref ref-type="fig" rid="f15"><bold>Figure&#xa0;15</bold></xref>, leading to generally lower counting results compared to the <italic>GT</italic> value. The highly similar appearance of rapeseed seedlings significantly affects counting accuracy, which is also the reason for the high <italic>W<sub>ID</sub></italic> of DeepSORT. Additionally, DeepSORT relies more on static background assumptions (<xref ref-type="bibr" rid="B10">Huang et&#xa0;al., 2024</xref>), whereas data in this study were acquired by staff walking in the field with a gimbal, so the camera moved continuously relative to the fixed rapeseed seedlings, undermining the reliability of DeepSORT&#x2019;s motion prediction and appearance matching. In the environment of this study, the tracking performance of DeepSORT was far inferior to that of SORT. Therefore, the CropTriangulator method combining the adaptive method with SORT provides considerable advantages for real-time counting of rapeseed seedlings.</p>
<fig id="f15" position="float">
<label>Figure&#xa0;15</label>
<caption>
<p>Counting results of the DeepSORT-based adaptive method, showing rapeseed seedlings with ID 15 in the 359th and 406th frames of Video 14; <bold>(A)</bold> shows the 359th frame, <bold>(B)</bold> shows the 406th frame.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1770912-g015.tif">
<alt-text content-type="machine-generated">Side-by-side photos of a cabbage field with an urban skyline in the background. Both panels feature green boxes and red text labeling individual cabbage plants, with a highlighted yellow box focusing on a specific plant. The left panel is labeled &#x201c;Number: 225 Frame: 359&#x201d; and the right panel &#x201c;Number: 261 Frame: 406,&#x201d; indicating plant tracking between frames.</alt-text>
</graphic></fig>
<p>The main sources of counting errors in this study include target overlap, false positives (<italic>FP</italic>), and false negatives (<italic>FN</italic>): late-stage rapeseed seedlings have dense foliage, leading to 30&#x2013;40% overlap in the 90&#xb0; vertical view but 15&#x2013;20% in the 45&#xb0; oblique view, which causes the model to misclassify multiple seedlings as a single target and results in <italic>FN</italic>, for example, in Video 18 (90&#xb0; view), 12 out of 38 <italic>FN</italic> errors (31.6%) were attributed to severe leaf overlap, while the 45&#xb0; oblique view reduces overlap by exposing more 3D structural features (e.g., stem-root separation), and the SORT algorithm&#x2019;s motion prediction further mitigates this by maintaining ID continuity for partially occluded seedlings; <italic>FP</italic> primarily originate from non-target regions (e.g., weeds, soil clods) and perspective-induced misdetection, and the AdapDBSCAN algorithm filters 89.2% of <italic>FP</italic> by dynamic density clustering, but residual <italic>FP</italic> (accounting for 2.3% of total counts) still occur in edge regions of drainage ditches due to the pixel-to-distance ratio in edge regions deviating from the central area, leading to over-clustering; beyond overlap, <italic>FN</italic> are caused by motion blur (18.7% of <italic>FN</italic>) and small-sized seedlings in distant regions (22.1% of <italic>FN</italic>). Motion blur (resulting from walking speed fluctuations) reduces detection confidence, while distant seedlings (less than 5 pixels in diameter) are easily missed by YOLOv11n.</p>
<p>To verify the superiority of the CropTriangulator method in late-stage rapeseed seedling counting, four representative SOTA crop counting methods were selected for benchmarking. The comparison is based on publicly reported performance metrics from the original studies, with a focus on counting accuracy. The comparative results are summarized in <xref ref-type="table" rid="T5"><bold>Table&#xa0;5</bold></xref>, where the performance of existing methods was extracted from their original publications, and the performance of CropTriangulator was based on the 45&#xb0; optimal view.</p>
<table-wrap id="T5" position="float">
<label>Table&#xa0;5</label>
<caption>
<p>Performance comparison between CropTriangulator and SOTA methods.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Reference</th>
<th valign="middle" align="left">Method</th>
<th valign="middle" align="left">Target Crop (Stage)</th>
<th valign="middle" align="left">Counting Accuracy</th>
<th valign="middle" align="left">CropTriangulator Accuracy (45&#xb0; View)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B44">Zhao et&#xa0;al. (2018)</xref></td>
<td valign="middle" align="left">UAV Image and Deep Learning Detection</td>
<td valign="middle" align="left">Rapeseed (Early/Late)</td>
<td valign="middle" align="left">83.67%</td>
<td valign="middle" align="left">97.13%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B18">Lin et&#xa0;al. (2022)</xref></td>
<td valign="middle" align="left">Improved YOLOv5s and DeepSORT</td>
<td valign="middle" align="left">Peanut (Seedling)</td>
<td valign="middle" align="left">98.08%</td>
<td valign="middle" align="left">97.13%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B32">Tan et&#xa0;al. (2023)</xref></td>
<td valign="middle" align="left">Anchor-Free Deep Convolutional Neural Network for Tracking and Counting</td>
<td valign="middle" align="left">Cotton (Seedling/Flower)</td>
<td valign="middle" align="left">94.40%</td>
<td valign="middle" align="left">97.13%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B28">Rong et&#xa0;al. (2023)</xref></td>
<td valign="middle" align="left">RGB-D Fusion and Improved YOLOv5 Detection and Multi-Object Tracking</td>
<td valign="middle" align="left">Tomato (Cluster)</td>
<td valign="middle" align="left">97.90%</td>
<td valign="middle" align="left">97.13%</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>As shown in <xref ref-type="table" rid="T5"><bold>Table&#xa0;5</bold></xref>, the CropTriangulator method demonstrates significant advantages in late-stage rapeseed counting. Compared to the only rapeseed-specific method (<xref ref-type="bibr" rid="B44">Zhao et&#xa0;al., 2018</xref>), CropTriangulator achieves a 13.46% higher accuracy, solving the bottlenecks of single-image data (partial target missing) and late-stage occlusion that limit existing rapeseed counting methods. Despite targeting more complex late-stage rapeseed (with severe leaf overlap and perspective distortion), the accuracy of CropTriangulator is comparable to early-stage peanut counting (<xref ref-type="bibr" rid="B18">Lin et&#xa0;al., 2022</xref>) and outperforms dense cotton seedling counting (<xref ref-type="bibr" rid="B32">Tan et&#xa0;al., 2023</xref>), verifying strong adaptability to complex growth stages. Unlike <xref ref-type="bibr" rid="B28">Rong et&#xa0;al. (2023)</xref>, which relies on depth sensors and controlled greenhouse environments, CropTriangulator achieves comparable accuracy in open fields.</p>
<p>These results confirm that the CropTriangulator method effectively addresses the optimization gap of existing methods in late-stage rapeseed seedling counting, balancing high accuracy, adaptability to complex field scenarios, and practical applicability.</p>
<p>Although the problem of data density distribution in rapeseed seedling counting has been basically solved, several challenges remain: firstly, motion blur and shooting area offset not only affect the detection stage but also impact clustering and counting performance. Secondly, the CropTriangulator method is constrained by tracking results. Some studies indicate that existing algorithms have poor tracking accuracy for dense pedestrians, suggesting poor performance of current tracking methods in complex and variable backgrounds (<xref ref-type="bibr" rid="B33">Wang W. et&#xa0;al., 2022</xref>). Fortunately, improvements in tracking algorithms have shown positive effects in kiwifruit orchard counting (<xref ref-type="bibr" rid="B40">Zhang et&#xa0;al., 2025</xref>). However, direct cross-study comparisons are challenging due to differences in data acquisition standards. Stable relative motion during data acquisition ensures effective cross-frame matching. It should be noted that rapeseed seedlings have similar phenotypic characteristics, especially those in the same growth stage, making it difficult for existing tracking technologies with feature recognition modules to distinguish them, leading to ID assignment errors. Some studies have achieved accurate crop identification by acquiring images of crops at different growth stages in the field (<xref ref-type="bibr" rid="B16">Li et&#xa0;al., 2025</xref>; <xref ref-type="bibr" rid="B24">Naseer et&#xa0;al., 2025</xref>).</p>
<p>Challenges in crop counting in complex field environments require comprehensive research in the future. The adaptive dynamic parameter adjustment mechanism of AdapDBSCAN must be optimized to improve clustering performance, especially in uneven density scenarios. User-friendly video acquisition methods should also be explored to better control video quality, reduce the impact of motion errors on clustering, and thus weaken their impact on counting. Meanwhile, developing stable and efficient rapeseed seedling tracking technologies in complex agricultural scenarios remains a topic worthy of further exploration. Additionally, future research is advised to focus on temporal analysis (<xref ref-type="bibr" rid="B22">Lv et&#xa0;al., 2025</xref>; <xref ref-type="bibr" rid="B34">Wang et&#xa0;al., 2024</xref>), tracking morphological changes of the same plant at different seedling stages (early and late seedling stages), which may help improve the stability of tracking methods and avoid duplicate counting.</p>
<p>The video-based rapeseed seedling counting method proposed in this study is more convenient than previous image-based algorithms and more easily applicable to actual crop planting. These results verify the excellent performance of the CropTriangulator method and the feasibility of eliminating rapeseed seedlings in non-target regions based on target density distribution. Therefore, this study contributes to formulating strategies to improve crop emergence rates and productivity estimation.</p>
</sec>
</sec>
<sec id="s4" sec-type="conclusions">
<label>4</label>
<title>Conclusions</title>
<p>This study proposed CropTriangulator, an automatic rapeseed seedling counting pipeline that achieves accurate row-based counting with smartphone-captured videos. YOLOv11n demonstrated excellent performance in detecting seedlings against complex backgrounds while maintaining fast inference times, indicating its suitability for small-scale object detection in agricultural settings. The AdapDBSCAN method achieved promising results in row-based seedling counting by dynamically adjusting clustering parameters to filter out non-target seedlings, suggesting that adaptive density-based clustering effectively addresses perspective-induced distortions. The 45&#xb0; oblique view proved significantly superior to the 90&#xb0; vertical view, improving counting accuracy by effectively reducing leaf occlusion and providing more discriminative phenotypic features of seedlings.</p>
<p>Although the pipeline showed reliable row-based counting, results were affected by the inability to preview frames during data collection. The study did not consider the impact of path offsets or camera shakes, which may limit the applicability of this method in different field conditions. Future research should focus on developing more efficient approaches to achieve row-based rapeseed seedling counting based on video, along with user-friendly video capture techniques and more precise shooting views. Furthermore, it would be beneficial to integrate temporal growth analysis to improve tracking consistency across seedling stages. With further optimization, this pipeline holds significant potential for automated yield estimation, supporting data-driven field management decisions.</p>
</sec>
</body>
<back>
<sec id="s5" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p></sec>
<sec id="s6" sec-type="author-contributions">
<title>Author contributions</title>
<p>BL: Investigation, Writing &#x2013; original draft, Data curation. YAY: Writing &#x2013; review &amp; editing, Methodology. KZ: Methodology, Writing &#x2013; review &amp; editing. XL: Writing &#x2013; review &amp; editing, Methodology. YL: Methodology, Writing &#x2013; review &amp; editing. YCY: Writing &#x2013; review &amp; editing, Methodology. FZ: Writing &#x2013; review &amp; editing, Methodology. LL: Writing &#x2013; review &amp; editing. GZ: Writing &#x2013; review &amp; editing. XW: Writing &#x2013; review &amp; editing, Methodology. ZW: Methodology, Data curation, Conceptualization, Supervision, Writing &#x2013; review &amp; editing.</p></sec>
<sec id="s8" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s9" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<sec id="s11" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fpls.2026.1770912/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fpls.2026.1770912/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Video1.mp4" id="SM1" mimetype="video/mp4"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bao</surname> <given-names>W.</given-names></name>
<name><surname>Lin</surname> <given-names>Z.</given-names></name>
<name><surname>Hu</surname> <given-names>G.</given-names></name>
<name><surname>Liang</surname> <given-names>D.</given-names></name>
<name><surname>Huang</surname> <given-names>L.</given-names></name>
<name><surname>Zhang</surname> <given-names>X.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Method for wheat ear counting based on frequency domain decomposition of MSVF-ISCT</article-title>. <source>Inf. Process. Agric.</source> <volume>10</volume>, <fpage>240</fpage>&#x2013;<lpage>255</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.inpa.2022.01.001</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Barreto</surname> <given-names>A.</given-names></name>
<name><surname>Lottes</surname> <given-names>P.</given-names></name>
<name><surname>Ispizua Yamati</surname> <given-names>F. R.</given-names></name>
<name><surname>Baumgarten</surname> <given-names>S.</given-names></name>
<name><surname>Wolf</surname> <given-names>N. A.</given-names></name>
<name><surname>Stachniss</surname> <given-names>C.</given-names></name>
<etal/>
</person-group>. (<year>2021</year>). 
<article-title>Automatic UAV-based counting of seedlings in sugar-beet field and extension to maize and strawberry</article-title>. <source>Comput. Electron. Agric.</source> <volume>191</volume>, <elocation-id>106493</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2021.106493</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cardellicchio</surname> <given-names>A.</given-names></name>
<name><surname>Solimani</surname> <given-names>F.</given-names></name>
<name><surname>Dimauro</surname> <given-names>G.</given-names></name>
<name><surname>Petrozza</surname> <given-names>A.</given-names></name>
<name><surname>Summerer</surname> <given-names>S.</given-names></name>
<name><surname>Cellini</surname> <given-names>F.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). 
<article-title>Detection of tomato plant phenotyping traits using YOLOv5-based single stage detectors</article-title>. <source>Comput. Electron. Agric.</source> <volume>207</volume>, <elocation-id>107757</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2023.107757</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>S.</given-names></name>
<name><surname>Zou</surname> <given-names>X.</given-names></name>
<name><surname>Zhou</surname> <given-names>X.</given-names></name>
<name><surname>Xiang</surname> <given-names>Y.</given-names></name>
<name><surname>Wu</surname> <given-names>M.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Study on fusion clustering and improved YOLOv5 algorithm based on multiple occlusion of Camellia oleifera fruit</article-title>. <source>Comput. Electron. Agric.</source> <volume>206</volume>, <elocation-id>107706</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2023.107706</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>Z.</given-names></name>
<name><surname>Granland</surname> <given-names>K.</given-names></name>
<name><surname>Tang</surname> <given-names>Y.</given-names></name>
<name><surname>Chen</surname> <given-names>C.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>HOB-CNNv2: Deep learning based detection of extremely occluded tree branches and reference to the dominant tree image</article-title>. <source>Comput. Electron. Agric.</source> <volume>218</volume>, <elocation-id>108727</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2024.108727</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cheng</surname> <given-names>D.</given-names></name>
<name><surname>Zhang</surname> <given-names>C.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Xia</surname> <given-names>S.</given-names></name>
<name><surname>Wang</surname> <given-names>G.</given-names></name>
<name><surname>Huang</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>GB-DBSCAN: A fast granular-ball based DBSCAN clustering algorithm</article-title>. <source>Inf. Sci.</source> <volume>674</volume>, <elocation-id>120731</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.INS.2024.120731</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Daniels</surname> <given-names>S.</given-names></name>
<name><surname>Suciati</surname> <given-names>N.</given-names></name>
<name><surname>Fathichah</surname> <given-names>C.</given-names></name>
</person-group> (<year>2021</year>). &#x201c;
<article-title>Indonesian sign language recognition using YOLO method</article-title>,&#x201d; in <conf-name>IOP Conference Series: Materials Science and Engineering</conf-name>, (<publisher-loc>Bristol, UK</publisher-loc>: 
<publisher-name>IOP Publishing</publisher-name>) Vol. <volume>1077</volume>. <fpage>012029</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1088/1757-899x/1077/1/012029</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Dolata</surname> <given-names>P.</given-names></name>
<name><surname>Wr&#xf3;blewski</surname> <given-names>P.</given-names></name>
<name><surname>Mrzyg&#x142;&#xf3;d</surname> <given-names>M.</given-names></name>
<name><surname>Reiner</surname> <given-names>J.</given-names></name>
</person-group> (<year>2021</year>). 
<article-title>Instance segmentation of root crops and simulation-based learning to estimate their physical dimensions for on-line machine vision yield monitoring</article-title>. <source>Comput. Electron. Agric.</source> <volume>190</volume>, <elocation-id>106451</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2021.106451</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Ester</surname> <given-names>M.</given-names></name>
<name><surname>Kriegel</surname> <given-names>H. P.</given-names></name>
<name><surname>Sander</surname> <given-names>J.</given-names></name>
<name><surname>Xu</surname> <given-names>X.</given-names></name>
</person-group> (<year>1996</year>). &#x201c;
<article-title>A density-based algorithm for discovering clusters in large spatial databases with noise</article-title>,&#x201d; in <conf-name>Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining (KDD&#x2019;96)</conf-name>. (<publisher-loc>Portland, Oregon, USA</publisher-loc>: 
<publisher-name>Association for Computing Machinery (ACM)</publisher-name>) <fpage>226</fpage>&#x2013;<lpage>231</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1145/300146.300168</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Huang</surname> <given-names>C.</given-names></name>
<name><surname>Zeng</surname> <given-names>Q.</given-names></name>
<name><surname>Xiong</surname> <given-names>F.</given-names></name>
<name><surname>Xu</surname> <given-names>J.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Space dynamic target tracking method based on five-frame difference and Deepsort</article-title>. <source>Sci. Rep.</source> <volume>14</volume>, <fpage>6020</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-024-56623-z</pub-id>, PMID: <pub-id pub-id-type="pmid">38472374</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Huang</surname> <given-names>Z.</given-names></name>
<name><surname>Lee</surname> <given-names>W. S.</given-names></name>
<name><surname>Yang</surname> <given-names>P.</given-names></name>
<name><surname>Ampatzidis</surname> <given-names>Y.</given-names></name>
<name><surname>Shinsuke</surname> <given-names>A.</given-names></name>
<name><surname>Peres</surname> <given-names>N. A.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Advanced canopy size estimation in strawberry production: a machine learning approach using YOLOv11 and SAM</article-title>. <source>Comput. Electron. Agric.</source> <volume>236</volume>, <elocation-id>110501</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2025.110501</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Islam</surname> <given-names>S.</given-names></name>
<name><surname>Reza</surname> <given-names>M. N.</given-names></name>
<name><surname>Chowdhury</surname> <given-names>M.</given-names></name>
<name><surname>Ahmed</surname> <given-names>S.</given-names></name>
<name><surname>Lee</surname> <given-names>K. H.</given-names></name>
<name><surname>Ali</surname> <given-names>M.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Detection and segmentation of lettuce seedlings from seedling-growing tray imagery using an improved mask R-CNN method</article-title>. <source>Smart Agric. Technol.</source> <volume>8</volume>, <elocation-id>100455</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.ATECH.2024.100455</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Khaki</surname> <given-names>S.</given-names></name>
<name><surname>Safaei</surname> <given-names>N.</given-names></name>
<name><surname>Pham</surname> <given-names>H.</given-names></name>
<name><surname>Wang</surname> <given-names>L.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>WheatNet: A lightweight convolutional neural network for high-throughput image-based wheat head detection and counting</article-title>. <source>Neurocomputing</source> <volume>489</volume>, <fpage>78</fpage>&#x2013;<lpage>89</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.neucom.2022.03.017</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Khan</surname> <given-names>S. S.</given-names></name>
<name><surname>Ahmad</surname> <given-names>A.</given-names></name>
<name><surname>Shiang</surname> <given-names>C. W.</given-names></name>
</person-group> (<year>2018</year>). 
<article-title>Adaptive DBSCAN: A density-based clustering algorithm with adaptive parameters</article-title>. <source>J. King Saud Univ. Comput. Inf. Sci.</source> <volume>30</volume>, <fpage>578</fpage>&#x2013;<lpage>589</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jksuci.2017.05.002</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kumar</surname> <given-names>P.</given-names></name>
<name><surname>Huang</surname> <given-names>C.</given-names></name>
<name><surname>Cai</surname> <given-names>J.</given-names></name>
<name><surname>Miklavcic</surname> <given-names>S. J.</given-names></name>
</person-group> (<year>2014</year>). 
<article-title>Root phenotyping by root tip detection and classification through statistical learning</article-title>. <source>Plant Soil</source> <volume>380</volume>, <fpage>193</fpage>&#x2013;<lpage>209</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11104-014-2071-3</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Che</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>H.</given-names></name>
<name><surname>Zhang</surname> <given-names>S.</given-names></name>
<name><surname>Zheng</surname> <given-names>L.</given-names></name>
<name><surname>Ma</surname> <given-names>X.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Wheat growth stage identification method based on multimodal data</article-title>. <source>Eur. J. Agron.</source> <volume>162</volume>, <elocation-id>127423</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.EJA.2024.127423</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>J.</given-names></name>
<name><surname>Su</surname> <given-names>J.</given-names></name>
<name><surname>Zeng</surname> <given-names>X.</given-names></name>
</person-group> (<year>2019</year>). 
<article-title>A solution method for image distortion correction model based on bilinear interpolation</article-title>. <source>Comput. Optics</source> <volume>43</volume>, <fpage>99</fpage>&#x2013;<lpage>104</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.18287/2412-6179-2019-43</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lin</surname> <given-names>Y.</given-names></name>
<name><surname>Chen</surname> <given-names>T.</given-names></name>
<name><surname>Liu</surname> <given-names>S.</given-names></name>
<name><surname>Cai</surname> <given-names>Y.</given-names></name>
<name><surname>Shi</surname> <given-names>H.</given-names></name>
<name><surname>Zheng</surname> <given-names>D.</given-names></name>
<etal/>
</person-group>. (<year>2022</year>). 
<article-title>Quick and accurate monitoring peanut seedlings emergence rate through UAV video and deep learning</article-title>. <source>Comput. Electron. Agric.</source> <volume>197</volume>, <elocation-id>106938</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2022.106938</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>Y.</given-names></name>
<name><surname>Guo</surname> <given-names>Y.</given-names></name>
<name><surname>Wang</surname> <given-names>X.</given-names></name>
<name><surname>Yang</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
<name><surname>An</surname> <given-names>D.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Crop root rows detection based on crop canopy image</article-title>. <source>Agriculture</source> <volume>14</volume>, <elocation-id>969</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agriculture14070969</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>D.</given-names></name>
<name><surname>He</surname> <given-names>P.</given-names></name>
<name><surname>Wang</surname> <given-names>Q.</given-names></name>
<name><surname>He</surname> <given-names>Y.</given-names></name>
<name><surname>Cheng</surname> <given-names>F.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Object detection with attribute tagging task: Model design and evaluation on agricultural datasets</article-title>. <source>Comput. Electron. Agric.</source> <volume>230</volume>, <elocation-id>109880</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2024.109880</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>A.</given-names></name>
<name><surname>Li</surname> <given-names>P.</given-names></name>
<name><surname>Xie</surname> <given-names>F.</given-names></name>
<name><surname>Ashwehmbom</surname> <given-names>L. G.</given-names></name>
<name><surname>Wang</surname> <given-names>X.</given-names></name>
<name><surname>Zhu</surname> <given-names>L.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Design and experiment of adaptive adjustment of threshing gaps based on the feed rate monitoring of soybean combine harvester conveyor trough</article-title>. <source>Comput. Electron. Agric.</source> <volume>237</volume>, <elocation-id>110687</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2025.110687</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lv</surname> <given-names>X.</given-names></name>
<name><surname>Wang</surname> <given-names>X.</given-names></name>
<name><surname>Wang</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>F.</given-names></name>
<name><surname>Liu</surname> <given-names>L.</given-names></name>
<name><surname>Wu</surname> <given-names>Z.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Dynamic whole-life cycle measurement of individual plant height in oilseed rape through the fusion of point cloud and crop root zone localization</article-title>. <source>Comput. Electron. Agric.</source> <volume>236</volume>, <elocation-id>110505</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2025.110505</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Misra</surname> <given-names>T.</given-names></name>
<name><surname>Arora</surname> <given-names>A.</given-names></name>
<name><surname>Marwaha</surname> <given-names>S.</given-names></name>
<name><surname>Chinnusamy</surname> <given-names>V.</given-names></name>
<name><surname>Rao</surname> <given-names>A. R.</given-names></name>
<name><surname>Jain</surname> <given-names>R.</given-names></name>
<etal/>
</person-group>. (<year>2020</year>). 
<article-title>SpikeSegNet-a deep learning approach utilizing encoder-decoder network with hourglass for spike segmentation and counting in wheat plant from visual imaging</article-title>. <source>Plant Methods</source> <volume>16</volume>, <fpage>40</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13007-020-00582-9</pub-id>, PMID: <pub-id pub-id-type="pmid">32206080</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Naseer</surname> <given-names>A.</given-names></name>
<name><surname>Amjad</surname> <given-names>M.</given-names></name>
<name><surname>Raza</surname> <given-names>A.</given-names></name>
<name><surname>Munir</surname> <given-names>K.</given-names></name>
<name><surname>Smerat</surname> <given-names>A.</given-names></name>
<name><surname>Gongora</surname> <given-names>H. F.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Novel hybrid transfer neural network for wheat crop growth stages recognition using field images</article-title>. <source>Sci. Rep.</source> <volume>15</volume>, <fpage>11822</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-025-96332-9</pub-id>, PMID: <pub-id pub-id-type="pmid">40195431</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Paparella</surname> <given-names>S.</given-names></name>
<name><surname>Ara&#xfa;jo</surname> <given-names>S. S.</given-names></name>
<name><surname>Rossi</surname> <given-names>G.</given-names></name>
<name><surname>Wijayasinghe</surname> <given-names>M.</given-names></name>
<name><surname>Carbonera</surname> <given-names>D.</given-names></name>
<name><surname>Balestrazzi</surname> <given-names>A.</given-names></name>
</person-group> (<year>2015</year>). 
<article-title>Seed priming: state of the art and new perspectives</article-title>. <source>Plant Cell Rep.</source> <volume>34</volume>, <fpage>1281</fpage>&#x2013;<lpage>1293</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00299-015-1784-y</pub-id>, PMID: <pub-id pub-id-type="pmid">25812837</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Qin</surname> <given-names>Q.</given-names></name>
<name><surname>Zhou</surname> <given-names>X.</given-names></name>
<name><surname>Gao</surname> <given-names>J.</given-names></name>
<name><surname>Wang</surname> <given-names>Z.</given-names></name>
<name><surname>Naer</surname> <given-names>A.</given-names></name>
<name><surname>Hai</surname> <given-names>L.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>YOLOv8-CBAM: a study of sheep head identification in Ujumqin sheep</article-title>. <source>Front. Vet. Sci.</source> <volume>12</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fvets.2025.1514212</pub-id>, PMID: <pub-id pub-id-type="pmid">39981316</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Rong</surname> <given-names>D.</given-names></name>
<name><surname>Xie</surname> <given-names>L.</given-names></name>
<name><surname>Ying</surname> <given-names>Y.</given-names></name>
</person-group> (<year>2019</year>). 
<article-title>Computer vision detection of foreign objects in walnuts using deep learning</article-title>. <source>Comput. Electron. Agric.</source> <volume>162</volume>, <fpage>1001</fpage>&#x2013;<lpage>1010</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2019.05.019</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Rong</surname> <given-names>J.</given-names></name>
<name><surname>Zhou</surname> <given-names>H.</given-names></name>
<name><surname>Zhang</surname> <given-names>F.</given-names></name>
<name><surname>Yuan</surname> <given-names>T.</given-names></name>
<name><surname>Wang</surname> <given-names>P.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Tomato cluster detection and counting using improved YOLOv5 based on RGB-D fusion</article-title>. <source>Comput. Electron. Agric.</source> <volume>207</volume>, <elocation-id>107741</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2023.107741</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Stumpe</surname> <given-names>E.</given-names></name>
<name><surname>Bodner</surname> <given-names>G.</given-names></name>
<name><surname>Flagiello</surname> <given-names>F.</given-names></name>
<name><surname>Zeppelzauer</surname> <given-names>M.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>3D multimodal image registration for plant phenotyping</article-title>. <source>Comput. Electron. Agric.</source> <volume>237</volume>, <elocation-id>110538</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2025.110538</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sun</surname> <given-names>J.</given-names></name>
<name><surname>Yang</surname> <given-names>L.</given-names></name>
<name><surname>Zhang</surname> <given-names>D.</given-names></name>
<name><surname>Hu</surname> <given-names>J.</given-names></name>
<name><surname>Cui</surname> <given-names>T.</given-names></name>
<name><surname>He</surname> <given-names>X.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). 
<article-title>Development of a prediction model to determine optimal sowing depth to improve maize seedling performance</article-title>. <source>Biosyst. Eng.</source> <volume>234</volume>, <fpage>206</fpage>&#x2013;<lpage>222</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.BIOSYSTEMSENG.2023.09.004</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Tan</surname> <given-names>C.</given-names></name>
<name><surname>Li</surname> <given-names>C.</given-names></name>
<name><surname>He</surname> <given-names>D.</given-names></name>
<name><surname>Song</surname> <given-names>H.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Towards real-time tracking and counting of seedlings with a one-stage detector and optical flow</article-title>. <source>Comput. Electron. Agric.</source> <volume>193</volume>, <elocation-id>106683</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2021.106683</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Tan</surname> <given-names>C.</given-names></name>
<name><surname>Li</surname> <given-names>C.</given-names></name>
<name><surname>He</surname> <given-names>D.</given-names></name>
<name><surname>Song</surname> <given-names>H.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Anchor-free deep convolutional neural network for tracking and counting cotton seedlings and flowers</article-title>. <source>Comput. Electron. Agric.</source> <volume>215</volume>, <elocation-id>108359</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2023.108359</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>W.</given-names></name>
<name><surname>Chang</surname> <given-names>X.</given-names></name>
<name><surname>Yang</surname> <given-names>J.</given-names></name>
<name><surname>Xu</surname> <given-names>G.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>LiDAR-based dense pedestrian detection and tracking</article-title>. <source>Appl. Sci.</source> <volume>12</volume>, <elocation-id>1799</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/app12041799</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>K.</given-names></name>
<name><surname>Hu</surname> <given-names>X.</given-names></name>
<name><surname>Zheng</surname> <given-names>H.</given-names></name>
<name><surname>Lan</surname> <given-names>M.</given-names></name>
<name><surname>Liu</surname> <given-names>C.</given-names></name>
<name><surname>Liu</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Weed detection and recognition in complex wheat fields based on an improved YOLOv7</article-title>. <source>Front. Plant Sci.</source> <volume>15</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2024.1372237</pub-id>, PMID: <pub-id pub-id-type="pmid">38978522</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>Z.</given-names></name>
<name><surname>Zhang</surname> <given-names>H.</given-names></name>
<name><surname>He</surname> <given-names>W.</given-names></name>
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Cross-phenological-region crop mapping framework using Sentinel-2 time series Imagery: A new perspective for winter crops in China</article-title>. <source>ISPRS J. Photogramm. Remote Sens.</source> <volume>193</volume>, <fpage>200</fpage>&#x2013;<lpage>215</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.ISPRSJPRS.2022.09.010</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wu</surname> <given-names>Z.</given-names></name>
<name><surname>Sun</surname> <given-names>X.</given-names></name>
<name><surname>Jiang</surname> <given-names>H.</given-names></name>
<name><surname>Gao</surname> <given-names>F.</given-names></name>
<name><surname>Li</surname> <given-names>R.</given-names></name>
<name><surname>Fu</surname> <given-names>L.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>a). 
<article-title>Twice matched fruit counting system: An automatic fruit counting pipeline in modern apple orchard using mutual and secondary matches</article-title>. <source>Biosyst. Eng.</source> <volume>234</volume>, <fpage>140</fpage>&#x2013;<lpage>155</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.biosystemseng.2023.09.005</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wu</surname> <given-names>Z.</given-names></name>
<name><surname>Sun</surname> <given-names>X.</given-names></name>
<name><surname>Jiang</surname> <given-names>H.</given-names></name>
<name><surname>Mao</surname> <given-names>W.</given-names></name>
<name><surname>Li</surname> <given-names>R.</given-names></name>
<name><surname>Andriyanov</surname> <given-names>N.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>b). 
<article-title>NDMFCS: An automatic fruit counting system in modern apple orchard using abatement of abnormal fruit detection</article-title>. <source>Comput. Electron. Agric.</source> <volume>211</volume>, <elocation-id>108036</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2023.108036</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yang</surname> <given-names>Y.</given-names></name>
<name><surname>Wang</surname> <given-names>X.</given-names></name>
<name><surname>Zhang</surname> <given-names>F.</given-names></name>
<name><surname>Wu</surname> <given-names>Z.</given-names></name>
<name><surname>Wang</surname> <given-names>Y.</given-names></name>
<name><surname>Liu</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>MSNet: A multispectral-image driven rapeseed canopy instance segmentation network</article-title>. <source>Artif. Intell. Agric.</source> <volume>15</volume>, <fpage>642</fpage>&#x2013;<lpage>658</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aiia.2025.05.008</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yang</surname> <given-names>J.</given-names></name>
<name><surname>Zhang</surname> <given-names>R.</given-names></name>
<name><surname>Ding</surname> <given-names>C.</given-names></name>
<name><surname>Chen</surname> <given-names>L.</given-names></name>
<name><surname>Xie</surname> <given-names>Y.</given-names></name>
<name><surname>Ou</surname> <given-names>H.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>YOLO-detassel: Efficient object detection for Omitted Pre-Tassel in detasseling operation for maize seed production</article-title>. <source>Comput. Electron. Agric.</source> <volume>231</volume>, <elocation-id>109951</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2025.109951</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
<name><surname>Jiang</surname> <given-names>L.</given-names></name>
<name><surname>He</surname> <given-names>L.</given-names></name>
<name><surname>Wu</surname> <given-names>Z.</given-names></name>
<name><surname>Li</surname> <given-names>R.</given-names></name>
<name><surname>Chen</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Row-based kiwifruit counting pipeline for smartphone-captured videos using fruit tracking and detection region adaptation guided by support-post</article-title>. <source>Comput. Electron. Agric.</source> <volume>237</volume>, <elocation-id>110476</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2025.110476</pub-id>
</mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
<name><surname>Maki</surname> <given-names>H.</given-names></name>
<name><surname>Ma</surname> <given-names>D.</given-names></name>
<name><surname>S&#xe1;nchez-Gallego</surname> <given-names>J. A.</given-names></name>
<name><surname>Mickelbart</surname> <given-names>M. V.</given-names></name>
<name><surname>Wang</surname> <given-names>L.</given-names></name>
<etal/>
</person-group>. (<year>2019</year>). 
<article-title>Optimized angles of the swing hyperspectral imaging system for single corn plant</article-title>. <source>Comput. Electron. Agric.</source> <volume>156</volume>, <fpage>349</fpage>&#x2013;<lpage>359</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2018.11.030</pub-id>
</mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>X.</given-names></name>
<name><surname>Wang</surname> <given-names>Q.</given-names></name>
<name><surname>Wang</surname> <given-names>X.</given-names></name>
<name><surname>Li</surname> <given-names>H.</given-names></name>
<name><surname>He</surname> <given-names>J.</given-names></name>
<name><surname>Lu</surname> <given-names>C.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). 
<article-title>Automated detection of Crop-Row lines and measurement of maize width for boom spraying</article-title>. <source>Comput. Electron. Agric.</source> <volume>215</volume>, <elocation-id>108406</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2023.108406</pub-id>
</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>T.</given-names></name>
<name><surname>Zhao</surname> <given-names>D.</given-names></name>
<name><surname>Chen</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>H.</given-names></name>
<name><surname>Liu</surname> <given-names>S.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>DeepSORT with siamese convolution autoencoder embedded for honey peach young fruit multiple object tracking</article-title>. <source>Comput. Electron. Agric.</source> <volume>217</volume>, <elocation-id>108583</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2023.108583</pub-id>
</mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhao</surname> <given-names>B.</given-names></name>
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
<name><surname>Yang</surname> <given-names>C.</given-names></name>
<name><surname>Zhou</surname> <given-names>G.</given-names></name>
<name><surname>Ding</surname> <given-names>Y.</given-names></name>
<name><surname>Shi</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2018</year>). 
<article-title>Rapeseed seedling stand counting and seeding performance evaluation at two early growth stages based on unmanned aerial vehicle imagery</article-title>. <source>Front. Plant Sci.</source> <volume>9</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2018.01362</pub-id>, PMID: <pub-id pub-id-type="pmid">30298081</pub-id>
</mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zheng</surname> <given-names>T.</given-names></name>
<name><surname>Jiang</surname> <given-names>M.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Feng</surname> <given-names>M.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Research on tomato detection in natural environment based on RC-YOLOv4</article-title>. <source>Comput. Electron. Agric.</source> <volume>198</volume>, <elocation-id>107029</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2022.107029</pub-id>
</mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhuang</surname> <given-names>L.</given-names></name>
<name><surname>Wang</surname> <given-names>C.</given-names></name>
<name><surname>Hao</surname> <given-names>H.</given-names></name>
<name><surname>Li</surname> <given-names>J.</given-names></name>
<name><surname>Xu</surname> <given-names>L.</given-names></name>
<name><surname>Liu</surname> <given-names>S.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Maize emergence rate and leaf emergence speed estimation via image detection under field rail-based phenotyping platform</article-title>. <source>Comput. Electron. Agric.</source> <volume>220</volume>, <elocation-id>108838</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/J.COMPAG.2024.108838</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1509086">Dong Chen</ext-link>, Mississippi State University, United States</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2859878">Song Wang</ext-link>, Heilongjiang Bayi Agricultural University, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3330416">Moeen Ul Islam</ext-link>, Mississippi State University, United States</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3331029">Rahul Harsha</ext-link>, Kansas State University, United States</p></fn>
</fn-group>
</back>
</article>