<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2026.1772622</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Cotton boll extraction and single-boll weight estimation based on UAV multispectral imagery</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Chen</surname><given-names>Maoguang</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3105844/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Yin</surname><given-names>Caixia</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2252166/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Su</surname><given-names>Na</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Lin</surname><given-names>Tao</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2990049/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Jin</surname><given-names>Xiuliang</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/217983/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Wu</surname><given-names>Fengquan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3060097/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Jiang</surname><given-names>Pingan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Tang</surname><given-names>Qiuxiang</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Engineering Research Centre of Cotton, Ministry of Education/College of Agriculture, Xinjiang Agricultural University</institution>, <city>Urumqi</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>Institute of Cash Crops, Xinjiang Academy of Agricultural Sciences, Key Laboratory of Crop Physiology, Ecology and Cultivation in Desert Oasis, Ministry of Agriculture and Rural Affairs</institution>, <city>Urumqi</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff3"><label>3</label><institution>Key Laboratory of Crop Physiology and Ecology, Institute of Crop Sciences, Chinese Academy of Agricultural Sciences, Ministry of Agriculture</institution>, <city>Beijing</city>,&#xa0;<country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Qiuxiang Tang, <email xlink:href="mailto:tangqiuxiang2004_2@163.com">tangqiuxiang2004_2@163.com</email>; Pingan Jiang, <email xlink:href="mailto:jiang863863@sina.com">jiang863863@sina.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-18">
<day>18</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1772622</elocation-id>
<history>
<date date-type="received">
<day>21</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>31</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>30</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Chen, Yin, Su, Lin, Jin, Wu, Jiang and Tang.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Chen, Yin, Su, Lin, Jin, Wu, Jiang and Tang</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-18">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Single-boll weight (SBW) is difficult to estimate after defoliant application because canopy spectra include numerous mixed pixels from lint, soil, and senescent leaves, leading to strong background interference. Here we propose a UAV multispectral workflow that combines object-based boll extraction, spectral feature selection, and machine-learning regression to improve SBW mapping. Data were collected from a two-year drip-irrigated cotton experiment in Xinjiang, China, involving four varieties evaluated under five planting density treatments. Boll extraction was treated as a supervised object-based classification problem, and maximum likelihood, Mahalanobis distance, and parallelepiped classifiers were compared. Fifteen vegetation indices were computed from the extracted boll pixels; informative features were identified using Pearson correlation and SHapley Additive exPlanations importance ranking. SBW was then estimated with ridge regression, random forest regression, and neural network regression using an independent validation dataset. Maximum likelihood consistently achieved overall accuracy above 97% with Kappa values above 0.93, outperforming the other classifiers. Indices derived from the red, red-edge, and near-infrared bands, particularly those designed to reduce soil background effects, showed the strongest relationships with SBW and ranked highest in SHAP. The best-performing model, which integrated maximum likelihood-based boll extraction with neural network regression, achieved a coefficient of determination of 0.80 and a root mean square error of 0.31 g on the validation set. Relative errors remained below 15% across different years, varieties, and planting densities. This workflow reduces background interference and enables transferable SBW spatial estimation for breeding evaluation and density and harvest management.</p>
</abstract>
<kwd-group>
<kwd>cotton</kwd>
<kwd>cotton boll extraction</kwd>
<kwd>multispectral imagery</kwd>
<kwd>single-boll weight</kwd>
<kwd>unmanned aerial vehicle (UAV)</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This study was funded by the Xinjiang Uygur Autonomous Region Major Science and Technology Project (2022A02011), the National Modern Agricultural Industry Technology System-Cotton Industry Technology System (CARS-15-13), the earmarked fund for Xinjiang Agriculture Research System-03 (XJARS-03), the Xinjiang &#x201c;Tianshan Talents&#x201d; Training Program (2023TSYCCX0019), the National Natural Science Foundation of China (32260542), Xinjiang Uygur Autonomous Region Key Research and Development Special Project (2022B02033-1), the Science and Technology Development Program of the Pilot Zone for Innovation-Driven Development along the Silk Road Economic Belt and the Wu-Chang-Shi National Innovation Demonstration Zone (2023LQJ03).</funding-statement>
</funding-group>
<counts>
<fig-count count="11"/>
<table-count count="4"/>
<equation-count count="4"/>
<ref-count count="37"/>
<page-count count="16"/>
<word-count count="6959"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Technical Advances in Plant Science</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Cotton (<italic>Gossypium hirsutum</italic> L.) is an important cash crop that plays a pivotal role in the textile industry and regional economic development (<xref ref-type="bibr" rid="B4">Chen et&#xa0;al., 2024</xref>). Single-boll weight (SBW), as a key component of cotton yield, not only directly determines yield but is also closely associated with harvest timing and fiber quality (<xref ref-type="bibr" rid="B32">Xu et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B25">Rehman et&#xa0;al., 2020</xref>). Therefore, SBW is widely used as an important trait for variety identification and population-level evaluation. Rapid and accurate monitoring of SBW facilitates the screening of genotypes with superior boll weight (<xref ref-type="bibr" rid="B23">Niu et&#xa0;al., 2023</xref>), improves understanding of varietal adaptability to planting density and environmental conditions, and enhances insights into the yield formation process. However, the conventional quadrat harvesting method is labor-intensive, time-consuming, and destructive, which limits its scalability for large breeding populations and practical agricultural production (<xref ref-type="bibr" rid="B36">Zhang et&#xa0;al., 2025</xref>).</p>
<p>With the rapid development of remote sensing technologies, unmanned aerial vehicles (UAVs), owing to their flexibility and portability, have provided a new tool for field-scale, high-throughput crop phenotyping (<xref ref-type="bibr" rid="B30">Tong and Zhang, 2025</xref>). High-spatial-resolution RGB and multispectral imagery, combined with machine-learning algorithms, has been widely used for the quantitative retrieval of crop traits such as leaf area index, biomass, and yield (<xref ref-type="bibr" rid="B7">Chen et&#xa0;al., 2025</xref>; <xref ref-type="bibr" rid="B11">Guo et&#xa0;al., 2021</xref>). Previous studies have shown that UAV imagery acquired at appropriate growth stages can support cotton yield or boll number estimation with high accuracy (<xref ref-type="bibr" rid="B6">Chen, 2023</xref>). In particular, incorporating multi-temporal data and agronomic variables (<xref ref-type="bibr" rid="B9">Dai et&#xa0;al., 2025</xref>; <xref ref-type="bibr" rid="B33">Yeom et&#xa0;al., 2018</xref>) can substantially improve the stability and interpretability of estimation models. However, most existing studies have focused on total yield or boll number, whereas SBW as an independent target trait has received limited attention. Moreover, the robustness of SBW estimation models and their applicability across varieties, planting densities, and years remain insufficiently and systematically evaluated.</p>
<p>One of the primary challenges in remotely estimating SBW is background interference. During the optimal period for SBW observation, cotton canopies typically exhibit high porosity, and bright white lint, exposed soil, senescent leaves, and stems collectively generate a large proportion of mixed pixels. When canopy reflectance or vegetation indices are used directly, the boll signal is substantially diluted, thereby reducing sensitivity to SBW and related traits. Explicitly separating cotton bolls from complex backgrounds using image segmentation algorithms may help mitigate this limitation. Early studies mainly relied on RGB imagery with color-space transformations and threshold-based segmentation to extract cotton bolls (<xref ref-type="bibr" rid="B18">Li et&#xa0;al., 2016</xref>; <xref ref-type="bibr" rid="B28">Singh et&#xa0;al., 2021</xref>). More recently, machine-vision and deep-learning frameworks have been introduced; however, most efforts have remained at the levels of object detection and counting (<xref ref-type="bibr" rid="B20">Liu et&#xa0;al., 2023</xref>), with limited work leveraging boll segmentation outputs for quantitative SBW estimation. Meanwhile, the potential of multispectral information&#x2014;particularly red-edge and near-infrared bands and derived vegetation indices&#x2014;for SBW retrieval has not yet been fully explored.</p>
<p>Object-based image analysis provides a flexible framework to address the above challenges (<xref ref-type="bibr" rid="B2">Blaschke, 2010</xref>). This approach formulates target extraction as a pixel- or object-level classification problem, thereby enabling the joint use of spectral, spatial, and contextual information (<xref ref-type="bibr" rid="B34">Yuan et&#xa0;al., 2020</xref>). When object-based image analysis is coupled with machine-learning regression, it can support an integrated &#x201c;target segmentation&#x2013;feature extraction&#x2013;trait estimation&#x201d; pipeline: high-purity boll pixels are first obtained, boll spectral features are then extracted via masking, and an SBW retrieval model is subsequently developed. However, for UAV multispectral imagery, systematic comparisons of boll extraction performance among different object-based image analysis algorithms remain scarce, and quantitative evaluations linking &#x201c;segmentation choice&#x2013;feature sensitivity&#x2013;SBW estimation performance&#x201d; are still lacking.</p>
<p>Accordingly, we conducted a two-year field experiment in a typical oasis drip-irrigated cotton system in Xinjiang, China, with four varieties and five planting densities. The objectives of this study were to: (i) evaluate the performance of three object-based supervised classification algorithms for cotton boll extraction from UAV multispectral imagery; (ii) identify vegetation indices sensitive to SBW based on pure boll pixels and elucidate the relationship between boll-scale spectral features and SBW; and (iii) develop multiple SBW estimation models using ridge regression, random forest regression, and neural network regression, and assess their robustness across years, varieties, and planting-density scenarios. By explicitly linking boll-scale spectral features with SBW, this study aims to provide a reproducible UAV remote-sensing pipeline for field-scale, high-throughput SBW estimation and to facilitate the integration of SBW and other yield-component traits into UAV-based phenotyping platforms and precision management practices. The overall workflow is shown in <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref>.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Overall workflow of the proposed method.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g001.tif">
<alt-text content-type="machine-generated">Flowchart diagram illustrating the process of SBW modeling using UAV multi-spectral imagery, boll extraction methods, feature selection by Pearson correlation and SHAP analysis, and model development with ridge regression, random forest regressor, and neural network regressor, considering four cotton varieties and five plant densities.</alt-text>
</graphic></fig>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Study area and experimental design</title>
<sec id="s2_1_1">
<label>2.1.1</label>
<title>Study area description</title>
<p>As shown in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>, the experiment was conducted at Huaxing Farm in Changji City, Changji Hui Autonomous Prefecture, Xinjiang Uygur Autonomous Region, China (44&#xb0;12&#x2032;N, 87&#xb0;18&#x2032;E), at an elevation of 528 m. The site has a semi-arid continental climate, with an annual mean sunshine duration of 2,700 h, mean annual precipitation of approximately 190 mm, mean annual evaporation of approximately 1,787 mm, and a mean annual air temperature of 6.8 &#xb0;C. The frost-free period is about 170 days, and the annual accumulated temperature &#x2265;10 &#xb0;C is 3,450 &#xb0;C&#xb7;d; thus, local agricultural production relies entirely on irrigation.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>The study area.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g002.tif">
<alt-text content-type="machine-generated">Composite figure showing three panels: top left, a map of China (including the South China Sea dashed line); bottom left, a detailed map of Xinjiang with a red star marking the specific study area; right, an aerial view of agricultural plots outlined and labeled with alphanumeric codes, indicating the experimental field layout.</alt-text>
</graphic></fig>
<p>The soil texture at the experimental site is clay loam. Within the plough layer, the mean soil organic matter content was 12.9 g&#xb7;kg<sup>&#x2212;1</sup>, total N was 0.67 g&#xb7;kg<sup>&#x2212;1</sup>, alkali-hydrolyzable N was 27.49 mg&#xb7;kg<sup>&#x2212;1</sup>, available P was 35.65 mg&#xb7;kg<sup>&#x2212;1</sup>, available K was 479.5 mg&#xb7;kg<sup>&#x2212;1</sup>, bulk density was 1.42 g&#xb7;cm<sup>&#x2212;3</sup>, and soil pH was 8.05.</p>
</sec>
<sec id="s2_1_2">
<label>2.1.2</label>
<title>Experimental design</title>
<p>A two-factor randomized complete block design was employed, with planting density and variety as the two factors. Five planting densities were set: 90,000 plants ha<sup>&#x2212;1</sup> (D1), 135,000 plants ha<sup>&#x2212;1</sup> (D2), 180,000 plants ha<sup>&#x2212;1</sup> (D3), 225,000 plants ha<sup>&#x2212;1</sup> (D4), and 270,000 plants ha<sup>&#x2212;1</sup> (D5). The corresponding within-row plant spacing for the five densities was 29.2, 19.5, 14.6, 11.7, and 9.7 cm, respectively. Four cotton varieties were included: Xinnongdamian 1 (V1), Xinluzao 73 (V2), Xinshi 518 (V3), and CCRI 113 (CK). The CK variety was tested only at D5, resulting in a total of 16 treatments. Each treatment had three replicates, yielding 48 plots in total. Each plot measured 6.9 &#xd7; 9 m&#xb2;. The cropping pattern followed the local &#x201c;one film&#x2013;three drip lines&#x2013;six rows&#x201d; system, with a row-spacing configuration of [(10 + 66 + 10 + 66 + 10) + 66] cm. In 2023, sowing and harvest dates were 28 April and 8 October, respectively; in 2024, sowing and harvest dates were 30 April and 26 September, respectively. Other field management practices followed local standards.</p>
</sec>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Data acquisition</title>
<sec id="s2_2_1">
<label>2.2.1</label>
<title>Multispectral UAV image acquisition</title>
<p>UAV imagery was acquired on the two harvest dates (8 October 2023 and 26 September 2024) using a DJI Matrice 350 RTK platform equipped with a RedEdge-P multispectral sensor (MicaSense, USA) (<xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>). The sensor measures 8.9 &#xd7; 7.0 &#xd7; 6.7 cm, weighs 363 g, and comprises five discrete spectral bands (blue, green, red, red-edge, and near-infrared). The center wavelengths and bandwidths of the five bands are 475 nm (32 nm), 560 nm (27 nm), 668 nm (14 nm), 717 nm (12 nm), and 842 nm (57 nm), respectively. The image resolution is 1,456 &#xd7; 1,088 pixels, with a 50&#xb0; horizontal field of view (HFOV) and a 38&#xb0; vertical field of view (VFOV).</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>M350 RTK UAV platform and RedEdge-P sensor.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g003.tif">
<alt-text content-type="machine-generated">Composite image displaying a DJI M350 RTK drone mounted with a RedEdge-P sensor, with the sensor zoomed-in on the right showing five labeled lenses: blue (475 &#xb1; 32 nm), green (560 &#xb1; 27 nm), red (668 &#xb1; 14 nm), red edge (717 &#xb1; 12 nm), near-infrared (842 &#xb1; 57 nm), and a panchromatic lens.</alt-text>
</graphic></fig>
<p>Image acquisition was conducted under clear, cloud-free, and windless conditions around solar noon (13:00&#x2013;15:00). Prior to each flight, the UAV was magnetically calibrated. Takeoff and landing were performed at the same location on an open and flat area. A reflectance calibration panel was placed in the cotton field to facilitate radiometric calibration and to identify the field location in the imagery.</p>
<p>The flight altitude was 30 m and the flight speed was 2 m&#xb7;s<sup>&#x2212;1</sup>, with both forward and side overlaps set to 85%. Images were captured at equal intervals along the flight path.</p>
</sec>
<sec id="s2_2_2">
<label>2.2.2</label>
<title>SBW data collection</title>
<p>To ensure that the sampled SBW was representative of the mean SBW within each sampling point, a stratified random sampling strategy was adopted. Following the conventional classification of within-plant boll distribution in cotton physiology, bolls were stratified by the vertical position of fruiting branches (i.e., sympodial branch &#x201c;nodes/tiers&#x201d;) into three layers: the lower layer (1st-3rd fruiting branches), middle layer (4th-6th fruiting branches), and upper layer (7th-9th fruiting branches or the top fruiting branches). Within each sampling point, 30 normally opened bolls without visible pest or disease damage were randomly collected from each layer. Thus, 90 bolls were collected per sampling point, air-dried to constant weight, and the mean SBW was calculated (g).</p>
<p>Each plot was divided into three sampling points according to the number of plastic mulch films, resulting in 144 samples from 48 plots per year and 288 samples over the two years. The distribution of measured SBW is shown in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>, and SBW exhibited a decreasing trend with increasing planting density.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Measured SBW of cotton: <bold>(a)</bold> 2023 and <bold>(b)</bold> 2024.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g004.tif">
<alt-text content-type="machine-generated">Violin plots presenting the single boll weight (SBW) of cotton in grams for different treatments and varieties labeled CK, V1, V2, and V3 along the x-axis, with corresponding groupings D1 through D5 visualized in varied colors for each variety. Panel (a) and panel (b) illustrate separate data sets, both displaying individual data points overlaid with distribution curves and exhibiting differences in SBW distributions across treatments and varieties.</alt-text>
</graphic></fig>
</sec>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Feature construction</title>
<sec id="s2_3_1">
<label>2.3.1</label>
<title>Remote-sensing image preprocessing</title>
<p>Image mosaicking was performed in Agisoft Metashape Professional (v1.8.0; Agisoft LLC, St. Petersburg, Russia). First, the acquired multispectral images and the calibration panel images were imported. Using the Multi-camera system workflow, the calibration images were moved to a separate folder. Reflectance calibration was then conducted using the calibration panel, with band-specific reflectance coefficients of 0.512, 0.514, 0.514, 0.513, and 0.511. During &#x201c;Align Photos&#x201d;, the key point limit was set to 40,000, followed by &#x201c;Optimize alignment&#x201d; and &#x201c;Build dense point cloud&#x201d;. A digital elevation model was generated from the dense point cloud, and orthomosaics were subsequently produced.</p>
<p>Following the MicaSense-recommended reflectance calibration procedure in Metashape, the orthomosaic outputs were saved as 16-bit integer values with a scale factor of 32,768. During orthomosaic export, a raster transformation was applied, and each band was normalized using the raster calculator. Ultimately, a five-band reflectance raster was generated.</p>
</sec>
<sec id="s2_3_2">
<label>2.3.2</label>
<title>Cotton boll extraction</title>
<p>Cotton bolls were extracted using three object-based supervised classification algorithms. First, the orthomosaics were imported into ENVI (v5.6; Exelis VIS, Boulder, CO, USA). Three land-cover classes&#x2014;soil, cotton stems/leaves, and cotton lint&#x2014;were labeled using regions of interest. For each class, 20 regions of interest were manually delineated and evenly distributed across the experimental plots. The imagery was then classified using three supervised classification tools implemented in ENVI. A 3 &#xd7; 3 confusion matrix was obtained for each method (<xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>), from which classification accuracy metrics were calculated. Subsequently, the cotton lint class in each classified map was converted to a vector layer, which was used as a mask to extract boll pixels from the multispectral imagery, thereby achieving cotton boll extraction. The three object-based algorithms were as follows:</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Confusion matrices for the classification models.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="bottom" rowspan="2" align="center">True value</th>
<th valign="middle" colspan="4" align="center">Predicted value</th>
</tr>
<tr>
<th valign="middle" align="center">Soil</th>
<th valign="middle" align="center">Cotton stems and leaves</th>
<th valign="middle" align="center">Cotton lint</th>
<th valign="middle" align="center">Total</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Soil</td>
<td valign="middle" align="center">A<sub>11</sub></td>
<td valign="middle" align="center">A<sub>12</sub></td>
<td valign="middle" align="center">A<sub>13</sub></td>
<td valign="middle" align="center">&#x2211;A<sub>1j</sub></td>
</tr>
<tr>
<td valign="middle" align="center">Cotton stems and leaves</td>
<td valign="middle" align="center">A<sub>21</sub></td>
<td valign="middle" align="center">A<sub>22</sub></td>
<td valign="middle" align="center">A<sub>23</sub></td>
<td valign="middle" align="center">&#x2211;A<sub>2j</sub></td>
</tr>
<tr>
<td valign="middle" align="center">Cotton lint</td>
<td valign="middle" align="center">A<sub>31</sub></td>
<td valign="middle" align="center">A<sub>32</sub></td>
<td valign="middle" align="center">A<sub>33</sub></td>
<td valign="middle" align="center">&#x2211;A<sub>3j</sub></td>
</tr>
<tr>
<td valign="middle" align="center">Total</td>
<td valign="middle" align="center">&#x2211;A<sub>i1</sub></td>
<td valign="middle" align="center">&#x2211;A<sub>i2</sub></td>
<td valign="middle" align="center">&#x2211;A<sub>i3</sub></td>
<td valign="middle" align="center">N</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>A<sub>11</sub>, A<sub>22</sub>, and A<sub>33</sub> denote the numbers of correctly classified pixels for each class.</p></fn>
</table-wrap-foot>
</table-wrap>
<sec id="s2_3_2_1">
<label>2.3.2.1</label>
<title>Maximum likelihood</title>
<p>Maximum likelihood (<xref ref-type="bibr" rid="B7">Chen et&#xa0;al., 2025</xref>) is a supervised classification method based on probabilistic decision functions and Bayes&#x2019; rule. It estimates the mean and variance of each class from the pixel values of the regions of interest, constructs class-specific discriminant functions, and assigns each pixel to the class with the highest posterior probability. This method is simple and efficient but assumes that the data follow a normal distribution.</p>
</sec>
<sec id="s2_3_2_2">
<label>2.3.2.2</label>
<title>Mahalanobis distance</title>
<p>Mahalanobis distance (<xref ref-type="bibr" rid="B35">Zeng et&#xa0;al., 2021</xref>) is an object-based classification approach based on the covariance matrix of multiple features, which effectively quantifies relative distances among data points. By accounting for correlations among features, it is suitable for complex real-world scenarios.</p>
</sec>
<sec id="s2_3_2_3">
<label>2.3.2.3</label>
<title>Parallelepiped</title>
<p>The parallelepiped (<xref ref-type="bibr" rid="B31">Wilczkowiak et&#xa0;al., 2005</xref>) classifier operated in the 5-dimensional feature space of our multispectral imagery. For each of the three target classes, a unique decision region was defined. This was done by calculating the mean and standard deviation of training pixels in all 5 bands. A pixel was assigned to a class only if its value in every band fell within the class-specific range (mean &#xb1; a scaled standard deviation). Pixels not meeting any class criteria remained unclassified.</p>
<p>In this study, overall accuracy and the Kappa coefficient were used to evaluate the performance of the segmentation/classification models, and were calculated as shown in <xref ref-type="disp-formula" rid="eq1">Equations 1</xref> and <xref ref-type="disp-formula" rid="eq2">2</xref> (<xref ref-type="bibr" rid="B12">Huang et&#xa0;al., 2025</xref>).</p>
<disp-formula id="eq1"><label>(1)</label>
<mml:math display="block" id="M1"><mml:mrow><mml:mtext>Overall&#xa0;Accuracy</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mtext>i</mml:mtext><mml:mtext>k</mml:mtext></mml:msubsup><mml:msub><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mtext>i</mml:mtext></mml:msub></mml:mrow><mml:mtext>N</mml:mtext></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq2"><label>(2)</label>
<mml:math display="block" id="M2"><mml:mrow><mml:mtext>Kappa</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mtext>p</mml:mtext><mml:mn>0</mml:mn></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mtext>p</mml:mtext><mml:mtext>e</mml:mtext></mml:msub></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mtext>p</mml:mtext><mml:mtext>e</mml:mtext></mml:msub></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>TP<sub>i</sub> denotes the number of pixels correctly predicted as class i, and N denotes the total number of pixels. p<sub>0</sub> represents the observed agreement (i.e., overall accuracy), and p<sub>e</sub> represents the expected agreement by chance.</p>
</sec>
</sec>
<sec id="s2_3_3">
<label>2.3.3</label>
<title>Vegetation index calculation</title>
<p>The extracted boll imagery was imported into ArcGIS (v10.6; Esri Inc., Redlands, USA). Regions of interest corresponding to each sampling point were then delineated, and the mean reflectance within each region of interest was extracted as the spectral information of that sample. Because no standard vegetation indices have been specifically developed for estimating cotton SBW, we constructed a comprehensive spectral feature pool. Specifically, 15 commonly used vegetation indices, representing six major spectral response categories (difference, normalized difference, soil-adjusted, triangular, non-linear, and other indices), were calculated (<xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>). These indices are sensitive to different attributes, including vegetation greenness, soil background effects, and canopy structure, thereby providing a solid basis for exploring the spectral responses associated with SBW.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Formulas of vegetation indices used in this study.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Index categories</th>
<th valign="middle" align="center">Vegetation indexes</th>
<th valign="middle" align="center">Abbreviation</th>
<th valign="middle" align="center">Formulas</th>
<th valign="middle" align="center">References</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Difference</td>
<td valign="middle" align="center">Difference Vegetation Index</td>
<td valign="middle" align="center">DVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im1"><mml:mrow><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>r</mml:mi></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B7">Chen et&#xa0;al., 2025</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Difference</td>
<td valign="middle" align="center">RedEdge-Difference Vegetation Index</td>
<td valign="middle" align="center">REDVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im2"><mml:mrow><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B15">Killeen et&#xa0;al., 2024</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Normalized Difference</td>
<td valign="middle" align="center">Normalized Difference Vegetation Index</td>
<td valign="middle" align="center">NDVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im3"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B7">Chen et&#xa0;al., 2025</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Triangular</td>
<td valign="middle" align="center">RedEdge Triangle Vegetation Index</td>
<td valign="middle" align="center">RTVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im4"><mml:mrow><mml:mn>100</mml:mn><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mn>10</mml:mn><mml:mo>*</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>g</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B16">Kushal et&#xa0;al., 2024</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Soil-Adjusted</td>
<td valign="middle" align="center">Enhanced Vegetation Index</td>
<td valign="middle" align="center">EVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im5"><mml:mrow><mml:mn>2.5</mml:mn><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:mtext>nir</mml:mtext><mml:mo>&#x2212;</mml:mo><mml:mtext>r</mml:mtext><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mtext>nir</mml:mtext><mml:mo>+</mml:mo><mml:mn>6</mml:mn><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mtext>r</mml:mtext><mml:mo>&#x2212;</mml:mo><mml:mn>7.5</mml:mn><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mtext>b</mml:mtext><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B7">Chen et&#xa0;al., 2025</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Soil-Adjusted</td>
<td valign="middle" align="center">Soil Adjusted Vegetation Index</td>
<td valign="middle" align="center">SAVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im6"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mi>L</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mtext>r</mml:mtext><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>+</mml:mo><mml:mtext>r</mml:mtext><mml:mo>+</mml:mo><mml:mi>L</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B16">Kushal et&#xa0;al., 2024</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Soil-Adjusted</td>
<td valign="middle" align="center">RedEdge Soil-Adjusted Vegetation Index</td>
<td valign="middle" align="center">RESAVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im7"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mi>L</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:mfrac><mml:mrow><mml:mtext>nir</mml:mtext><mml:mo>&#x2212;</mml:mo><mml:mtext>r</mml:mtext></mml:mrow><mml:mrow><mml:mtext>nir</mml:mtext><mml:mo>+</mml:mo><mml:mtext>r</mml:mtext><mml:mo>+</mml:mo><mml:mn>0.5</mml:mn></mml:mrow></mml:mfrac><mml:mo stretchy="false">)</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mtext>nir</mml:mtext><mml:mo>&#x2212;</mml:mo><mml:mtext>re</mml:mtext><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mtext>nir</mml:mtext><mml:mo>+</mml:mo><mml:mtext>re</mml:mtext><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B37">Zhang et&#xa0;al., 2022</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Soil-Adjusted</td>
<td valign="middle" align="center">Optimized Soil-Adjusted Vegetation Index</td>
<td valign="middle" align="center">OSAVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im8"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mtext>Y</mml:mtext><mml:mo stretchy="false">)</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mtext>r</mml:mtext><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>+</mml:mo><mml:mtext>r</mml:mtext><mml:mo>+</mml:mo><mml:mtext>Y</mml:mtext><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B9">Dai et&#xa0;al., 2025</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Soil-Adjusted</td>
<td valign="middle" align="center">RedEdge-Optimized Soil-Adjusted Vegetation Index</td>
<td valign="middle" align="center">REOSAVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im9"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mtext>Y</mml:mtext><mml:mo stretchy="false">)</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mtext>re</mml:mtext><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>+</mml:mo><mml:mtext>re</mml:mtext><mml:mo>+</mml:mo><mml:mtext>Y</mml:mtext><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B3">Cao et&#xa0;al., 2013</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Normalized Difference</td>
<td valign="middle" align="center">Renormalized Difference Vegetation Index</td>
<td valign="middle" align="center">RDVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im10"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mroot><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:mroot><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B21">Lu et&#xa0;al., 2021</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Normalized Difference</td>
<td valign="middle" align="center">RedEdge Renormalized Difference Vegetation Index</td>
<td valign="middle" align="center">RERDVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im11"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mroot><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:mroot><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B3">Cao et&#xa0;al., 2013</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Non-Linear</td>
<td valign="middle" align="center">Non-Linear Index</td>
<td valign="middle" align="center">NLI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im12"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>^</mml:mo><mml:mn>2</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>^</mml:mo><mml:mn>2</mml:mn><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B9">Dai et&#xa0;al., 2025</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Non-Linear</td>
<td valign="middle" align="center">Non-Linear Vegetation Index</td>
<td valign="middle" align="center">NLVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im13"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>^</mml:mo><mml:mn>2</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>^</mml:mo><mml:mn>2</mml:mn><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B19">Li et&#xa0;al., 2013</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Triangular</td>
<td valign="middle" align="center">Transformed Difference Vegetation Index</td>
<td valign="middle" align="center">TVI</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im14"><mml:mrow><mml:mn>60</mml:mn><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>g</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mn>100</mml:mn><mml:mtext>&#xa0;</mml:mtext><mml:mo>*</mml:mo><mml:mtext>&#xa0;</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>g</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B16">Kushal et&#xa0;al., 2024</xref>)</td>
</tr>
<tr>
<td valign="middle" align="center">Other</td>
<td valign="middle" align="center">Modified Simple Ratio Index</td>
<td valign="middle" align="center">MSR</td>
<td valign="middle" align="center"><inline-formula>
<mml:math display="inline" id="im15"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi><mml:mo stretchy="false">/</mml:mo><mml:mi>r</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">/</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mroot><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mfrac><mml:mrow><mml:mi>n</mml:mi><mml:mi>i</mml:mi><mml:mi>r</mml:mi></mml:mrow><mml:mi>r</mml:mi></mml:mfrac><mml:mo stretchy="false">)</mml:mo><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mn>2</mml:mn></mml:mroot><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="middle" align="center">(<xref ref-type="bibr" rid="B15">Killeen et&#xa0;al., 2024</xref>)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>nir, r, g, b, and re represent reflectance in the near-infrared, red, green, blue, and red-edge bands, respectively. L and Y are soil adjustment factors (typically 0.5 and 0.16, respectively).</p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s2_3_4">
<label>2.3.4</label>
<title>Feature optimization and selection</title>
<p>Although using the full set of indices might yield marginally similar predictive accuracy, the feature selection process is essential to develop a parsimonious, stable, and interpretable model by eliminating redundant signals and reducing the risk of overfitting, which is particularly important for potential operational applications.</p>
<p>To identify the most effective and robust predictors for SBW estimation, we adopted a quantitative feature selection strategy that integrates statistical analysis with model interpretability. First, the Pearson correlation coefficient (|r|) was used to quantify the strength of the linear association between each vegetation index and SBW, while SHapley Additive exPlanations (SHAP) analysis was employed to assess the nonlinear contribution and global importance of each index. Next, features were ranked separately based on the two criteria. The ranking information was then integrated using an equally weighted averaging scheme to prioritize vegetation indices with the strongest predictive power for subsequent model development.</p>
</sec>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Model development and evaluation</title>
<sec id="s2_4_1">
<label>2.4.1</label>
<title>Dataset partitioning</title>
<p>A total of 288 field-measured samples were collected over two years. The dataset was randomly split into a calibration set and a validation set at a ratio of 7:3. The detailed dataset partitioning is provided in <xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Composition of the dataset used for SBW estimation.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Categories</th>
<th valign="middle" align="center">Subcategories</th>
<th valign="middle" align="center">Calibration sets</th>
<th valign="middle" align="center">Validation sets</th>
<th valign="middle" align="center">Total</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" rowspan="5" align="center">Densities</td>
<td valign="middle" align="center">D1</td>
<td valign="middle" align="center">38</td>
<td valign="middle" align="center">16</td>
<td valign="middle" align="center">54</td>
</tr>
<tr>
<td valign="middle" align="center">D2</td>
<td valign="middle" align="center">38</td>
<td valign="middle" align="center">16</td>
<td valign="middle" align="center">54</td>
</tr>
<tr>
<td valign="middle" align="center">D3</td>
<td valign="middle" align="center">38</td>
<td valign="middle" align="center">16</td>
<td valign="middle" align="center">54</td>
</tr>
<tr>
<td valign="middle" align="center">D4</td>
<td valign="middle" align="center">38</td>
<td valign="middle" align="center">16</td>
<td valign="middle" align="center">54</td>
</tr>
<tr>
<td valign="middle" align="center">D5</td>
<td valign="middle" align="center">50</td>
<td valign="middle" align="center">22</td>
<td valign="middle" align="center">72</td>
</tr>
<tr>
<td valign="middle" rowspan="4" align="center">Varieties</td>
<td valign="middle" align="center">V1</td>
<td valign="middle" align="center">63</td>
<td valign="middle" align="center">27</td>
<td valign="middle" align="center">90</td>
</tr>
<tr>
<td valign="middle" align="center">V2</td>
<td valign="middle" align="center">63</td>
<td valign="middle" align="center">27</td>
<td valign="middle" align="center">90</td>
</tr>
<tr>
<td valign="middle" align="center">V3</td>
<td valign="middle" align="center">63</td>
<td valign="middle" align="center">27</td>
<td valign="middle" align="center">90</td>
</tr>
<tr>
<td valign="middle" align="center">CK</td>
<td valign="middle" align="center">13</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">18</td>
</tr>
<tr>
<td valign="middle" rowspan="2" align="center">Years</td>
<td valign="middle" align="center">2023</td>
<td valign="middle" align="center">101</td>
<td valign="middle" align="center">43</td>
<td valign="middle" align="center">144</td>
</tr>
<tr>
<td valign="middle" align="center">2024</td>
<td valign="middle" align="center">101</td>
<td valign="middle" align="center">43</td>
<td valign="middle" align="center">144</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2_4_2">
<label>2.4.2</label>
<title>Development of SBW estimation models</title>
<p>To systematically evaluate the capability of different algorithms for SBW estimation, three regression models were adopted: ridge regression, random forest regression, and neural network regression representing linear models, ensemble-learning models, and neural-network approaches, respectively.</p>
<sec id="s2_4_2_1">
<label>2.4.2.1</label>
<title>Ridge regression</title>
<p>Ridge regression is an extension of linear regression that introduces an L2 regularization term into the loss function to alleviate multicollinearity among predictors and improve model generalization (<xref ref-type="bibr" rid="B27">Shahzad et&#xa0;al., 2024</xref>). In this study, the regularization parameter <italic>&#x3b1;</italic> was searched over a candidate set of 0.001, 0.01, 0.1, 1, 10, and 100, and the optimal value was selected via cross-validation.</p>
</sec>
<sec id="s2_4_2_2">
<label>2.4.2.2</label>
<title>Random forest regression</title>
<p>Random forest regression is a representative ensemble-learning method that aggregates multiple decision trees to reduce model variance and effectively capture nonlinear relationships and interactions among features (<xref ref-type="bibr" rid="B10">Gao et&#xa0;al., 2024</xref>). The main hyperparameters were tuned as follows: <italic>n_estimators</italic> = 100, 200, 300; <italic>max_depth</italic> = 3, 5, 10; <italic>max_features</italic> = &#x2018;sqrt&#x2019;, &#x2018;log2&#x2019;; <italic>min_samples_split</italic> = 2, 4, 6; and <italic>min_samples_leaf</italic> = 1, 2, 4.</p>
</sec>
<sec id="s2_4_2_3">
<label>2.4.2.3</label>
<title>Neural network regression</title>
<p>Neural network regression can model complex functions through multilayer nonlinear transformations and is suitable for describing nonlinear relationships between spectral features and SBW (<xref ref-type="bibr" rid="B8">Corte et&#xa0;al., 2020</xref>). A multilayer feed-forward neural network was used, with the hyperparameter ranges set to: <italic>hidden_size</italic> = 32, 64, 128; <italic>learning_rate</italic> = 0.01, 0.001, 0.0001; <italic>epochs</italic> = 500, 1000; and <italic>batch_size</italic> = 8, 16, 32.</p>
<p>All three models used SBW as the target variable and the vegetation indices selected in Section 2.3.4 as input predictors. Model development was implemented in Python 3.9.13, primarily using open-source libraries such as scikit-learn 1.1.1. Hyperparameter optimization was conducted via grid search combined with five-fold cross-validation, with validation performance used as the selection criterion.</p>
</sec>
</sec>
<sec id="s2_4_3">
<label>2.4.3</label>
<title>Evaluation of regression model performance</title>
<p>To quantitatively assess the estimation performance of each model, the coefficient of determination (R<sup>2</sup>) and the root mean square error (RMSE) were used as the primary evaluation metrics. Specifically, R<sup>2</sup> measures the extent to which the model explains the variance in SBW, whereas RMSE reflects the absolute deviation between predicted and observed values. A higher R<sup>2</sup> (closer to 1) and a lower RMSE indicate better model fit and higher estimation accuracy. The metrics were calculated as shown in <xref ref-type="disp-formula" rid="eq3">Equations 3</xref> and <xref ref-type="disp-formula" rid="eq4">4</xref> (<xref ref-type="bibr" rid="B14">Jaafar and Sujud, 2025</xref>):</p>
<disp-formula id="eq3"><label>(3)</label>
<mml:math display="block" id="M3"><mml:mrow><mml:msup><mml:mtext>R</mml:mtext><mml:mn>2</mml:mn></mml:msup><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mrow><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mtext>i</mml:mtext><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mtext>n</mml:mtext></mml:msubsup><mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mtext>y</mml:mtext><mml:mtext>i</mml:mtext></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mtext>x</mml:mtext><mml:mtext>i</mml:mtext></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mrow><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mtext>i</mml:mtext><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mtext>n</mml:mtext></mml:msubsup><mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mtext>y</mml:mtext><mml:mtext>i</mml:mtext></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mover accent="true"><mml:mtext>y</mml:mtext><mml:mo>&#xaf;</mml:mo></mml:mover><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq4"><label>(4)</label>
<mml:math display="block" id="M4"><mml:mrow><mml:mtext>RMSE</mml:mtext><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mtext>n</mml:mtext></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mtext>i</mml:mtext><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mtext>n</mml:mtext></mml:munderover><mml:mrow><mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mtext>y</mml:mtext><mml:mtext>i</mml:mtext></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mtext>x</mml:mtext><mml:mtext>i</mml:mtext></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mstyle></mml:mrow></mml:msqrt></mml:mrow></mml:math>
</disp-formula>
<p>where n is the sample size, i denotes the ith sample, x<sub>i</sub> is the observed value for sample i, y<sub>i</sub> is the predicted value for sample i, and <inline-formula>
<mml:math display="inline" id="im16"><mml:mrow><mml:mover accent="true"><mml:mtext>y</mml:mtext><mml:mo>&#xaf;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> is the mean of the predicted values.</p>
</sec>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="s3_1">
<label>3.1</label>
<title>Cotton boll extraction based on object-based algorithms</title>
<p>In this study, cotton bolls were extracted from UAV imagery acquired in two years using three object-based algorithms, and the accuracy and visual performance of each method are summarized in <xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref> and <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>. Both Mahalanobis distance and maximum likelihood achieved satisfactory boll extraction, with overall accuracies exceeding 95% and Kappa coefficients above 0.90, indicating strong agreement. As shown in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>, parallelepiped performed poorly in delineating boll boundaries, resulting in markedly lower overall accuracies than Mahalanobis distance and maximum likelihood (77.79% and 88.90% in 2023 and 2024, respectively), together with substantially lower Kappa values (0.2808 and 0.7007). Differences in the underlying principles of these object-based algorithms led to distinct extraction outcomes. Overall, both Mahalanobis distance and maximum likelihood provided reliable boll extraction, with overall accuracies above 96% and Kappa coefficients greater than 0.91.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Accuracy assessment of cotton boll extraction based on object-based algorithms.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" rowspan="2" align="center">Object-based algorithms</th>
<th valign="middle" colspan="2" align="center">In 2023</th>
<th valign="middle" colspan="2" align="center">In 2024</th>
</tr>
<tr>
<th valign="middle" align="center">Overall accuracy (%)</th>
<th valign="middle" align="center">Kappa</th>
<th valign="middle" align="center">Overall accuracy (%)</th>
<th valign="middle" align="center">Kappa</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Mahalanobis distance</td>
<td valign="middle" align="center">96.6455</td>
<td valign="middle" align="center">0.9147</td>
<td valign="middle" align="center">97.0880</td>
<td valign="middle" align="center">0.9305</td>
</tr>
<tr>
<td valign="middle" align="center">Maximum likelihood</td>
<td valign="middle" align="center">98.0130</td>
<td valign="middle" align="center">0.9510</td>
<td valign="middle" align="center">97.3139</td>
<td valign="middle" align="center">0.9375</td>
</tr>
<tr>
<td valign="middle" align="center">Parallelepiped</td>
<td valign="middle" align="center">77.7879</td>
<td valign="middle" align="center">0.2808</td>
<td valign="middle" align="center">88.9042</td>
<td valign="middle" align="center">0.7007</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Examples of cotton boll extraction results obtained using object-based algorithms.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g005.tif">
<alt-text content-type="machine-generated">Side-by-side comparison of six satellite or aerial image panels showing vegetation or terrain patterns, with two original images from 2023 and 2024 on the left, and four processed versions on the right using Mahalanobis distance, maximum likelihood, and parallelepiped classification methods, each outlined in red, green, or blue respectively.</alt-text>
</graphic></fig>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Analysis of boll-scale spectral features</title>
<sec id="s3_2_1">
<label>3.2.1</label>
<title>Pearson correlation between boll-scale spectral features and SBW</title>
<p>Cotton bolls were extracted using the three object-based algorithms (maximum likelihood, mahalanobis distance, and parallelepiped), and boll-scale spectral features were subsequently calculated. Pearson correlation analysis was then performed between these spectral features and SBW, with the results shown in <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>. Overall, the selected vegetation indices were positively correlated with SBW. The relative ranking of correlation coefficients between vegetation indices and SBW was generally consistent across indices derived from the three extraction algorithms. Notably, better boll extraction led to stronger correlations between extracted spectral features and SBW. Vegetation indices computed from bolls extracted by maximum likelihood exhibited the strongest correlations, followed by mahalanobis distance, whereas parallelepiped showed weaker relationships. Indices involving the near-infrared, red-edge, and red bands (e.g., RESAVI (RedEdge Soil-Adjusted Vegetation Index), RDVI (Renormalized Difference Vegetation Index), RERDVI (RedEdge Renormalized Difference Vegetation Index), OSAVI (Optimized Soil-Adjusted Vegetation Index), and REOSAVI (RedEdge-Optimized Soil-Adjusted Vegetation Index)) showed particularly strong correlations with SBW. Under the mahalanobis distance-, maximum likelihood-, and parallelepiped-based extraction results, the correlation coefficients of these indices with SBW exceeded 0.7, 0.8, and 0.5, respectively.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Pearson correlation analysis between boll-scale spectral features and SBW.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g006.tif">
<alt-text content-type="machine-generated">Scatter plot comparing Pearson correlation coefficients for Mahalanobis distance, Maximum likelihood, and Parallelepiped methods across various features, with Maximum likelihood generally showing the highest correlations, followed by Mahalanobis distance and Parallelepiped.</alt-text>
</graphic></fig>
</sec>
<sec id="s3_2_2">
<label>3.2.2</label>
<title>SHAP analysis of boll-scale spectral features</title>
<p>To quantify the contribution of individual features to model predictions, SHAP analysis was performed on the spectral features derived from the boll-extracted imagery using an XGBoost model, and the results are visualized as SHAP summary plots in <xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7</bold></xref>. The absolute contribution of a given vegetation index varied slightly with the boll extraction results; however, the overall importance ranking remained largely consistent. Across all three boll extraction algorithms, the two most influential features were consistently RESAVI and NDVI (Normalized Difference Vegetation Index), both of which involve the near-infrared, red-edge, and red bands.</p>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>SHAP analysis of boll-scale spectral features: <bold>(a)</bold> Mahalanobis distance; <bold>(b)</bold> Maximum likelihood; <bold>(c)</bold> Parallelepiped.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g007.tif">
<alt-text content-type="machine-generated">Three grouped SHAP summary bar plots compare feature importance for Mahalanobis distance, Maximum likelihood, and Parallelepiped models. RESAVI and NDVI are consistently the most impactful features across all panels, with color gradients indicating feature value magnitude.</alt-text>
</graphic></fig>
</sec>
<sec id="s3_2_3">
<label>3.2.3</label>
<title>Construction of the feature set for SBW estimation models</title>
<p>A weighted averaging approach was used to integrate the Pearson correlation coefficients and SHAP importance values, and all vegetation indices were ranked accordingly. The SBW remote-sensing feature set was then constructed by selecting indices until the cumulative contribution reached 60%, with the results shown in <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8</bold></xref>. Feature selection was influenced by the boll extraction algorithm. For mahalanobis distance, six indices were selected: RESAVI, RERDVI, REOSAVI, REDVI (RedEdge-Difference Vegetation Index), RDVI, and OSAVI. For maximum likelihood, six indices were selected: SAVI (Soil Adjusted Vegetation Index), RESAVI, RERDVI, REOSAVI, RDVI, and OSAVI. For&#xa0;parallelepiped, four indices were selected: RESAVI, RERDVI, RDVI, and NDVI.</p>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>Feature fusion based on Pearson correlation and SHAP analysis: <bold>(a)</bold> Mahalanobis distance; <bold>(b)</bold> Maximum likelihood; <bold>(c)</bold> Parallelepiped.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g008.tif">
<alt-text content-type="machine-generated">Three network diagrams compare feature selection methods for Mahalanobis distance, Maximum likelihood, and Parallelepiped classifiers. Each diagram clusters features by Correlation (red), Intersection (purple), and Importance (blue), connected with labeled nodes and color-coded edges, with a legend explaining each color.</alt-text>
</graphic></fig>
</sec>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Development of SBW estimation models based on fused features</title>
<p>SBW estimation models were developed using the selected features in combination with ridge regression, random forest regression, and neural network regression, and the results are shown in <xref ref-type="fig" rid="f9"><bold>Figure&#xa0;9</bold></xref>. Models built on features derived from maximum likelihood-based boll extraction achieved higher accuracy than those based on mahalanobis distance and parallelepiped (R&#xb2; = 0.68-0.80; RMSE = 0.31-0.39 g). Across the three regression approaches, on the calibration set, the nonlinear models (random forest regression, and neural network regression) yielded higher R<sup>2</sup> values (0.90-0.96 and 0.82-0.90, respectively) than the linear ridge regression model (0.73-0.86). On the validation set, model performance decreased for all methods, yet the nonlinear models maintained their advantage. Considering the overall performance on both the calibration and validation sets, the SBW estimation model combining maximum likelihood-based boll extraction with neural network regression achieved the best performance, with a validation R<sup>2</sup> of 0.80 and an RMSE of 0.31 g.</p>
<fig id="f9" position="float">
<label>Figure&#xa0;9</label>
<caption>
<p>Accuracy of SBW estimation models based on fused features. <bold>(a)</bold> Mahalanobis Distance-Ridge Regression model; <bold>(b)</bold> Mahalanobis Distance-Random Forest Regression model; <bold>(c)</bold> Mahalanobis Distance-Neural Network Regression model; <bold>(d)</bold> Maximum Likelihood-Ridge Regression model; <bold>(e)</bold> Maximum Likelihood-Random Forest Regression model; <bold>(f)</bold> Maximum Likelihood-Neural Network Regression model; <bold>(g)</bold> Parallelepiped-Ridge Regression model; <bold>(h)</bold> Parallelepiped-Random Forest Regression model; <bold>(i)</bold> Parallelepiped-Neural Network Regression model.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g009.tif">
<alt-text content-type="machine-generated">Nine-panel scientific figure showing scatterplots of estimated versus measured SBW values in grams using three methods (Mahalanobis, Maximum Likelihood, Parallelepiped) and three regressions (Ridge, Random Forest, Neural Network), with calibration set points as blue circles, validation set points as red triangles, a dotted one-to-one line, and each plot reporting R&#xb2; and RMSE values for calibration and validation sets.</alt-text>
</graphic></fig>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>SBW spatial mapping and robustness validation</title>
<p>SBW was spatially mapped across the cotton field using the optimal modeling strategy that combines maximum likelihood-based boll masking with neural network regression, with the results shown in <xref ref-type="fig" rid="f10"><bold>Figure&#xa0;10</bold></xref>. The results from both 2023 and 2024 indicate that this workflow can reliably estimate the spatial distribution of SBW, and predicted values were generally close to the measured values with relatively low errors (<xref ref-type="fig" rid="f10"><bold>Figure&#xa0;10a</bold></xref>). In addition, the zoomed-in inset maps (<xref ref-type="fig" rid="f10"><bold>Figure&#xa0;10b</bold></xref>) highlight pixel-level predictions for representative low- and high-density plots and provide a direct comparison between masked (object-based) and unmasked (canopy-based) estimation. Boll masking removed 74.72% of background pixels in the representative areas, demonstrating its ability to suppress background interference before aggregating pixel-level predictions to plot-level values for breeding evaluation.</p>
<fig id="f10" position="float">
<label>Figure&#xa0;10</label>
<caption>
<p>Plot-scale maps of cotton SBW derived from pixel-level predictions after boll masking, with comparisons to unmasked canopy-based estimation. <bold>(a)</bold> Spatial maps of measured cotton SBW, estimated cotton SBW, and estimation error for 2023 and 2024. <bold>(b)</bold> Zoomed-in examples for representative low-density (D1) and high-density (D5) plots. The upper row shows pixel-level predictions within the maximum likelihood mask, whereas the lower row shows unmasked canopy-based estimation (as indicated in the figure). The background is the red-green-blue composite, and colored pixels represent predicted cotton SBW. Pixel-level predictions were aggregated to plot-level values for breeding-oriented evaluation. Boll masking removed 74.72% of background pixels.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g010.tif">
<alt-text content-type="machine-generated">Multi-panel scientific figure compares spatial boll weight (SBW) in 2023 and 2024 using maps and close-up views. Panels (a) show measured SBW, estimated SBW, and SBW error with color gradients, for two years and various plots; D1 and D5 sites are highlighted in red. Panels (b) present maximum likelihood mask and unmask estimations for D1 and D5 at low and high density, each outlined in red, with a shared blue-yellow color bar. Panel labels and legends aid interpretation.</alt-text>
</graphic></fig>
<p>The robustness of the maximum likelihood-neural network regression-SBW model was further confirmed by the results shown in <xref ref-type="fig" rid="f11"><bold>Figure&#xa0;11</bold></xref>. Overall, the model exhibited strong robustness, with estimated values closely matching the measured values. A slight overestimation was observed in the low-value range, whereas a slight underestimation occurred in the high-value range. Overall, the maximum likelihood-neural network regression-SBW model achieved accurate SBW estimation. Notably, the relative errors across different combinations of years, varieties, and planting densities were all below 15%, further demonstrating the robustness of the maximum likelihood-neural network regression-SBW model.</p>
<fig id="f11" position="float">
<label>Figure&#xa0;11</label>
<caption>
<p>Robustness validation of the maximum likelihood-neural network regression-SBW model: <bold>(a)</bold> 2023 and <bold>(b)</bold> 2024.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1772622-g011.tif">
<alt-text content-type="machine-generated">Two side-by-side bar and line charts compare measured and estimated SBW values in grams for several sample groups from 2023 and 2024, with SBW error percentage indicated by a red line. The 2023 chart shows similar measured and estimated values with low errors, while the 2024 chart displays generally higher measured SBW values and slightly increased error percentages. Legends and axes for SBW value and error are included.</alt-text>
</graphic></fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<sec id="s4_1">
<label>4.1</label>
<title>Role of object-based boll extraction in subsequent retrieval</title>
<p>Object-based approaches are well suited for delineating morphologically complex, small-scale targets in high-resolution imagery (<xref ref-type="bibr" rid="B2">Blaschke, 2010</xref>). Consistent with this, our results showed that all three object-based supervised classification algorithms effectively separated cotton bolls from background components. Among them, maximum likelihood achieved the highest overall accuracy (&#x2265;97.3139%) and Kappa coefficient (&#x2265;0.9375) across the two growing seasons. The Mahalanobis distance classifier demonstrated higher sensitivity to the distribution and quantity of the training samples provided. In contrast, the parallelepiped classifier, due to its reliance on simple, axis-aligned decision rules, proved particularly prone to misclassifying pixels in spectrally ambiguous zones such as boll boundaries and shaded areas.</p>
<p>Importantly, the quality of segmentation is not only reflected by classification accuracy metrics but also by the strength of subsequent spectral-trait relationships. Vegetation indices extracted using the maximum likelihood-based mask generally exhibited higher correlations with SBW and higher model importance than those derived from the other two algorithms. This indicates that high-quality boll extraction can effectively improve the &#x201c;purity&#x201d; of spectral signals by reducing interference from soil and residual leaves, thereby providing more reliable inputs for SBW retrieval. Previous studies on deep-learning-based boll detection and boll-index construction have likewise shown that extraction quality is a prerequisite for accurate yield estimation (<xref ref-type="bibr" rid="B14">Sun et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B17">Li et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B24">Reddy et&#xa0;al., 2024</xref>), and our findings provide further evidence for this conclusion from the perspective of object-based classification.</p>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Response mechanisms between boll-scale spectral features and SBW</title>
<p>Building on boll extraction, this study identified SAVI, OSAVI and their red-edge variants, as well as RDVI and RERDVI, as core predictors for SBW estimation from multiple categories of vegetation indices. These indices share two key characteristics: (i) they are constructed using the red, red-edge, and near-infrared bands, and (ii) they generally incorporate soil-adjustment terms or re-normalized formulations. This is consistent with the spectral environment of cotton fields after boll opening. On the one hand, cotton lint exhibits high brightness in the visible bands, and some wavelengths are easily confounded with exposed soil; therefore, &#x201c;greenness-type&#x201d; indices such as NDVI alone are insufficient to characterize cotton bolls effectively. On the other hand, defoliation increases canopy porosity and the proportion of exposed soil, thereby amplifying the influence of soil brightness variability on spectral signals. Soil-adjusted vegetation indices mitigate this effect by introducing correction factors, whereas red-edge-related indices are more sensitive to residual green tissues and structural changes around the fiber (<xref ref-type="bibr" rid="B13">Huete, 1988</xref>; <xref ref-type="bibr" rid="B26">Rondeaux et&#xa0;al., 1996</xref>).</p>
<p>In this study, the Pearson correlation analysis and SHAP-based importance rankings were highly consistent, suggesting that vegetation indices incorporating red-edge information and designed to reduce soil background effects are not only strongly associated with SBW in univariate analyses but also contribute robustly in multivariate models. Compared with studies that estimate cotton yield or SBW from canopy-averaged spectra (<xref ref-type="bibr" rid="B22">Ma et&#xa0;al., 2022</xref>), our approach derives spectral predictors from boll pixels identified by object-based extraction. This design provides a closer correspondence between remote-sensing signals and yield components and supports a more physically interpretable relationship between spectral features and SBW.</p>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Performance, error structure, and application potential of SBW estimation models</title>
<p>Using the selected features, we further evaluated the potential of different machine-learning algorithms for SBW estimation. Overall, nonlinear models outperformed the linear model, indicating a certain degree of nonlinearity between boll-scale spectral signals and SBW, which is consistent with previous findings on estimating cotton and other crop yields using UAV imagery and machine learning (<xref ref-type="bibr" rid="B1">Ashapure et&#xa0;al., 2020</xref>). Among the tested approaches, the neural network model built on maximum likelihood-based boll extraction achieved a favorable balance between the calibration and validation sets, with a validation R<sup>2</sup> of 0.80 and an RMSE of 0.31 g. Moreover, the relative errors remained below 15% across different years, varieties, and planting densities, demonstrating good robustness under the experimental conditions.</p>
<p>Notably, error analysis revealed a systematic pattern: slight overestimation at low SBW values and some underestimation at high SBW values. This pattern is more than a simple &#x201c;regression-to-the-mean&#x201d; statistic; it reflects fundamental constraints in spectral-trait modeling. We attribute this non-linear bias to two intertwined factors. First, at the high-SBW end, the observed underestimation is likely driven by spectral saturation effects, where the sensitivity of broadband vegetation indices diminishes as biomass or yield increases, compressing the dynamic range of the spectral response (<xref ref-type="bibr" rid="B1">Ashapure et&#xa0;al., 2020</xref>). Concurrently, occlusion and shading within dense canopies may exacerbate signal saturation. Second, at the low-SBW end, the overestimation may stem from increased interference from background soil and senescent material in mixed pixels, coupled with a potential data imbalance where extreme low-value samples are underrepresented in the training set. Together, these physical and statistical factors cause the model&#x2019;s predictions to be biased toward the mean, particularly at the trait distribution tails. Future research may address this issue by employing less-saturating hyperspectral or red-edge indices, extending the saturation point through fusion of multimodal data, and implementing strategic oversampling for extreme phenotypes.</p>
<p>From an application perspective, SBW spatial maps can be used together with boll-number or yield maps. In breeding trials, such joint interpretation can help distinguish genotypes characterized by &#x201c;many bolls with low SBW&#x201d; from those with &#x201c;few bolls but high SBW.&#x201d; In production management, SBW maps can provide complementary information for optimizing planting density, guiding defoliant application, and determining harvest timing, thereby extending existing yield-estimation frameworks that rely on canopy height or fractional cover (<xref ref-type="bibr" rid="B23">Niu et al., 2023</xref>; <xref ref-type="bibr" rid="B12">Huang et&#xa0;al., 2025</xref>).</p>
</sec>
<sec id="s4_4">
<label>4.4</label>
<title>Limitations and future perspectives</title>
<p>It should be noted that this study has several limitations. First, the experiment was conducted at a single site in northern Xinjiang, where soil type and management practices were relatively consistent. Although multiple varieties and planting densities were included, the transferability of the proposed model across different agro-ecological zones and cultivation systems remains to be validated. Yield estimation models are often affected by factors such as variety type, mulching practices, and management level when applied across regions (<xref ref-type="bibr" rid="B23">Niu et al., 2023</xref>; <xref ref-type="bibr" rid="B22">Ma et&#xa0;al., 2022</xref>). Second, the current object-based segmentation relies on manually labeled training samples and is also dependent on image spatial resolution and radiometric calibration quality. To scale up to large production fields, it may be necessary to adopt deep-learning frameworks such as instance segmentation to reduce annotation costs and improve adaptability under complex backgrounds and small-target scenarios (<xref ref-type="bibr" rid="B17">Li et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B24">Reddy et&#xa0;al., 2024</xref>). In addition, the regression models in this study were built solely on spectral features, without integrating structural information derived from digital elevation models or 3D point clouds, or agronomic covariates such as meteorological and soil variables. Multi-source data fusion has been shown to improve the robustness and cross-year generalization of crop yield estimation models (<xref ref-type="bibr" rid="B1">Ashapure et&#xa0;al., 2020</xref>).</p>
<p>Therefore, future work should pursue two complementary directions to enhance model generalizability. First, expanding sampling campaigns across multiple ecological regions is essential to build a more representative and diversified training dataset. Second, on the methodological front, it is crucial to explore techniques such as transfer learning or domain adaptation. These approaches could potentially leverage the model knowledge gained from data-rich source environments (like the current study site) to improve performance in new target regions with limited ground truth data, thereby reducing the dependency on large-scale labeled samples from every new zone. Ultimately, integrating these efforts within a framework of &#x201c;high-quality segmentation + multi-source feature fusion&#x201d; will support the joint retrieval of SBW and other yield-component traits. This will provide more comprehensive phenotypic information to guide precision cotton production and breeding decisions.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusions">
<label>5</label>
<title>Conclusion</title>
<p>This study developed an SBW estimation pipeline that integrates object-based cotton boll segmentation, vegetation indices designed to reduce soil background effects and incorporating red-edge information, and machine-learning regression. The pipeline was evaluated using data from a two-year field experiment conducted in a typical drip-irrigated cotton system in Xinjiang, China, in which four varieties were tested under five planting-density treatments. The results demonstrate that maximum likelihood segmentation produces reliable boll masks and substantially strengthens the association between boll-scale spectral signals and SBW. Vegetation indices derived from the red-edge bands and formulated to account for soil background, including the SAVI, OSAVI, and their red-edge variants, were identified as key predictors for SBW estimation. Based on these features, the model that combines maximum likelihood-based boll extraction with neural network regression achieved good accuracy and robustness across years, varieties, and planting densities. Overall, our findings verify the feasibility of organ-scale SBW estimation from UAV multispectral imagery and provide methodological support for incorporating yield-component traits into high-throughput phenotyping and precision cotton production.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p></sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>MC: Conceptualization, Methodology, Software, Writing &#x2013; review &amp; editing, Writing &#x2013; original draft. CY: Conceptualization, Writing &#x2013; review &amp; editing. NS: Data curation, Writing &#x2013; original draft. TL: Project administration, Writing &#x2013; original draft. XJ: Project administration, Writing &#x2013; original draft. FW: Data curation, Writing &#x2013; original draft. PJ: Project administration, Writing &#x2013; original draft. QT: Funding acquisition, Writing &#x2013; review &amp; editing.</p></sec>
<ack>
<title>Acknowledgments</title>
<p>The authors thank the College of Agronomy, Xinjiang Agricultural University and Xinjiang Huaxing Investment Group Co., Ltd. These institutions provided essential experimental infrastructure, technical guidance, and research resources throughout the study.</p>
</ack>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s10" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ashapure</surname> <given-names>A.</given-names></name>
<name><surname>Jung</surname> <given-names>J.</given-names></name>
<name><surname>Chang</surname> <given-names>A.</given-names></name>
<name><surname>Oh</surname> <given-names>S.</given-names></name>
<name><surname>Yeom</surname> <given-names>J.</given-names></name>
<name><surname>Maeda</surname> <given-names>M.</given-names></name>
<etal/>
</person-group>. (<year>2020</year>). 
<article-title>Developing a machine learning based cotton yield estimation framework using multi-temporal UAS data</article-title>. <source>ISPRS J. Photogram. Remote Sens.</source> <volume>169</volume>, <fpage>180</fpage>&#x2013;<lpage>194</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.isprsjprs.2020.09.015</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Blaschke</surname> <given-names>T.</given-names></name>
</person-group> (<year>2010</year>). 
<article-title>Object based image analysis for remote sensing</article-title>. <source>ISPRS J. Photogram. Remote Sens.</source> <volume>65</volume>, <fpage>2</fpage>&#x2013;<lpage>16</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.isprsjprs.2009.06.004</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cao</surname> <given-names>Q.</given-names></name>
<name><surname>Miao</surname> <given-names>Y.</given-names></name>
<name><surname>Wang</surname> <given-names>H.</given-names></name>
<name><surname>Huang</surname> <given-names>S.</given-names></name>
<name><surname>Cheng</surname> <given-names>S.</given-names></name>
<name><surname>Khosla</surname> <given-names>R.</given-names></name>
<etal/>
</person-group>. (<year>2013</year>). 
<article-title>Non-destructive estimation of rice plant nitrogen status with crop circle multispectral active canopy sensor</article-title>. <source>Field Crops Res.</source> <volume>154</volume>, <fpage>133</fpage>&#x2013;<lpage>144</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.fcr.2013.08.005</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>M.</given-names></name>
<name><surname>Yin</surname> <given-names>C.</given-names></name>
<name><surname>Lin</surname> <given-names>T.</given-names></name>
<name><surname>Liu</surname> <given-names>H.</given-names></name>
<name><surname>Wang</surname> <given-names>Z.</given-names></name>
<name><surname>Jiang</surname> <given-names>P.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Integration of unmanned aerial vehicle spectral and textural features for accurate above-ground biomass estimation in cotton</article-title>. <source>Agron</source> <volume>14</volume>, <elocation-id>1313</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy14061313</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>M.</given-names></name>
<name><surname>Yin</surname> <given-names>C.</given-names></name>
<name><surname>Xi</surname> <given-names>B.</given-names></name>
<name><surname>Jin</surname> <given-names>T.</given-names></name>
<name><surname>Liu</surname> <given-names>L.</given-names></name>
<name><surname>Lin</surname> <given-names>T.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Classification of degradation films and estimation of degradation rate based on multispectral fusion images</article-title>. <source>Trans. Chin. Soc Agric.</source> <volume>56</volume>, <fpage>345</fpage>&#x2013;<lpage>335</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.6041/j.issn.1000-1298.2025.03.034</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>W.</given-names></name>
</person-group> (<year>2023</year>). <source>Research on cotton boll detection technology based on multi-scale image and development of cotton yield estimation platform</source> (<publisher-loc>Shihezi</publisher-loc>: 
<publisher-name>Shihezi University</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.27332/d.cnki.gshzu.2023.001213</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>Z.</given-names></name>
<name><surname>Zhai</surname> <given-names>W.</given-names></name>
<name><surname>Cheng</surname> <given-names>Q.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Enhancing maize LAI estimation accuracy using unmanned aerial vehicle remote sensing and deep learning techniques</article-title>. <source>Artif. Intell. Agric.</source> <volume>15</volume>, <fpage>482</fpage>&#x2013;<lpage>495</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aiia.2025.04.008</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Corte</surname> <given-names>A.</given-names></name>
<name><surname>Souza</surname> <given-names>D.</given-names></name>
<name><surname>Rex</surname> <given-names>F.</given-names></name>
<name><surname>Sanquetta</surname> <given-names>C.</given-names></name>
<name><surname>Mohan</surname> <given-names>M.</given-names></name>
<name><surname>Silva</surname> <given-names>C.</given-names></name>
<etal/>
</person-group>. (<year>2020</year>). 
<article-title>Forest inventory with high-density UAV-Lidar: machine learning approaches for predicting individual tree attributes</article-title>. <source>Comput. Electron. Agric.</source> <volume>179</volume>, <elocation-id>105815</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2020.105815</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Dai</surname> <given-names>Q.</given-names></name>
<name><surname>Chen</surname> <given-names>H.</given-names></name>
<name><surname>Chen</surname> <given-names>Z.</given-names></name>
<name><surname>Liu</surname> <given-names>C.</given-names></name>
<name><surname>Li</surname> <given-names>G.</given-names></name>
<name><surname>Wang</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Identification of optimal phenological periods for summer maize yield prediction using UAV-based multispectral data</article-title>. <source>J. Integr. Agric.</source>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jia.2025.02.026</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gao</surname> <given-names>X.</given-names></name>
<name><surname>Yao</surname> <given-names>Y.</given-names></name>
<name><surname>Chen</surname> <given-names>S.</given-names></name>
<name><surname>Li</surname> <given-names>Q.</given-names></name>
<name><surname>Zhang</surname> <given-names>X.</given-names></name>
<name><surname>Liu</surname> <given-names>Z.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Improved maize leaf area index inversion combining plant height corrected resampling size and random forest model using UAV images at fine scale</article-title>. <source>Eur. J. Agron.</source> <volume>161</volume>, <elocation-id>127360</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.eja.2024.127360</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Guo</surname> <given-names>Y.</given-names></name>
<name><surname>Fu</surname> <given-names>Y.</given-names></name>
<name><surname>Chen</surname> <given-names>S.</given-names></name>
<name><surname>Bryant</surname> <given-names>C.</given-names></name>
<name><surname>Li</surname> <given-names>X.</given-names></name>
<name><surname>Senthilnath</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2021</year>). 
<article-title>Integrating spectral and textural information for identifying the tasseling date of summer maize using UAV based RGB images</article-title>. <source>Int. J. Appl. Earth Obs. Geoinf.</source> <volume>102</volume>, <elocation-id>102435</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jag.2021.102435</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Huang</surname> <given-names>T.</given-names></name>
<name><surname>Jiao</surname> <given-names>L.</given-names></name>
<name><surname>Bai</surname> <given-names>Y.</given-names></name>
<name><surname>Yan</surname> <given-names>J.</given-names></name>
<name><surname>Yang</surname> <given-names>X.</given-names></name>
<name><surname>Liu</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Deciphering the UAV-LiDAR contribution to vegetation classification using interpretable machine learning</article-title>. <source>Comput. Electron. Agric.</source> <volume>235</volume>, <elocation-id>110360</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2025.110360</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Huete</surname> <given-names>A.</given-names></name>
</person-group> (<year>1988</year>). 
<article-title>A soil-adjusted vegetation index (SAVI)</article-title>. <source>Remote Sens. Environ.</source> <volume>25</volume>, <fpage>295</fpage>&#x2013;<lpage>309</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/0034-4257(88)90106-X</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Jaafar</surname> <given-names>H.</given-names></name>
<name><surname>Sujud</surname> <given-names>L.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>High resolution evapotranspiration from UAV multispectral thermal imagery: validation and comparison with EC, Landsat, and fused S2-MODIS HSEB ET</article-title>. <source>Int. J. Appl. Earth Obs. Geoinf.</source> <volume>136</volume>, <elocation-id>104359</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jag.2025.104359</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Killeen</surname> <given-names>P.</given-names></name>
<name><surname>Kiringa</surname> <given-names>I.</given-names></name>
<name><surname>Yeap</surname> <given-names>T.</given-names></name>
<name><surname>Branco</surname> <given-names>P.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Corn grain yield prediction using UAV-based high spatiotemporal resolution imagery, machine learning, and spatial cross-validation</article-title>. <source>Remote Sens.</source> <volume>16</volume>, <elocation-id>683</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs16040683</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kushal</surname> <given-names>K.</given-names></name>
<name><surname>Matthew</surname> <given-names>R.</given-names></name>
<name><surname>Andrew</surname> <given-names>P.</given-names></name>
<name><surname>Sami</surname> <given-names>K.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>On-farm cereal rye biomass estimation using machine learning on images from an unmanned aerial system</article-title>. <source>Precis. Agric.</source> <volume>25</volume>, <fpage>2198</fpage>&#x2013;<lpage>2225</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11119-024-10162-9</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>F.</given-names></name>
<name><surname>Bai</surname> <given-names>J.</given-names></name>
<name><surname>Zhang</surname> <given-names>M.</given-names></name>
<name><surname>Zhang</surname> <given-names>R.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Yield estimation of high-density cotton fields using low-altitude UAV imaging and deep learning</article-title>. <source>Plant Methods</source> <volume>18</volume>, <fpage>55</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13007-022-00881-3</pub-id>, PMID: <pub-id pub-id-type="pmid">35477580</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Cao</surname> <given-names>Z.</given-names></name>
<name><surname>Lu</surname> <given-names>H.</given-names></name>
<name><surname>Xiao</surname> <given-names>Y.</given-names></name>
<name><surname>Zhu</surname> <given-names>Y.</given-names></name>
<name><surname>Cremers</surname> <given-names>A.</given-names></name>
</person-group> (<year>2016</year>). 
<article-title>In-field cotton detection via region-based semantic image segmentation</article-title>. <source>Comput. Electron. Agric.</source> <volume>127</volume>, <fpage>475</fpage>&#x2013;<lpage>486</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2016.07.006</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>X.</given-names></name>
<name><surname>Chen</surname> <given-names>F.</given-names></name>
<name><surname>Chen</surname> <given-names>X.</given-names></name>
</person-group> (<year>2013</year>). 
<article-title>Satellite-observed nighttime light variation as evidence for global armed conflicts</article-title>. <source>IEEE J. Sel. Top. Appl. Earth Obs. Remote Sens.</source> <volume>6</volume>, <fpage>2302</fpage>&#x2013;<lpage>2315</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/JSTARS.2013.2241021</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>Q.</given-names></name>
<name><surname>Zhang</surname> <given-names>Y.</given-names></name>
<name><surname>Yang</surname> <given-names>G.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Small unopened cotton boll counting by detection with MRF-YOLO in the wild</article-title>. <source>Comput. Electron. Agric.</source> <volume>204</volume>, <elocation-id>107576</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2022.107576</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lu</surname> <given-names>J.</given-names></name>
<name><surname>Eitel</surname> <given-names>J.</given-names></name>
<name><surname>Engels</surname> <given-names>M.</given-names></name>
<name><surname>Zhu</surname> <given-names>J.</given-names></name>
<name><surname>Ma</surname> <given-names>Y.</given-names></name>
<name><surname>Liao</surname> <given-names>F.</given-names></name>
<etal/>
</person-group>. (<year>2021</year>). 
<article-title>Improving unmanned aerial vehicle (UAV) remote sensing of rice plant potassium accumulation by fusing spectral and textural information</article-title>. <source>Int. J. Appl. Earth Obs. Geoinf.</source> <volume>104</volume>, <elocation-id>102592</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jag.2021.102592</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ma</surname> <given-names>Y.</given-names></name>
<name><surname>Sun</surname> <given-names>Q.</given-names></name>
<name><surname>Li</surname> <given-names>F.</given-names></name>
<name><surname>Xu</surname> <given-names>W.</given-names></name>
<name><surname>Zhang</surname> <given-names>Y.</given-names></name>
<name><surname>Sun</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2022</year>). 
<article-title>Cotton yield estimation based on vegetation indices and texture features derived from RGB images</article-title>. <source>Front. Plant Sci.</source> <volume>13</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2022.925986</pub-id>, PMID: <pub-id pub-id-type="pmid">35783985</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Niu</surname> <given-names>H.</given-names></name>
<name><surname>Kuang</surname> <given-names>M.</given-names></name>
<name><surname>Huang</surname> <given-names>L.</given-names></name>
<name><surname>Shang</surname> <given-names>H.</given-names></name>
<name><surname>Yuan</surname> <given-names>Y.</given-names></name>
<name><surname>Ge</surname> <given-names>Q.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). 
<article-title>Lint percentage and boll weight QTLs in three excellent upland cotton (Gossypium hirsutum): ZR014121, CCRI60, and EZ60</article-title>. <source>BMC Plant Biol.</source> <volume>23</volume>, <fpage>179</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12870-023-04147-5</pub-id>, PMID: <pub-id pub-id-type="pmid">37020180</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Reddy</surname> <given-names>J.</given-names></name>
<name><surname>Niu</surname> <given-names>H.</given-names></name>
<name><surname>Scott</surname> <given-names>J.</given-names></name>
<name><surname>Bhandari</surname> <given-names>M.</given-names></name>
<name><surname>Landivar</surname> <given-names>J.</given-names></name>
<name><surname>Bednarz</surname> <given-names>C.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Cotton yield prediction via UAV-based cotton boll image segmentation using YOLO model and Segment Anything Model (SAM)</article-title>. <source>Remote Sens.</source> <volume>16</volume>, <elocation-id>4346</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs16234346</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Rehman</surname> <given-names>A.</given-names></name>
<name><surname>Mustafa</surname> <given-names>N.</given-names></name>
<name><surname>Du</surname> <given-names>X.</given-names></name>
<name><surname>Azhar</surname> <given-names>M.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>Heritability and correlation analysis of morphological and yield traits in genetically modified cotton</article-title>. <source>J. Cotton Res.</source> <volume>3</volume>, <elocation-id>23</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s42397-020-00067-z</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Rondeaux</surname> <given-names>G.</given-names></name>
<name><surname>Steven</surname> <given-names>M.</given-names></name>
<name><surname>Baret</surname> <given-names>F.</given-names></name>
</person-group> (<year>1996</year>). 
<article-title>Optimization of soil-adjusted vegetation indices</article-title>. <source>Remote Sens. Environ.</source> <volume>55</volume>, <fpage>95</fpage>&#x2013;<lpage>107</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/0034-4257(95)00186-7</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Shahzad</surname> <given-names>A.</given-names></name>
<name><surname>Amin</surname> <given-names>M.</given-names></name>
<name><surname>Emam</surname> <given-names>W.</given-names></name>
<name><surname>Faisal</surname> <given-names>M.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>New ridge parameter estimators for the quasi-Poisson ridge regression model</article-title>. <source>Sci. Rep.</source> <volume>14</volume>, <fpage>8489</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-023-50085-5</pub-id>, PMID: <pub-id pub-id-type="pmid">38605090</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Singh</surname> <given-names>N.</given-names></name>
<name><surname>Tewari</surname> <given-names>V.</given-names></name>
<name><surname>Biswas</surname> <given-names>P.</given-names></name>
<name><surname>Pareek</surname> <given-names>C.</given-names></name>
<name><surname>Dhruw</surname> <given-names>L.</given-names></name>
</person-group> (<year>2021</year>). 
<article-title>Image processing algorithms for in-field cotton boll detection in natural lighting conditions</article-title>. <source>Artif. Intell. Agric.</source> <volume>5</volume>, <fpage>142</fpage>&#x2013;<lpage>156</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aiia.2021.07.002</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sun</surname> <given-names>S.</given-names></name>
<name><surname>Li</surname> <given-names>C.</given-names></name>
<name><surname>Chee</surname> <given-names>P.</given-names></name>
<name><surname>Paterson</surname> <given-names>A.</given-names></name>
<name><surname>Jiang</surname> <given-names>Y.</given-names></name>
<name><surname>Xu</surname> <given-names>R.</given-names></name>
<etal/>
</person-group>. (<year>2020</year>). 
<article-title>Three-dimensional photogrammetric mapping of cotton bolls <italic>in situ</italic> based on point cloud segmentation and clustering</article-title>. <source>ISPRS J. Photogram. Remote Sens.</source> <volume>160</volume>, <fpage>195</fpage>&#x2013;<lpage>207</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.isprsjprs.2019.12.011</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Tong</surname> <given-names>F.</given-names></name>
<name><surname>Zhang</surname> <given-names>Y.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Individual tree crown delineation in high resolution aerial RGB imagery using StarDist-based model</article-title>. <source>Remote Sens. Environ.</source> <volume>319</volume>, <elocation-id>114618</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.rse.2025.114618</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wilczkowiak</surname> <given-names>M.</given-names></name>
<name><surname>Sturm</surname> <given-names>P.</given-names></name>
<name><surname>Boyer</surname> <given-names>E.</given-names></name>
</person-group> (<year>2005</year>). 
<article-title>Using geometric constraints through parallelepipeds for calibration and 3D modeling</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell.</source> <volume>27</volume>, <fpage>194</fpage>&#x2013;<lpage>207</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/tpami.2005.40</pub-id>, PMID: <pub-id pub-id-type="pmid">15688557</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Xu</surname> <given-names>W.</given-names></name>
<name><surname>Yang</surname> <given-names>W.</given-names></name>
<name><surname>Chen</surname> <given-names>S.</given-names></name>
<name><surname>Wu</surname> <given-names>C.</given-names></name>
<name><surname>Chen</surname> <given-names>P.</given-names></name>
<name><surname>Lan</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2020</year>). 
<article-title>Establishing a model to predict the single boll weight of cotton in northern Xinjiang by using high resolution UAV remote sensing data</article-title>. <source>Comput. Electron. Agric.</source> <volume>179</volume>, <elocation-id>105762</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2020.105762</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yeom</surname> <given-names>J.</given-names></name>
<name><surname>Jung</surname> <given-names>J.</given-names></name>
<name><surname>Chang</surname> <given-names>A.</given-names></name>
<name><surname>Maeda</surname> <given-names>M.</given-names></name>
<name><surname>Landivar</surname> <given-names>J.</given-names></name>
</person-group> (<year>2018</year>). 
<article-title>Automated open cotton boll detection for yield estimation using unmanned aircraft vehicle (UAV) data</article-title>. <source>Remote Sens.</source> <volume>10</volume>, <elocation-id>1895</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs10121895</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yuan</surname> <given-names>Y.</given-names></name>
<name><surname>Chen</surname> <given-names>X.</given-names></name>
<name><surname>Wang</surname> <given-names>J.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>Object-contextual representations for semantic segmentation</article-title>. <source>ECCV</source> <volume>2020</volume>, <fpage>173</fpage>&#x2013;<lpage>190</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/978-3-030-58539-6_11</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zeng</surname> <given-names>S.</given-names></name>
<name><surname>Wang</surname> <given-names>X.</given-names></name>
<name><surname>Duan</surname> <given-names>X.</given-names></name>
<name><surname>Zeng</surname> <given-names>S.</given-names></name>
<name><surname>Xiao</surname> <given-names>Z.</given-names></name>
<name><surname>Feng</surname> <given-names>D.</given-names></name>
</person-group> (<year>2021</year>). 
<article-title>Kernelized Mahalanobis distance for fuzzy clustering</article-title>. <source>IEEE Trans. Fuzzy Syst.</source> <volume>29</volume>, <fpage>3103</fpage>&#x2013;<lpage>3117</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/TFUZZ.2020.3012765</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>T.</given-names></name>
<name><surname>Li</surname> <given-names>J.</given-names></name>
<name><surname>Tong</surname> <given-names>J.</given-names></name>
<name><surname>Song</surname> <given-names>Y.</given-names></name>
<name><surname>Wang</surname> <given-names>L.</given-names></name>
<name><surname>Wu</surname> <given-names>R.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>End-to-end deep fusion of hyperspectral imaging and computer vision techniques for rapid detection of wheat seed quality</article-title>. <source>Artif. Intell. Agric.</source> <volume>15</volume>, <fpage>537</fpage>&#x2013;<lpage>554</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aiia.2025.02.003</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
<name><surname>Wang</surname> <given-names>W.</given-names></name>
<name><surname>Krienke</surname> <given-names>B.</given-names></name>
<name><surname>Cao</surname> <given-names>Q.</given-names></name>
<name><surname>Zhu</surname> <given-names>Y.</given-names></name>
<name><surname>Cao</surname> <given-names>W.</given-names></name>
<etal/>
</person-group>. (<year>2022</year>). 
<article-title>In-season variable rate nitrogen recommendation for wheat precision production supported by fixed-wing UAV imagery</article-title>. <source>Precis. Agric.</source> <volume>23</volume>, <fpage>830</fpage>&#x2013;<lpage>853</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11119-021-09863-2</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/172404">Alejandro Isabel Luna-Maldonado</ext-link>, Autonomous University of Nuevo Le&#xf3;n, Mexico</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3023646">Osman Ilniyaz</ext-link>, Academia Turfanica, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3209451">Yuhang Tian</ext-link>, Sun Yat-sen University, China</p></fn>
</fn-group>
</back>
</article>