<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2025.1611653</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Plant Science</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Deep learning-based semantic segmentation for rice yield estimation by analyzing the dynamic change of panicle coverage</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Bak</surname>
<given-names>Hyeok-Jin</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Eun-Ji</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Lee</surname>
<given-names>Ji-Hyeon</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chang</surname>
<given-names>Sungyul</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kwon</surname>
<given-names>Dongwon</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Im</surname>
<given-names>Woo-Jin</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hwang</surname>
<given-names>Woon-Ha</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chang</surname>
<given-names>Jae-Ki</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chung</surname>
<given-names>Nam-Jin</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Sang</surname>
<given-names>Wan-Gyu</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3017896/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>National Institute of Crop and Food Science, Rural Development Administration</institution>, <addr-line>Wanju-gun</addr-line>,&#xa0;<country>Republic of Korea</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>National Institute of Horticultural and Herbal Science, Rural Development Administration</institution>, <addr-line>Muan-gun</addr-line>,&#xa0;<country>Republic of Korea</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Department of Agronomy, Jeonbuk National University</institution>, <addr-line>Jeonju-si</addr-line>,&#xa0;<country>Republic of Korea</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Fr&#xe9;d&#xe9;ric Cointault, Agrosup Dijon, France</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Md Nashir Uddin, Virginia State University, United States</p>
<p>Zedong Geng, Huazhong Agricultural University, China</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Wan-Gyu Sang, <email xlink:href="mailto:wg_sang@korea.kr">wg_sang@korea.kr</email>
</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>14</day>
<month>08</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>16</volume>
<elocation-id>1611653</elocation-id>
<history>
<date date-type="received">
<day>14</day>
<month>04</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>29</day>
<month>07</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2025 Bak, Kim, Lee, Chang, Kwon, Im, Hwang, Chang, Chung and Sang.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Bak, Kim, Lee, Chang, Kwon, Im, Hwang, Chang, Chung and Sang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Rising global populations and climate change necessitate increased agricultural productivity. Most studies on rice panicle detection using imaging technologies rely on single-time-point analyses, failing to capture the dynamic changes in panicle coverage and their effects on yield. Therefore, this study presents a novel temporal framework for rice phenotyping and yield prediction by integrating high-resolution RGB imagery with deep learning-based semantic segmentation.</p>
</sec>
<sec>
<title>Methods</title>
<p>High-resolution RGB images of rice canopies were acquired over two growing seasons. We evaluated five semantic segmentation models (DeepLabv3+, U-Net, PSPNet, FPN, LinkNet) to effectively delineate rice panicles. Time-series panicle coverage data, extracted from the segmented images, were fitted to a piecewise function to model their growth and decline dynamics. This process distilled key predictive parameters: <italic>K</italic> (maximum panicle coverage), <italic>g</italic> (growth rate), <italic>d0</italic> (time of maximum growth rate), <italic>a</italic> (decline rate), and <italic>d1</italic> (transition point). These parameters served as predictors in four machine learning regression models (PLSR, RFR, GBR, and XGBR) to estimate yield and its components.</p>
</sec>
<sec>
<title>Results</title>
<p>In panicle segmentation, DeepLabv3+ and LinkNet achieved superior performance (mIoU &gt; 0.81). Among the piecewise function parameters, <italic>K</italic> showed the strongest positive correlation with Yield and Grain Number (GN) (<italic>r</italic> = 0.87 and <italic>r</italic> = 0.85, respectively), while <italic>d0</italic> was strongly negatively correlated with the Filled Grain Ratio (FGR) (<italic>r</italic> = -0.71). For yield prediction, the RFR and XGBR models demonstrated the highest performance (R<sup>2</sup> = 0.89). SHAP analysis quantified the relative importance of each parameter for predicting yield components.</p>
</sec>
<sec>
<title>Discussion</title>
<p>This framework proves to be a powerful tool for quantifying rice developmental dynamics and accurately predicting yield using readily available RGB imagery. It holds significant potential for advancing both precision agriculture and crop breeding efforts.</p>
</sec>
</abstract>
<kwd-group>
<kwd>rice</kwd>
<kwd>phenotyping</kwd>
<kwd>deep learning</kwd>
<kwd>semantic segmentation</kwd>
<kwd>yield prediction</kwd>
<kwd>time-series analysis</kwd>
<kwd>piecewise function</kwd>
</kwd-group>
<counts>
<fig-count count="5"/>
<table-count count="4"/>
<equation-count count="9"/>
<ref-count count="44"/>
<page-count count="16"/>
<word-count count="8910"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Technical Advances in Plant Science</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Agricultural research now prioritizes improving sustainable productivity and efficiency to address the challenges posed by population growth and climate change. Data-driven agriculture supported by advanced technologies provides a novel means of reconciling productivity and environmental effects (<xref ref-type="bibr" rid="B6">Benti et&#xa0;al., 2024</xref>). Within data-driven agriculture, crop phenotyping via imaging techniques has received considerable interest. Phenotyping involves the detailed analysis of the morphological and physiological traits of crops, thereby informing variety selection, environmental adaptability assessment, and optimization of agricultural management (<xref ref-type="bibr" rid="B19">Jangra et&#xa0;al., 2021</xref>; <xref ref-type="bibr" rid="B36">Vishal et&#xa0;al., 2020</xref>).</p>
<p>Deep learning, particularly convolutional neural networks (CNNs), is fundamental to modern visual analytics. CNN, a type of deep learning model that uses convolutional kernels to extract features and classify images through multilayer neural networks (<xref ref-type="bibr" rid="B2">Alzubaidi et&#xa0;al., 2021</xref>), is highly effective at handling intricate visuals. CNNs are used for plant growth monitoring, pest detection, and yield prediction (<xref ref-type="bibr" rid="B25">Liu and Wang, 2021</xref>; <xref ref-type="bibr" rid="B34">Srivastava et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B3">Bak et&#xa0;al., 2024a</xref>, <xref ref-type="bibr" rid="B4">b</xref>). For major crops such as rice, quantitative analysis of the growth and yield components is essential for supporting food security and agricultural sustainability; deep learning provides an effective avenue to perform such evaluations (<xref ref-type="bibr" rid="B21">Kim et&#xa0;al., 2017</xref>).</p>
<p>Indeed, significant progress has been made in methods for detecting and quantifying rice panicles using these technologies. Foundational work has established high-quality public datasets for panicle segmentation (<xref ref-type="bibr" rid="B37">Wang et&#xa0;al., 2021</xref>), and object detection models, including advanced Vision Transformer-based architectures, have been widely applied for panicle counting (<xref ref-type="bibr" rid="B38">Wang et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B39">Wei et&#xa0;al., 2024</xref>; <xref ref-type="bibr" rid="B26">Lu et&#xa0;al., 2024</xref>). However, these counting-based methods face significant challenges as the canopy matures and panicles become occluded (<xref ref-type="bibr" rid="B38">Wang et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B26">Lu et&#xa0;al., 2024</xref>; <xref ref-type="bibr" rid="B39">Wei et&#xa0;al., 2024</xref>). An approach focusing on the total panicle area or coverage, rather than the count, may therefore offer a more robust signal. Yet, whether based on counting or area, these powerful methods predominantly rely on analysis at single or discrete time-points. While a few advanced studies have incorporated time-series analysis to track individual panicles (<xref ref-type="bibr" rid="B44">Zhao et&#xa0;al., 2017</xref>), a research gap persists in modeling the holistic dynamic change of the entire panicle canopy coverage with a continuous function.</p>
<p>To address this gap, this study develops an integrated framework. First, we leverage semantic segmentation to analyze panicle coverage, a technique well-suited for area-based analysis of complex crop structures (<xref ref-type="bibr" rid="B22">Lei et&#xa0;al., 2024</xref>; <xref ref-type="bibr" rid="B27">Madokoro et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B1">Abourabia et&#xa0;al., 2024</xref>). We evaluated established models such as DeepLabv3+ and U-Net to ensure precise pixel-level data extraction (<xref ref-type="bibr" rid="B25">Liu and Wang, 2021</xref>). Second, building on the principle of function-based time-series modeling successfully used in other crops (<xref ref-type="bibr" rid="B35">Stepanov et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B17">Guo et&#xa0;al., 2021</xref>), we apply a piecewise function to quantify the unique growth and decline dynamics of the panicle coverage. Finally, the parameters derived from this function are used as inputs for machine learning models to accurately estimate yield. This complete framework provides a novel method for leveraging canopy dynamics for data-driven rice breeding and management.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Image acquisition</title>
<p>Rice canopy images for panicle detection were gathered at the fields of the National Institute of Crop Science (NICS) in Wanju-gun, Republic of Korea, during the 2022 and 2023 growing seasons. High-resolution RGB images were acquired using two imaging systems: a fixed-position PTZ (Pan&#x2013;Tilt&#x2013;Zoom) camera (Hanwha Vision XNP-8300RW, South Korea) and a handheld camera (Sony DSC-RX0-M2, Japan). The experimental site and image acquisition equipment are shown in <xref ref-type="supplementary-material" rid="SF1">
<bold>Supplementary Figure S1</bold>
</xref>.</p>
<p>The PTZ camera, mounted on a tower at a fixed height of 5 m, was used for time-series imaging. While the camera has pan-tilt-zoom capabilities, these were used only for initial framing of the plot; for all subsequent data acquisition, the camera remained in a fixed position to record nadir RGB images at 3840 &#xd7; 2160 pixels, ensuring consistent imaging geometry. The PTZ camera, using the Wisenet WAVE (Hanwha Vision, South Korea) software, recorded images twice daily at 09:00 and 16:00. In contrast, the handheld camera captured images between 10:00 AM and 12:00 PM. This dataset, collected in 2022 and 2023, was used solely for training and validating the deep learning-based semantic segmentation models.</p>
<p>For the handheld camera, a specific visual alignment protocol was implemented to minimize human-induced variability. The camera was wirelessly tethered to a smartphone using the &#x2018;Imaging Edge&#x2019; mobile application for real-time monitoring of the field of view. Four poles were used to clearly mark the corners of the 1 m&#xb2; target quadrat within each plot. During image acquisition, the operator manually adjusted the camera&#x2019;s position until the on-screen auxiliary gridlines visually aligned with the four corner poles of the quadrat. This procedure was repeated for every shot between 10:00 AM and 12:00 PM to ensure that the camera&#x2019;s height, distance, and near-nadir viewing angle were kept as consistent as possible, resulting in a highly consistent pixel resolution of the target area across all images.</p>
<p>The entire dataset, collected from four rice cultivars (Nampyeong, Shindongjin, Dongjin-1, and Saeilmi), was used solely for training and validating the deep learning-based semantic segmentation models.</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Image preprocessing</title>
<p>All acquired images underwent preprocessing for semantic segmentation training. OpenCV 4.9.0 (a Python-based image processing library) cropped the images to 512 &#xd7; 512 pixels, improving computational efficiency and model performance. The rice panicle regions were then manually annotated as a single class using the LabelMe tool (<xref ref-type="bibr" rid="B31">Russell et&#xa0;al., 2008</xref>), after the heading stage. These annotations were converted into binary masks, in which 0 encoded background and 1 encoded panicles (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>). Image augmentation techniques (<xref ref-type="bibr" rid="B32">Shorten and Khoshgoftaar, 2019</xref>), including resizing, Gaussian noise addition, and random brightness and contrast adjustments (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>), were applied to improve model generalization and robustness. Specifically, the images were augmented with random brightness and contrast adjustments between 0.8 and 1.2, and were then upscaled (1.1&#x2013;2.0) or downscaled (0.6&#x2013;0.9) to vary size. This tripled the original dataset from 867 to 2,601 labeled images, producing greater diversity. This augmentation strategy simulated real-world variations in lighting, noise, and contrast, such as those caused by cloudy skies, shadows, and variable camera exposure, thereby enhancing model robustness at inference. Finally, the augmented dataset was divided into training, validation, and testing sets at a ratio of 7:2:1 to enable objective evaluation.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Panicle Segmentation Model Development and Yield Prediction Framework. Schematic overview of the entire research methodology. The left panel details the development pipeline for the deep learning-based panicle segmentation model, including image acquisition, data augmentation, training, and model selection using the &#x2018;22-&#x2019;23 dataset. The right panel illustrates the application of the selected model for time-series analysis and yield prediction using the &#x2018;23-&#x2019;24 dataset, covering panicle coverage extraction, parameter derivation, and machine learning-based prediction.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1611653-g001.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a framework for panicle segmentation and yield prediction using panicle coverage. On the left, the process includes image acquisition and preprocessing, data labeling and augmentation, training with semantic segmentation networks, performance evaluation, and model selection (DeepLabv3+ with ResNet-101). On the right, it details time-series image and yield data collection, extraction of panicle coverage, parameter extraction, prediction using machine learning models, and model performance evaluation. Various types of images and data representations are included, demonstrating each step visually.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Deep learning architecture for rice panicle segmentation</title>
<p>This study evaluated the rice panicle segmentation performance with two backbone networks and five established semantic-segmentation architectures. The backbone networks, ResNet-50 and ResNet-101 (<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Table S1</bold>
</xref>), were selected for their capacity to extract hierarchical features and mitigate the vanishing gradient problem through residual learning. These pre-trained networks act as foundational feature extractors, providing rich, multi-scale representations essential for accurate pixel-level classification (<xref ref-type="bibr" rid="B18">He et&#xa0;al., 2016</xref>). The use of both ResNet-50 and ResNet-101 enabled comparison of the feature representation depth, with ResNet-101 potentially capturing finer details at the cost of increased computational resources. Five distinct semantic segmentation models were evaluated: DeepLabv3+ (<xref ref-type="bibr" rid="B10">Chen et&#xa0;al., 2018</xref>), U-Net (<xref ref-type="bibr" rid="B30">Ronneberger et&#xa0;al., 2015</xref>), PSPNet (<xref ref-type="bibr" rid="B44">Zhao et&#xa0;al., 2017</xref>), FPN (<xref ref-type="bibr" rid="B23">Lin et&#xa0;al., 2017</xref>), and LinkNet (<xref ref-type="bibr" rid="B8">Chaurasia and Culurciello, 2017</xref>) (<xref ref-type="supplementary-material" rid="SF2">
<bold>Supplementary Figure S2</bold>
</xref>). These models were selected to represent a range of architectural designs and feature-processing strategies commonly employed in pixel-level classification, particularly in agricultural image analysis. DeepLabv3+ was chosen for its capacity to capture long-range contextual information via atrous convolution and the Atrous Spatial Pyramid Pooling (ASPP) module. U-Net, with its encoder&#x2013;decoder structure and skip connections, was included for its success in biomedical image segmentation and adaptability to diverse image analysis tasks. PSPNet, which utilizes a pyramid scene parsing network, was assessed to examine how global context affects segmentation accuracy. FPN was incorporated to evaluate the benefits of multi-scale feature representations for improved object delineation. Finally, LinkNet, known for its efficiency and real-time applicability, was used to explore the potential for computationally efficient segmentation. By systematically combining each of the five segmentation models with both ResNet-50 and ResNet-101, this study sought the optimal deep learning configuration for accurate and efficient rice panicle segmentation under field conditions.</p>
<p>Training parameters were tuned to standardize the input data and ensure stable learning. The image size was fixed at 512 &#xd7; 512 pixels, the batch size was set to 8 to balance memory usage with optimization stability, and each model was trained for 200 epochs with a learning rate of 0.0001 to ensure gradual, stable improvement. Training was conducted on a system featuring an NVIDIA Quadro RTX 5000 GPU (16 GB), an Intel Xeon Gold 6226R CPU, and 256 GB of RAM. The operating system was Windows 11, with CUDA 12.5 for GPU acceleration, Python 3.9, and PyTorch 2.2.2 serving as the deep-learning framework (<xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>). Model validation and testing were conducted on the same system to ensure consistency.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Training parameters, models, and hardware specifications used for the semantic segmentation tasks.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Category</th>
<th valign="middle" align="center">Specification</th>
</tr>
</thead>
<tbody>
<tr>
<th valign="middle" colspan="2" align="left">Architectures and backbones</th>
</tr>
<tr>
<td valign="middle" align="center">Semantic segmentation models</td>
<td valign="middle" align="center">DeepLabv3+, U-Net, FPN, LinkNet, PSPNet</td>
</tr>
<tr>
<td valign="middle" align="center">Backbone networks used</td>
<td valign="middle" align="center">ResNet-50, ResNet-101</td>
</tr>
<tr>
<th valign="middle" colspan="2" align="left">Training hyperparameters</th>
</tr>
<tr>
<td valign="middle" align="center">Image input size</td>
<td valign="middle" align="center">512 &#xd7; 512 pixels</td>
</tr>
<tr>
<td valign="middle" align="center">Batch size</td>
<td valign="middle" align="center">8</td>
</tr>
<tr>
<td valign="middle" align="center">Epochs</td>
<td valign="middle" align="center">200</td>
</tr>
<tr>
<td valign="middle" align="center">Optimizer</td>
<td valign="middle" align="center">Adam</td>
</tr>
<tr>
<td valign="middle" align="center">Learning rate</td>
<td valign="middle" align="center">0.0001</td>
</tr>
<tr>
<th valign="middle" colspan="2" align="left">System specifications</th>
</tr>
<tr>
<td valign="middle" align="center">CPU</td>
<td valign="middle" align="center">Intel Xeon Gold 6226R</td>
</tr>
<tr>
<td valign="middle" align="center">GPU</td>
<td valign="middle" align="center">NVIDIA Quadro RTX 5000 (16 GB)</td>
</tr>
<tr>
<td valign="middle" align="center">RAM</td>
<td valign="middle" align="center">256 GB</td>
</tr>
<tr>
<td valign="middle" align="center">Operating system</td>
<td valign="middle" align="center">Windows 11</td>
</tr>
<tr>
<td valign="middle" align="center">Framework and libraries</td>
<td valign="middle" align="center">Python 3.9, PyTorch 2.2.2, CUDA 12.5</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Evaluation of training accuracy</title>
<p>To evaluate the rice panicle detection performance, an independent evaluation dataset, separate from the training set, was used. Various metrics, including pixel accuracy, precision, recall, F1 score, and intersection over union (IoU), were used to assess the performance of the model comprehensively (<xref ref-type="disp-formula" rid="eq1">Equations 1-5</xref>). Pixel accuracy denotes the proportion of correctly classified pixels among all pixels for evaluating the overall accuracy of the model. Precision is the proportion of actual panicle pixels among those predicted as panicles by the model, thus indicating the panicle prediction accuracy of the model. Recall is the proportion of actual panicle pixels correctly identified by the model; it is used to evaluate the panicle detection capability of the model. The F1 score, which is the harmonic mean of the precision and recall, combines both metrics. IoU is the ratio between the intersection area and the union area of the actual and predicted panicle regions, thus indicating the accurate segmentation ability of the model. These metrics are crucial to evaluating the rice panicle detection performance of the model from various aspects and determining its applicability to real-world environments. The equations for the metrics are as follows:</p>
<disp-formula id="eq1">
<label>(1)</label>
<mml:math display="block" id="M1">
<mml:mrow>
<mml:mtext>Pixel&#xa0;accuracy</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="eq2">
<label>(2)</label>
<mml:math display="block" id="M2">
<mml:mrow>
<mml:mtext>Precision</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="eq3">
<label>(3)</label>
<mml:math display="block" id="M3">
<mml:mrow>
<mml:mtext>Recall</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="eq4">
<label>(4)</label>
<mml:math display="block" id="M4">
<mml:mrow>
<mml:mtext>F</mml:mtext>
<mml:mn>1</mml:mn>
<mml:mtext>&#xa0;score</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>2</mml:mn>
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mo>+</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="eq5">
<label>(5)</label>
<mml:math display="block" id="M5">
<mml:mrow>
<mml:mtext>IoU</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where TP (true positive) denotes the number of pixels correctly classified as panicle, FP (false positive) denotes pixels incorrectly classified as panicle, FN (false negative) signifies panicle pixels incorrectly classified as background, and TN (true negative) represents background pixels correctly classified as background. These metrics offer a comprehensive evaluation of the overall segmentation performance of the models.</p>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Experimental design for time-series panicle coverage analysis</title>
<p>To apply the trained segmentation models and build a yield prediction framework, separate time-series panicle coverage experiments were conducted in 2023 and 2024 at the National Institute of Crop Science (NICS), Republic of Korea, under both field and soil&#x2013;bin conditions. Field transplantation occurred on June 7 and June 26, 2023, and on June 8, 2024. The transplantation on June 26, 2023, was designated late transplantation (LT). Soil bins (1 m &#xd7; 1 m &#xd7; 0.5 m) were placed outdoors, and transplantation occurred on June 9, 2023, and June 10, 2024. The primary cultivar was Nampyeong, with Dongjin-1, Shindongjin, and Saeilmi included in the 2024 experiments. Nitrogen was applied at three rates (0, 98.8, and 197.6 kg ha<sup>-1</sup>), with treatments varying by year, environment (field or soil bin), and cultivar. Nitrogen was split into three doses following the standard cultivation method in Korea: 50% as a basal dressing before transplantation, 20% as a tillering fertilizer 20 days after transplanting, and the final 30% as a panicle fertilizer at the panicle formation stage. Each experimental unit consisted of a 1 m&#xb2; plot containing 28 hills. To analyze the relationship between the time-series panicle coverage and yield components, post-harvest measurements of the panicle number (PN), grain number (GN), number of grains per panicle (GNP), 1000-grain weight (TGW), and filled grain ratio (FGR) were conducted.</p>
</sec>
<sec id="s2_6">
<label>2.6</label>
<title>RGB image collection and preprocessing for yield component estimation</title>
<p>This section details the first step of our yield prediction framework: RGB image collection and preprocessing. The overall process, described in the following sections, involved (1) extracting panicle coverage from these images, (2) fitting the time-series data to a piecewise function to derive dynamic parameters (Section 2.7), and (3) using these parameters as inputs for machine learning models to predict final yield (Section 2.8). High-resolution RGB images were captured at intervals of 3&#x2013;7 days throughout the growing season using a Sony DSC-RX0-M2 camera to document all key growth stages. For this study, which focuses on panicle coverage, the images taken from the heading onwards were used, as panicles are the primary subject of segmentation. The images were acquired between 10:00 AM and 12:00 PM, with the camera aimed at the center of the yield survey plot and leveled with the ground to minimize distortion. The captured RGB images were cropped to encompass a 1 m&#xb2; area demarcated by the four corner poles within the plot, ensuring consistency in yield component measurements and reducing variability from inconsistent sampling. These cropped images were then resized to 1536 &#xd7; 1536 pixels, nine times the model input size of 512 &#xd7; 512 pixels, to ensure dataset uniformity (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>). For the yield estimation study, an aggregate of 1,956 time-series images was acquired over multiple observation dates. These images were collected from 152 distinct plots (20 field plots in 2023, and 132 plots, including 80 field and 52 soil-bin plots in 2024). Panicle coverage was calculated by dividing each 1536 &#xd7; 1536 image into nine 512 &#xd7; 512 pixel tiles. The deep learning model then processed each sub-image to estimate the panicle coverage for that segment. The overall panicle coverage was determined by dividing the estimated panicle area by the total image area as (<xref ref-type="disp-formula" rid="eq6">Equation 6</xref>).</p>
<disp-formula id="eq6">
<label>(6)</label>
<mml:math display="block" id="M6">
<mml:mrow>
<mml:mtext mathvariant="italic">PC</mml:mtext>
<mml:mo stretchy="false">(</mml:mo>
<mml:mo>%</mml:mo>
<mml:mo stretchy="false">)</mml:mo>
<mml:mo>=</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mtext mathvariant="italic">PA</mml:mtext>
</mml:mrow>
<mml:mrow>
<mml:mtext mathvariant="italic">PA</mml:mtext>
<mml:mo>+</mml:mo>
<mml:mtext mathvariant="italic">BA</mml:mtext>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>100</mml:mn>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where PC denotes the panicle coverage (%), PA represents the panicle area of the image, and BA denotes the background area of the image. The estimated panicle coverage values from all nine segments were averaged to derive coverage per unit area (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>). Representative resized images after the 2023 and 2024 treatments appear in <xref ref-type="supplementary-material" rid="SF3">
<bold>Supplementary Figure S3</bold>
</xref> to demonstrate the structure and quality of the dataset. Based on the performance evaluation presented in Section 3.2, the DeepLabv3+ model with a ResNet-101 backbone, which achieved the highest mIoU (0.82), was selected as the final model. This model was then used to segment all time-series images for the subsequent yield prediction analysis.</p>
</sec>
<sec id="s2_7">
<label>2.7</label>
<title>Fitting time-series data to a piecewise function</title>
<p>The time-series panicle coverage data obtained from the segmented images were fit to a piecewise function (<xref ref-type="disp-formula" rid="eq7">Equation 7</xref>) designed to model the growth and decline phases of the rice panicles, as shown in <xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>. The function comprises a sigmoidal growth phase and a quadratic decline phase, seamlessly connected at the transition point. The function is expressed below.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Piecewise function for modeling panicle coverage dynamics. A representative example of the piecewise function fit to time-series panicle coverage data. The figure illustrates the key parameters derived from the model: <italic>K</italic> (maximum panicle coverage), <italic>g</italic> (growth rate), <italic>d<sub>0</sub>
</italic> (time of maximum growth rate), <italic>a</italic> (curvature of the decline phase), and <italic>d<sub>1</sub>
</italic> (transition point between growth and decline phases).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1611653-g002.tif">
<alt-text content-type="machine-generated">Graph showing panicle coverage percentage over days after heading (DAH). The curve rises sharply from 0 to 25 DAH, then levels off. Vertical dashed lines at \(d_0\) and \(d_1\). Annotations include \(K\), \(g\), and \(a\), with arrows indicating growth and change.</alt-text>
</graphic>
</fig>
<disp-formula id="eq7">
<label>(7)</label>
<mml:math display="block" id="M7">
<mml:mrow>
<mml:mtext mathvariant="bold-italic">f</mml:mtext>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
<mml:mo>=</mml:mo>
<mml:mo>{</mml:mo>
<mml:mtable columnalign="left" equalrows="true" equalcolumns="true">
<mml:mtr columnalign="left">
<mml:mtd columnalign="left">
<mml:mrow>
<mml:mfrac>
<mml:mtext mathvariant="bold-italic">K</mml:mtext>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mn mathvariant="bold">1</mml:mn>
<mml:mo>+</mml:mo>
<mml:msup>
<mml:mtext mathvariant="bold-italic">e</mml:mtext>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mtext mathvariant="bold-italic">g</mml:mtext>
<mml:mo stretchy="false">(</mml:mo>
<mml:mtext mathvariant="bold-italic">x</mml:mtext>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mtext mathvariant="bold-italic">d</mml:mtext>
<mml:mn mathvariant="bold">0</mml:mn>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mrow>
<mml:mtext>&#x2003;&#x2003;&#x2003;</mml:mtext>
<mml:mtext mathvariant="bold-italic">if&#xa0;x</mml:mtext>
<mml:mo>&#x2264;</mml:mo>
<mml:msub>
<mml:mtext mathvariant="bold-italic">d</mml:mtext>
<mml:mn mathvariant="bold">1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr columnalign="left">
<mml:mtd columnalign="left">
<mml:mrow>
<mml:mfrac>
<mml:mtext mathvariant="bold-italic">K</mml:mtext>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mn mathvariant="bold">1</mml:mn>
<mml:mo>+</mml:mo>
<mml:msup>
<mml:mtext mathvariant="bold-italic">e</mml:mtext>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mtext mathvariant="bold-italic">g</mml:mtext>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mtext mathvariant="bold-italic">d</mml:mtext>
<mml:mn mathvariant="bold">1</mml:mn>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mtext mathvariant="bold-italic">d</mml:mtext>
<mml:mn mathvariant="bold">0</mml:mn>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2212;</mml:mo>
<mml:mtext mathvariant="bold-italic">a</mml:mtext>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mtext mathvariant="bold-italic">x</mml:mtext>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mtext mathvariant="bold-italic">d</mml:mtext>
<mml:mn mathvariant="bold">1</mml:mn>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mn mathvariant="bold">2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mrow>
<mml:mtext>&#x2003;&#x2003;&#x2003;</mml:mtext>
<mml:mtext mathvariant="bold-italic">if&#xa0;x</mml:mtext>
<mml:mo>&gt;</mml:mo>
<mml:msub>
<mml:mtext mathvariant="bold-italic">d</mml:mtext>
<mml:mn mathvariant="bold">1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
<mml:mo>}</mml:mo>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <italic>K</italic> represents the maximum panicle coverage observed during the growth phase; <italic>g</italic> determines the growth rate, influencing the steepness of the sigmoidal curve. The parameter <italic>d<sub>0</sub>
</italic> defines the point of maximum growth rate (inflection point), and <italic>d<sub>1</sub>
</italic> marks the transition point between the growth and decline phases. Lastly, <italic>a</italic> controls the curvature of the quadratic decline, dictating how rapidly the panicle coverage decreases after <italic>d<sub>1</sub>
</italic>. This piecewise function enables precise modeling of rice panicle dynamics, capturing the rapid increase during the heading stage and subsequent decline during senescence. The parameters derived from this model (<italic>K, g, d<sub>0</sub>, d<sub>1</sub>
</italic>, and <italic>a</italic>) provide quantitative insights into panicle development under varying environmental and experimental conditions. These fitted parameters served to compare treatment effects on rice growth and yield. The parameters of this piecewise function for each experimental plot were determined by fitting the model to the time-series panicle coverage data using the non-linear least squares method. The optimization was performed using a scientific computing library (e.g., the curve_fit function from the SciPy library in Python). A key step in this process was providing robust initial guesses for the parameters to ensure stable convergence of the algorithm; these were estimated from the observed data trends for each plot. The goodness-of-fit for each resulting curve was then evaluated using the coefficient of determination (R&#xb2;).</p>
</sec>
<sec id="s2_8">
<label>2.8</label>
<title>Machine learning model development for rice yield estimation</title>
<p>Machine learning models were developed for predicting the rice yield and its components using five parameters (<italic>K</italic>, <italic>g</italic>, <italic>d<sub>0</sub>
</italic>, <italic>a</italic>, <italic>d<sub>1</sub>
</italic>) extracted from a piecewise function fitted to the time-series panicle coverage data collected in 2023 and 2024. The models included partial least squares regression (PLSR), XGBoost regressor (XGBR), random forest regressor (RFR), and gradient boosting regressor (GBR).</p>
<p>The models were selected based on their documented performance in similar predictive modeling tasks in agricultural research. PLSR captures the linear relationship between the input and output variables, exhibiting stable predictive performance even under multicollinearity (<xref ref-type="bibr" rid="B40">Wold et&#xa0;al., 2001</xref>). RFR is an ensemble model that enhances the predictive performance by combining multiple decision trees (<xref ref-type="bibr" rid="B7">Breiman, 2001</xref>), whereas GBR and XGBR are boosting-based ensemble models that increase predictive accuracy by sequentially training weak learners (<xref ref-type="bibr" rid="B13">Friedman, 2001</xref>; <xref ref-type="bibr" rid="B9">Chen and Guestrin, 2016</xref>). The scikit-learn library was used primarily for model training and prediction.</p>
<p>Hyperparameter optimization was conducted for each model using GridSearchCV. Specifically, the PLSR model was set to n_components = 3, the RFR model with n_estimators = 100, and both the GBR and XGBR models with n_estimators = 100 and learning_rate = 0.05. The model performance was assessed using leave-one-out cross-validation (LOOCV). The root mean squared error (RMSE) and coefficient of determination (R&#xb2;) served as evaluation metrics (<xref ref-type="disp-formula" rid="eq8">Equations 8</xref>, <xref ref-type="disp-formula" rid="eq9">9</xref>). Tree SHapley Additive exPlanations (Tree SHAP) analysis was performed to interpret the RFR and boosting-based models (XGBR, GBR). The RMSE equals the square root of the mean squared difference between the predicted and actual values, calculated as</p>
<disp-formula id="eq8">
<label>(8)</label>
<mml:math display="block" id="M8">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>=</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mstyle>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math display="inline" id="im1">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> denotes the actual values, <inline-formula>
<mml:math display="inline" id="im2">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> denotes the predicted values, and n is the number of data points. The coefficient of determination (R&#xb2;) is the proportion of the variance in the actual values that is predictable from the predicted values, calculated as</p>
<disp-formula id="eq9">
<label>(9)</label>
<mml:math display="block" id="M9">
<mml:mrow>
<mml:msup>
<mml:mtext>R</mml:mtext>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:msubsup>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:msubsup>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math display="inline" id="im3">
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
</mml:math>
</inline-formula> represents the mean of the actual values.</p>
</sec>
<sec id="s2_9">
<label>2.9</label>
<title>Statistical analysis</title>
<p>To assess the effects of the experimental factors on the parameters derived from the piecewise function (<italic>K</italic>, <italic>g</italic>, <italic>d<sub>0</sub>
</italic>, <italic>a</italic>, and <italic>d<sub>1</sub>
</italic>), an analysis of variance (ANOVA) was performed using the statsmodels library in Python. Due to differences in the experimental design between the two years, the data for each year were analyzed separately. For the 2023 data, a one-way ANOVA was used to test the effect of the different treatment levels, which included nitrogen rates and transplantation dates. For the 2024 data, a three-way ANOVA was conducted to test the main effects of nitrogen, cultivar, and location, as well as their two-way interaction effects. All effects were considered statistically significant at <italic>p</italic> &lt; 0.05.</p>
</sec>
<sec id="s2_10">
<label>2.10</label>
<title>Overall research framework</title>
<p>The comprehensive methodology, visually summarized in <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>, is structured into two main components. The left panel details the development and validation of the deep learning model for panicle segmentation, while the right panel illustrates how this trained model is subsequently applied within a time-series analysis pipeline to extract dynamic growth parameters and, ultimately, to predict rice yield and its components using machine learning regression.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="s3_1">
<label>3.1</label>
<title>Model training and validation</title>
<p>The training and validation performance of five semantic segmentation models&#x2014;DeepLabv3+, PSPNet, U-Net, FPN, and LinkNet&#x2014;was evaluated using two different backbone architectures, ResNet-50 and ResNet-101. Both training loss and validation accuracy were monitored over 200 epochs to analyze the convergence trends (<xref ref-type="supplementary-material" rid="SF4">
<bold>Supplementary Figure S4</bold>
</xref>).</p>
<p>Across all models and backbones, the training loss consistently decreased, whereas the validation accuracy increased and subsequently stabilized as epochs increased. Models using ResNet-101 generally exhibited a slightly higher validation accuracy than ResNet-50, reflecting the enhanced feature extraction capabilities of the deeper backbone. Although the convergence patterns varied across the models, all achieved stable, low validation loss by the end of training. U-Net demonstrated the fastest initial stabilization, whereas other models, such as PSPNet, required more epochs to achieve similar final loss values.</p>
<p>This analysis confirms that all five segmentation models with both ResNet-50 and ResNet-101 backbones successfully converged, exhibiting decreasing training loss and stable validation accuracy. These results emphasize the effectiveness of these architectures in segmenting rice panicle images, with variations in convergence speed and final accuracy depending on the specific model and backbone combination.</p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Model performance comparison</title>
<p>To evaluate the performance of the models trained with two backbones (ResNet-50 and ResNet-101) and five semantic segmentation models (U-Net, FPN, LinkNet, PSPNet, and DeepLabv3+), the performance metrics were calculated using a test image set (<xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>). These metrics included the pixel accuracy, precision, recall, F1-score, and mean IoU (mIoU), offering a comprehensive overview of the segmentation quality and performance of each model.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Performance comparison of semantic segmentation models.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Model</th>
<th valign="middle" align="left">Backbone</th>
<th valign="middle" align="left">Pixel accuracy</th>
<th valign="middle" align="left">Precision</th>
<th valign="middle" align="left">Recall</th>
<th valign="middle" align="left">F1-score</th>
<th valign="middle" align="left">mIoU</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">U-Net</td>
<td valign="middle" rowspan="5" align="left">ResNet-50</td>
<td valign="middle" align="left">0.98</td>
<td valign="middle" align="left">0.85</td>
<td valign="middle" align="left">0.86</td>
<td valign="middle" align="left">0.85</td>
<td valign="middle" align="left">0.77</td>
</tr>
<tr>
<td valign="middle" align="left">FPN</td>
<td valign="middle" align="left">0.98</td>
<td valign="middle" align="left">0.85</td>
<td valign="middle" align="left">0.84</td>
<td valign="middle" align="left">0.84</td>
<td valign="middle" align="left">0.76</td>
</tr>
<tr>
<td valign="middle" align="left">LinkNet</td>
<td valign="middle" align="left">0.98</td>
<td valign="middle" align="left">0.85</td>
<td valign="middle" align="left">0.89</td>
<td valign="middle" align="left">0.87</td>
<td valign="middle" align="left">0.81</td>
</tr>
<tr>
<td valign="middle" align="left">PSPNet</td>
<td valign="middle" align="left">0.97</td>
<td valign="middle" align="left">0.73</td>
<td valign="middle" align="left">0.78</td>
<td valign="middle" align="left">0.76</td>
<td valign="middle" align="left">0.61</td>
</tr>
<tr>
<td valign="middle" align="left">DeepLabv3+</td>
<td valign="middle" align="left">0.99</td>
<td valign="middle" align="left">0.87</td>
<td valign="middle" align="left">0.88</td>
<td valign="middle" align="left">0.88</td>
<td valign="middle" align="left">0.81</td>
</tr>
<tr>
<td valign="middle" align="left">U-Net</td>
<td valign="middle" rowspan="5" align="left">ResNet-101</td>
<td valign="middle" align="left">0.98</td>
<td valign="middle" align="left">0.88</td>
<td valign="middle" align="left">0.84</td>
<td valign="middle" align="left">0.86</td>
<td valign="middle" align="left">0.78</td>
</tr>
<tr>
<td valign="middle" align="left">FPN</td>
<td valign="middle" align="left">0.98</td>
<td valign="middle" align="left">0.87</td>
<td valign="middle" align="left">0.86</td>
<td valign="middle" align="left">0.85</td>
<td valign="middle" align="left">0.78</td>
</tr>
<tr>
<td valign="middle" align="left">LinkNet</td>
<td valign="middle" align="left">0.99</td>
<td valign="middle" align="left">0.86</td>
<td valign="middle" align="left">0.90</td>
<td valign="middle" align="left">0.88</td>
<td valign="middle" align="left">0.81</td>
</tr>
<tr>
<td valign="middle" align="left">PSPNet</td>
<td valign="middle" align="left">0.97</td>
<td valign="middle" align="left">0.72</td>
<td valign="middle" align="left">0.77</td>
<td valign="middle" align="left">0.73</td>
<td valign="middle" align="left">0.59</td>
</tr>
<tr>
<td valign="middle" align="left">DeepLabv3+</td>
<td valign="middle" align="left">0.99</td>
<td valign="middle" align="left">0.86</td>
<td valign="middle" align="left">0.90</td>
<td valign="middle" align="left">0.88</td>
<td valign="middle" align="left">0.82</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>The table presents key performance metrics, including pixel accuracy, precision, recall, F1-score, and mean Intersection over Union (mIoU), for the five evaluated architectures using both ResNet-50 and ResNet-101 backbones.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>Among the models with the ResNet-50 backbone, DeepLabv3+ attained the highest mIoU (0.81), F1-score (0.88), and pixel accuracy (0.99). LinkNet also scored well with an mIoU of 0.81 and an F1-score of 0.87. PSPNet had the lowest mIoU (0.61) among the models employing ResNet-50. Among the models with the ResNet-101 backbone, DeepLabv3+ again recorded the highest mIoU (0.82) and an F1-score of 0.88. LinkNet ranked second with an mIoU of 0.81 and an F1-score of 0.88. Across all models, the use of ResNet-101 generally resulted in a modest improvement in the recall and mIoU over that of ResNet-50, although the magnitude of this improvement varied.</p>
<p>A visual inspection of the segmentation results (<xref ref-type="supplementary-material" rid="SF5">
<bold>Supplementary Figure S5</bold>
</xref>) showed that despite these differences in the numerical metrics, the qualitative performance of panicle detection was generally high across all models. All models segmented the panicle regions, with DeepLabv3+ and LinkNet showing slightly better quantitative results, particularly in terms of the mIoU.</p>
<p>These findings indicate that although the numerical performance metrics highlight subtle differences between the models, practical model selection may depend on the computational efficiency, task-specific requirements, or hardware constraints rather than significant differences in the segmentation capability. DeepLabv3+ and LinkNet emerge as strong candidates due to their consistently high performance across both backbones. Future research should prioritize evaluating the robustness of these models across diverse datasets and environmental conditions to optimize their application to real-world tasks.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Analysis of parameter values by treatment</title>
<p>
<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref> lists the fitted parameters (K, g, d<sub>0</sub>, a, d<sub>1</sub>) for various treatment conditions in 2023 and 2024. Through analysis of variance (ANOVA), we confirmed that factors such as nitrogen level, transplantation date, and rice variety had a statistically significant effect on the dynamics of panicle development, as quantified by these parameters (<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Table S2</bold>
</xref>).</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Fitted parameters of the piecewise function for rice panicle coverage under different treatment conditions in 2023 and 2024.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Year</th>
<th valign="middle" align="left">Treatment</th>
<th valign="middle" align="left">
<italic>K</italic>
</th>
<th valign="middle" align="left">
<italic>g</italic>
</th>
<th valign="middle" align="left">
<italic>d<sub>0</sub>
</italic>
</th>
<th valign="middle" align="left">
<italic>a</italic>
</th>
<th valign="middle" align="left">
<italic>d<sub>1</sub>
</italic>
</th>
<th valign="middle" align="left">R<sup>2</sup>
</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" rowspan="7" align="right">2023</td>
<td valign="middle" align="left">23-F-0N</td>
<td valign="middle" align="right">0.2512</td>
<td valign="middle" align="right">0.2738</td>
<td valign="middle" align="right">9.9412</td>
<td valign="middle" align="right">0.0003</td>
<td valign="middle" align="right">28.0717</td>
<td valign="middle" align="right">0.9858</td>
</tr>
<tr>
<td valign="middle" align="left">23-F-0N-LT</td>
<td valign="middle" align="right">0.2081</td>
<td valign="middle" align="right">0.2889</td>
<td valign="middle" align="right">8.3847</td>
<td valign="middle" align="right">0.0002</td>
<td valign="middle" align="right">26.11</td>
<td valign="middle" align="right">0.9825</td>
</tr>
<tr>
<td valign="middle" align="left">23-F-9N</td>
<td valign="middle" align="right">0.306</td>
<td valign="middle" align="right">0.2681</td>
<td valign="middle" align="right">8.6695</td>
<td valign="middle" align="right">0.0003</td>
<td valign="middle" align="right">27.0125</td>
<td valign="middle" align="right">0.9876</td>
</tr>
<tr>
<td valign="middle" align="left">23-F-9N-LT</td>
<td valign="middle" align="right">0.3078</td>
<td valign="middle" align="right">0.3712</td>
<td valign="middle" align="right">7.7353</td>
<td valign="middle" align="right">0.0001</td>
<td valign="middle" align="right">25.7043</td>
<td valign="middle" align="right">0.9958</td>
</tr>
<tr>
<td valign="middle" align="left">23-S-0N</td>
<td valign="middle" align="right">0.3272</td>
<td valign="middle" align="right">0.2658</td>
<td valign="middle" align="right">9.5009</td>
<td valign="middle" align="right">0.0003</td>
<td valign="middle" align="right">28.9899</td>
<td valign="middle" align="right">0.9926</td>
</tr>
<tr>
<td valign="middle" align="left">23-S-18N</td>
<td valign="middle" align="right">0.3505</td>
<td valign="middle" align="right">0.2649</td>
<td valign="middle" align="right">8.2946</td>
<td valign="middle" align="right">0.0003</td>
<td valign="middle" align="right">27.4182</td>
<td valign="middle" align="right">0.9932</td>
</tr>
<tr>
<td valign="middle" align="left">23-S-9N</td>
<td valign="middle" align="right">0.3455</td>
<td valign="middle" align="right">0.2781</td>
<td valign="middle" align="right">8.3295</td>
<td valign="middle" align="right">0.0003</td>
<td valign="middle" align="right">27.0919</td>
<td valign="middle" align="right">0.9936</td>
</tr>
<tr>
<td valign="middle" rowspan="11" align="right">2024</td>
<td valign="middle" align="left">24-F-0N-DJ</td>
<td valign="middle" align="right">0.2067</td>
<td valign="middle" align="right">0.3683</td>
<td valign="middle" align="right">8.5425</td>
<td valign="middle" align="right">0.0002</td>
<td valign="middle" align="right">26.3238</td>
<td valign="middle" align="right">0.9932</td>
</tr>
<tr>
<td valign="middle" align="left">24-F-0N-NP</td>
<td valign="middle" align="right">0.222</td>
<td valign="middle" align="right">0.3098</td>
<td valign="middle" align="right">10.311</td>
<td valign="middle" align="right">0.0001</td>
<td valign="middle" align="right">29.3876</td>
<td valign="middle" align="right">0.9967</td>
</tr>
<tr>
<td valign="middle" align="left">24-F-0N-SD</td>
<td valign="middle" align="right">0.1574</td>
<td valign="middle" align="right">0.2746</td>
<td valign="middle" align="right">11.0649</td>
<td valign="middle" align="right">0</td>
<td valign="middle" align="right">31.9771</td>
<td valign="middle" align="right">0.9918</td>
</tr>
<tr>
<td valign="middle" align="left">24-F-0N-SI</td>
<td valign="middle" align="right">0.211</td>
<td valign="middle" align="right">0.2103</td>
<td valign="middle" align="right">13.0975</td>
<td valign="middle" align="right">0</td>
<td valign="middle" align="right">34.027</td>
<td valign="middle" align="right">0.9918</td>
</tr>
<tr>
<td valign="middle" align="left">24-F-9N-DJ</td>
<td valign="middle" align="right">0.25</td>
<td valign="middle" align="right">0.4606</td>
<td valign="middle" align="right">4.9593</td>
<td valign="middle" align="right">0.0002</td>
<td valign="middle" align="right">17.093</td>
<td valign="middle" align="right">0.9672</td>
</tr>
<tr>
<td valign="middle" align="left">24-F-9N-NP</td>
<td valign="middle" align="right">0.2663</td>
<td valign="middle" align="right">0.3667</td>
<td valign="middle" align="right">7.5438</td>
<td valign="middle" align="right">0.0002</td>
<td valign="middle" align="right">23.7445</td>
<td valign="middle" align="right">0.986</td>
</tr>
<tr>
<td valign="middle" align="left">24-F-9N-SD</td>
<td valign="middle" align="right">0.2102</td>
<td valign="middle" align="right">0.3355</td>
<td valign="middle" align="right">7.8603</td>
<td valign="middle" align="right">0.0001</td>
<td valign="middle" align="right">25.8459</td>
<td valign="middle" align="right">0.9902</td>
</tr>
<tr>
<td valign="middle" align="left">24-F-9N-SI</td>
<td valign="middle" align="right">0.2782</td>
<td valign="middle" align="right">0.2695</td>
<td valign="middle" align="right">8.9365</td>
<td valign="middle" align="right">0.0002</td>
<td valign="middle" align="right">28.3735</td>
<td valign="middle" align="right">0.9905</td>
</tr>
<tr>
<td valign="middle" align="left">24-S-0N</td>
<td valign="middle" align="right">0.193</td>
<td valign="middle" align="right">0.198</td>
<td valign="middle" align="right">17.6353</td>
<td valign="middle" align="right">0.0002</td>
<td valign="middle" align="right">40.1652</td>
<td valign="middle" align="right">0.9932</td>
</tr>
<tr>
<td valign="middle" align="left">24-S-9N</td>
<td valign="middle" align="right">0.2293</td>
<td valign="middle" align="right">0.2053</td>
<td valign="middle" align="right">14.5728</td>
<td valign="middle" align="right">0.0002</td>
<td valign="middle" align="right">37.9843</td>
<td valign="middle" align="right">0.9908</td>
</tr>
<tr>
<td valign="middle" align="left">24-S-18N</td>
<td valign="middle" align="right">0.2171</td>
<td valign="middle" align="right">0.206</td>
<td valign="middle" align="right">16.2943</td>
<td valign="middle" align="right">0.0003</td>
<td valign="middle" align="right">38.9029</td>
<td valign="middle" align="right">0.9923</td>
</tr>
<tr>
<td valign="middle" rowspan="3" align="center">ANOVA</td>
<td valign="middle" align="left">Nitrogen</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">5.10E-02</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="left">
</td>
</tr>
<tr>
<td valign="middle" align="left">Cultivar</td>
<td valign="middle" align="right">8.66E-03</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="left">
</td>
</tr>
<tr>
<td valign="middle" align="left">Transplanting</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">8.33E-02</td>
<td valign="middle" align="right">&lt;0.001</td>
<td valign="middle" align="right">
</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>The parameters <italic>K</italic> (maximum panicle coverage), <italic>g</italic> (growth rate), <italic>d<sub>0</sub>
</italic> (inflection point/time of maximum growth rate), <italic>a</italic> (curvature of the decline phase), and <italic>d<sub>1</sub>
</italic> (transition point between growth and decline phases) were estimated for each treatment. R&#xb2; values indicate the goodness-of-fit of the piecewise function. Treatment codes: F, Field; S, Soil&#x2013;bin; 0N, 0 kg ha<sup>-1</sup> nitrogen; 9N, 98.8 kg ha<sup>-1</sup> nitrogen; 18N, 197.6 kg ha<sup>-1</sup> nitrogen; LT, Late transplantation; DJ, Dongjin-1; NP, Nampyeong; SD, Shindongjin; SI, Saeilmi. The number before the hyphen represents the year of the experiment. The ANOVA results at the bottom of the table show the significance levels (<italic>p</italic>-values) for the effects of nitrogen, cultivar, and transplanting factors on each parameter.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>Across both years, <italic>K</italic> (maximum panicle coverage) generally increased with higher nitrogen levels, affirming the critical role of nitrogen. For instance, in the 2023 field experiment, <italic>K</italic> increased from 0.2512 under no nitrogen (23-F-0N) to 0.3060 with nitrogen addition (23-F-9N). The parameter <italic>g</italic> (growth rate) also tended to rise with nitrogen application in most cases, such as in the 2024 Nampyeong variety field experiment, where it rose from 0.3098 (0N) to 0.3667 (9N), indicating accelerated growth.</p>
<p>The timing of key developmental stages also shifted. The time of maximum growth rate (<italic>d<sub>0</sub>
</italic>) advanced under higher nitrogen levels, shifting from 9.94 days (23-F-0N) to 8.67 days (23-F-9N) in the 2023 field data. Similarly, the start of the decline phase (<italic>d<sub>1</sub>
</italic>) was advanced in parallel, occurring at 27.01 days compared to 28.07 days. The curvature of the post-peak decline (<italic>a</italic>) showed varied responses, though it marginally increased with higher nitrogen in some treatments.</p>
<p>The piecewise function demonstrated reliable performance in capturing panicle dynamics across diverse conditions, with the goodness-of-fit R&#xb2; values remaining high, nearly all above 0.98.</p>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Effects of nitrogen, transplantation date, and crop variety on panicle coverage dynamics</title>
<p>To investigate the influence of nitrogen, transplantation date, and varietal factors on the panicle coverage dynamics, the time-series changes in the panicle coverage were plotted under different treatment conditions, and the data were fitted to the piecewise function (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>).</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Effects of agricultural treatments on the dynamics of panicle coverage. This figure shows how panicle coverage over time is influenced by different management practices and genetic backgrounds, with each curve representing a fitted piecewise function. <bold>(A)</bold> The effect of nitrogen fertilization on panicle coverage dynamics. <bold>(B)</bold> The effect of late transplantation on panicle coverage dynamics. <bold>(C)</bold> Comparison of panicle coverage dynamics among four different rice varieties under the no-nitrogen (0N) treatment. <bold>(D)</bold> Comparison of panicle coverage dynamics among the same four varieties under a high-nitrogen (9N) fertilization regime.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1611653-g003.tif">
<alt-text content-type="machine-generated">Four line graphs (A, B, C, D) show panicle coverage over days after heading (DAH) in rice plants under different treatments. Graph A compares 0N and 9N nitrogen treatments. Graph B details 0N-LT, 9N-LT, 0N, and 9N treatments. Graphs C and D compare rice varieties: Saeilmi, Shindongjin, Nampyeong, and Dongjin-1. Each graph demonstrates varying coverage and growth patterns.</alt-text>
</graphic>
</fig>
<p>As confirmed by our ANOVA results, nitrogen treatment had a statistically significant effect on panicle coverage dynamics (<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>), a trend visually represented in <xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3A</bold>
</xref>. Higher nitrogen level (9N) led to a higher maximum panicle coverage (<italic>K</italic>) of 0.2663 compared to 0.2220 under the no-nitrogen control (0N). This suggests that nitrogen promotes panicle growth and development. The quadratic curvature parameter (<italic>a</italic>), by contrast, was largely unaffected by nitrogen treatment, remaining essentially constant (0.0003) across all treatments.</p>
<p>The effects of the transplantation date and nitrogen availability were also apparent (<xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3B</bold>
</xref>). Late transplantation (LT) under no-nitrogen conditions resulted in a lower maximum panicle coverage (<italic>K</italic>), decreasing from 0.2512 (0N) to 0.2081 (0N-LT). Contrary to the baseline treatment, the transition to the decline phase (<italic>d<sub>1</sub>
</italic>) was also earlier in the LT group. With sufficient nitrogen, the adverse effect of late transplantation on <italic>K</italic> was offset, with K values of 0.3060 (9N) and 0.3078 (9N-LT) being nearly identical.</p>
<p>Significant differences in panicle coverage dynamics were observed among cultivars, and these responses were influenced by nitrogen availability, as indicated by the significant main effects of cultivar and nitrogen (<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>) and shown in <xref ref-type="fig" rid="f3">
<bold>Figures&#xa0;3C, D</bold>
</xref>. Under the 9N condition, Saeilmi achieved one of the highest <italic>K</italic> values (0.2782) but had a slower growth rate (<italic>g</italic> = 0.2695), whereas Dongjin-1 showed the fastest growth rate (<italic>g</italic> = 0.4606) but a more moderate <italic>K</italic> value (0.2500). These results underscore the intricate interplay of genetics with management practices. These results reveal the complex interplay of nitrogen availability, transplantation timing, and genetic factors in determining the panicle coverage dynamics. The findings emphasize the importance of considering these factors in optimizing nitrogen management and variety selection for rice production.</p>
</sec>
<sec id="s3_5">
<label>3.5</label>
<title>Correlation between piecewise function parameters and yield components</title>
<p>A Pearson correlation analysis was performed to investigate the linear relationship between the five parameters derived from the piecewise function (<italic>K</italic>, <italic>g</italic>, <italic>d<sub>0</sub>
</italic>, <italic>a</italic>, and <italic>d<sub>1</sub>
</italic>) and the key yield components (<xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>). The analysis identified several significant correlations.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Heatmap of Pearson correlation coefficients between piecewise function parameters and yield components. This heatmap visualizes the linear relationships between the five dynamic parameters (K, g, d<sub>0</sub>, a, and d<sub>1</sub>) and six key yield components (Yield, GN, Grain Number; PN, Panicle Number; GNP, Grains per Panicle; TGW, 1000-Grain Weight; FGR, Filled Grain Ratio). Red cells indicate a positive correlation, while blue cells indicate a negative correlation. The intensity of the color corresponds to the strength of the correlation, with the correlation coefficient (r) value displayed in each cell. Asterisks denote statistical significance levels: *<italic>p</italic> &lt; 0.05, **<italic>p</italic> &lt; 0.01, and ***<italic>p</italic> &lt; 0.001. &#x2018;ns&#x2019; indicates a non-significant correlation.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1611653-g004.tif">
<alt-text content-type="machine-generated">Heatmap showing Pearson correlation coefficients between agricultural parameters and yield factors. Strong positive correlations in red, negative in blue. Significant correlations are marked with asterisks (*). Parameters on y-axis include maximum panicle coverage, growth rate, inflection point, curvature of the decline phase, and transition point. Yield factors on x-axis include yield, grain number, panicle number, grain number per panicle, thousand grain weight, and filled grain ratio.</alt-text>
</graphic>
</fig>
<p>Among the parameters, <italic>K</italic> (maximum panicle coverage) showed the strongest positive correlation with Yield (<italic>r</italic> = 0.87, <italic>p</italic> &lt; 0.001), GN (grain number) (<italic>r</italic> = 0.85, <italic>p</italic> &lt; 0.001), and PN (panicle number) (<italic>r</italic> = 0.70, <italic>p</italic> &lt; 0.001), highlighting its dominant role in determining final crop yield. Interestingly, <italic>K</italic> displayed a significant negative correlation with TGW (1000-grain weight) (<italic>r</italic> = -0.47, <italic>p</italic> &lt; 0.001).</p>
<p>The timing parameters also revealed strong relationships. The time of maximum growth rate (<italic>d<sub>0</sub>
</italic>) was strongly negatively correlated with Yield (<italic>r</italic> = -0.62, <italic>p</italic> &lt; 0.001) and especially with FGR (filled grain ratio) (<italic>r</italic> = -0.71, <italic>p</italic> &lt; 0.001). Similarly, the transition point to the decline phase (<italic>d<sub>1</sub>
</italic>) showed a strong negative correlation with Yield (r = -0.57, p &lt; 0.001) and FGR (<italic>r</italic> = -0.69, <italic>p</italic> &lt; 0.001). Taken together, these findings underscore the intricate interplay between genetic factors (cultivar) and management practices (nitrogen, transplantation timing) in determining panicle coverage dynamics, emphasizing the importance of an integrated approach for optimizing rice production.</p>
</sec>
<sec id="s3_6">
<label>3.6</label>
<title>Regression analysis of yield components using models evaluated with LOOCV</title>
<p>Regression plots illustrated the predictive performance of the four models (PLSR, RFR, GBR, and XGBR) in predicting the yield and yield components based on the five parameters derived from the piecewise function (<xref ref-type="supplementary-material" rid="SF6">
<bold>Supplementary Figure S6</bold>
</xref>). The models were evaluated using LOOCV; each plot shows the relationship between the actual (x-axis) and predicted values (y-axis). The red diagonal line represents the ideal 1:1 relationship (perfect prediction), and the blue line represents the regression line fitted to the data.</p>
<p>The results demonstrate different accuracies depending on the model and yield component. RFR and XGBR showed high predictive performance for Yield and GN, with points clustered closely around the 1:1 line, indicating good agreement between the predicted and observed values. PLSR and GBR also showed reasonably good predictions for Yield and GN, although with slightly greater scatter. For PN, all models showed moderate prediction accuracy, with RFR and XGBR performing marginally better than PLSR and GBR. In contrast, the predictions for GNP and TGW showed lower accuracy across all models, as evidenced by the increased scatter around the 1:1 line. This reflects a greater inherent variability in these traits, in addition to limitations in the ability of the models to capture them based solely on the piecewise function parameters. The FGR predictions showed moderate accuracy, with RFR and XGBR again demonstrating slightly better performances than those of PLSR and GBR.</p>
</sec>
<sec id="s3_7">
<label>3.7</label>
<title>Coefficient of determination and RMSE evaluation of yield estimation models</title>
<p>
<xref ref-type="table" rid="T4">
<bold>Table&#xa0;4</bold>
</xref> summarizes the predictive performances of the four regression models (PLSR, RFR, GBR, and XGBR) based on the R&#xb2; and RMSE values for the yield and yield components: GN, PN, GNP, TGW, and FGR.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Predictive performance of machine learning models for yield and its components.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Yield components</th>
<th valign="middle" align="left">Metric</th>
<th valign="middle" align="left">PLSR</th>
<th valign="middle" align="left">XGBR</th>
<th valign="middle" align="left">RFR</th>
<th valign="middle" align="left">GBR</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Yield</td>
<td valign="middle" rowspan="6" align="left">R<sup>2</sup>
</td>
<td valign="middle" align="left">0.82</td>
<td valign="middle" align="left">0.89</td>
<td valign="middle" align="left">0.89</td>
<td valign="middle" align="left">0.88</td>
</tr>
<tr>
<td valign="middle" align="left">GN</td>
<td valign="middle" align="left">0.75</td>
<td valign="middle" align="left">0.82</td>
<td valign="middle" align="left">0.87</td>
<td valign="middle" align="left">0.87</td>
</tr>
<tr>
<td valign="middle" align="left">PN</td>
<td valign="middle" align="left">0.61</td>
<td valign="middle" align="left">0.66</td>
<td valign="middle" align="left">0.80</td>
<td valign="middle" align="left">0.80</td>
</tr>
<tr>
<td valign="middle" align="left">GNP</td>
<td valign="middle" align="left">0.21</td>
<td valign="middle" align="left">0.27</td>
<td valign="middle" align="left">0.39</td>
<td valign="middle" align="left">0.40</td>
</tr>
<tr>
<td valign="middle" align="left">TGW</td>
<td valign="middle" align="left">0.44</td>
<td valign="middle" align="left">0.67</td>
<td valign="middle" align="left">0.70</td>
<td valign="middle" align="left">0.63</td>
</tr>
<tr>
<td valign="middle" align="left">FGR</td>
<td valign="middle" align="left">0.57</td>
<td valign="middle" align="left">0.71</td>
<td valign="middle" align="left">0.73</td>
<td valign="middle" align="left">0.73</td>
</tr>
<tr>
<td valign="middle" align="left">Yield (kg/ha)</td>
<td valign="middle" rowspan="6" align="left">RMSE</td>
<td valign="middle" align="left">75.69</td>
<td valign="middle" align="left">61.16</td>
<td valign="middle" align="left">60.83</td>
<td valign="middle" align="left">61.46</td>
</tr>
<tr>
<td valign="middle" align="left">GN (number/m<sup>2</sup>)</td>
<td valign="middle" align="left">3521.43</td>
<td valign="middle" align="left">2943.32</td>
<td valign="middle" align="left">2527.82</td>
<td valign="middle" align="left">2526.56</td>
</tr>
<tr>
<td valign="middle" align="left">PN (number/m<sup>2</sup>)</td>
<td valign="middle" align="left">53.86</td>
<td valign="middle" align="left">49.91</td>
<td valign="middle" align="left">38.69</td>
<td valign="middle" align="left">38.05</td>
</tr>
<tr>
<td valign="middle" align="left">GNP (GN/PN)</td>
<td valign="middle" align="left">9.93</td>
<td valign="middle" align="left">9.52</td>
<td valign="middle" align="left">8.72</td>
<td valign="middle" align="left">8.65</td>
</tr>
<tr>
<td valign="middle" align="left">TGW (g)</td>
<td valign="middle" align="left">1.91</td>
<td valign="middle" align="left">1.46</td>
<td valign="middle" align="left">1.40</td>
<td valign="middle" align="left">1.53</td>
</tr>
<tr>
<td valign="middle" align="left">FGR (%)</td>
<td valign="middle" align="left">0.06</td>
<td valign="middle" align="left">0.05</td>
<td valign="middle" align="left">0.05</td>
<td valign="middle" align="left">0.05</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>The coefficient of determination (R<sup>2</sup>) and root mean squared error (RMSE) are shown for four regression models evaluated using leave-one-out cross-validation (LOOCV).</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>Regarding the R&#xb2; values, RFR and XGBR consistently demonstrated superior performance for the key components, such as Yield (R&#xb2; = 0.890 for both) and GN (R&#xb2; = 0.870 and 0.820, respectively). GBR also achieved strong results for Yield, GN, and PN, with its performance comparable to that of RFR for these components. PLSR performed relatively well for Yield (R&#xb2; = 0.820) and GN (R&#xb2; = 0.750) but underperformed in components such as the GNP (R&#xb2; = 0.210) and TGW (R&#xb2; = 0.440), highlighting its limitations in capturing complex, nonlinear relationships.</p>
<p>Considering the RMSE values, RFR and XGBR recorded lower RMSE values for Yield (RMSE = 60.83 and 61.16, respectively) and GN (RMSE = 2527.82 and 2943.32, respectively), indicating their reliability in reducing the prediction errors. Conversely, PLSR displayed higher RMSE values across most components, particularly GN (RMSE = 3521.43) and Yield (RMSE = 75.69), reaffirming its limited predictive capability when compared with that of the nonlinear models. GBR generally performed similarly to XGBR in terms of the RMSE, particularly for Yield, GN, and PN.</p>
<p>These results emphasize the strengths of nonlinear models such as RFR and XGBR in handling complex traits and achieving better prediction accuracy for the yield components. However, traits such as the GNP and TGW consistently recorded lower R&#xb2; values and higher RMSE across all models, suggesting the inherent difficulty in predicting these components with the available data.</p>
<p>
<xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5</bold>
</xref>; <xref ref-type="supplementary-material" rid="SF7">
<bold>Supplementary Figure S7</bold>
</xref> present the SHapley Additive exPlanations (SHAP) value analysis for the RFR model, detailing the contribution of each parameter in the piecewise function (<italic>K</italic>, <italic>g</italic>, <italic>d<sub>0</sub>
</italic>, <italic>a</italic>, <italic>d<sub>1</sub>
</italic>) to the prediction of the yield and yield components. Each point on the plot represents a single data point, with its position on the x-axis indicating the SHAP value (effect on the model output) and its color representing the feature value (red for high, blue for low).</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Feature importance analysis for yield prediction models using SHAP (SHapley Additive exPlanations). This figure details the contribution of each piecewise function parameter to the predictions of the Random Forest Regressor (RFR) model for Yield, Grain Number (GN), and Panicle Number (PN). For each yield component, the left plot is a SHAP summary plot, where each dot is a single data point. The color of the dot represents the feature&#x2019;s value (red for high, blue for low), and its position on the x-axis indicates its impact on the model&#x2019;s output (positive or negative). The right bar plot ranks the features by their mean absolute SHAP value, indicating their overall importance to the model&#x2019;s prediction.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1611653-g005.tif">
<alt-text content-type="machine-generated">Three pairs of graphs illustrate SHAP value impacts on model outputs for Yield, GN, and PN. Left graphs show SHAP values across features K, \(d_0\), a, \(d_1\), and g, with colors indicating feature value magnitude. Right bar charts show mean absolute SHAP values, highlighting feature K as most influential, followed by \(d_1\), \(d_0\), a, and g in varying order across outcomes.</alt-text>
</graphic>
</fig>
<p>Regarding the yield prediction, <italic>K</italic> (maximum panicle coverage) showed the greatest mean absolute SHAP value and therefore the most significant overall influence, with high values of <italic>K</italic> (red points) consistently associated with positive SHAP values (increased yield prediction); <italic>d<sub>0</sub>
</italic> (time of maximum growth rate) was also found to be essential for yield prediction.</p>
<p>Regarding the GN, <italic>K</italic> was again the dominant predictor, followed by <italic>d<sub>0</sub>
</italic> and <italic>d<sub>1</sub>
</italic>, whereas <italic>K</italic> and <italic>d<sub>1</sub>
</italic> were the most important for the PN. The GNP was the most affected by <italic>d<sub>1</sub>
</italic> and <italic>K</italic>, whereas the TGW was the most influenced by <italic>K</italic>, <italic>d<sub>1</sub>
</italic>, and <italic>d<sub>0</sub>
</italic>. The parameters <italic>d<sub>1</sub>
</italic> and <italic>d<sub>0</sub>
</italic> had the strongest influence on the FGR, with a higher value of <italic>d<sub>1</sub>
</italic> (a delayed transition to the decline phase) and a lower value of <italic>d<sub>0</sub>
</italic> (delayed onset of the maximum growth rate) generally associated with a higher FGR. The growth rate (<italic>g</italic>) and curvature of the decline (<italic>a</italic>) consistently showed lesser importance across all yield components.</p>
<p>This SHAP analysis confirmed that the parameters derived from the piecewise function, particularly <italic>K</italic>, <italic>d<sub>0</sub>
</italic>, and <italic>d<sub>1</sub>
</italic>, provide valuable insights into the factors driving the yield and its components. The visual representation of the feature importance facilitates a more nuanced understanding of the predictions made by the model than a simple examination of the overall model performance metrics.</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>This study demonstrated the effectiveness of a deep learning-based approach integrating high-resolution RGB imagery, semantic segmentation, and time-series analysis for accurately monitoring the rice panicle coverage and predicting the yield components. The framework achieves significant improvements in accuracy and efficiency relative to traditional methods, highlighting its potential for advancing precision agricultural practices in rice production. Specifically, the strong correlation between the predicted and observed values (<xref ref-type="table" rid="T4">
<bold>Table&#xa0;4</bold>
</xref>; <xref ref-type="supplementary-material" rid="SF6">
<bold>Supplementary Figure S6</bold>
</xref>) underscores the practical applicability of this technology. Furthermore, the ability to perform these analyses using readily available RGB imagery, rather than specialized equipment, increases the accessibility and potential for widespread adoption of this approach.</p>
<p>Deep learning models, particularly CNNs, are adept at extracting complex features from high-resolution images, effectively addressing challenges such as partial occlusion and variable lighting conditions that often hinder traditional image processing techniques (<xref ref-type="bibr" rid="B24">Lin et&#xa0;al., 2023</xref>; <xref ref-type="bibr" rid="B28">Mohammadzadeh Babr et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B26">Lu et&#xa0;al., 2024</xref>). Our evaluations confirmed this, showing robust performances across diverse environmental conditions (<xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>). Notably, the high performance of DeepLabv3+ is consistent with previous studies that have also identified it as a robust model for rice panicle segmentation (<xref ref-type="bibr" rid="B37">Wang et&#xa0;al., 2021</xref>). LinkNet also achieved a comparable mIoU to DeepLabv3+ in our experiments, suggesting its suitability for this application.</p>
<p>However, we acknowledge that this level of performance (mIoU &lt; 0.85) indicates room for further optimization. This limitation is likely attributable to the inherent complexity of the target objects and the challenging field conditions. First, severe occlusion is unavoidable as rice panicles grow and overlap with each other and with leaves in a dense canopy, making it fundamentally difficult for any model to delineate precise pixel-level boundaries. Second, uncontrolled lighting conditions, such as shadows and direct sunlight, can alter the appearance of panicles and obscure their features (<xref ref-type="bibr" rid="B24">Lin et al., 2023</xref>). Lastly, the texture and color of panicles can become similar to those of senescing leaves in later growth stages, potentially confusing the model.</p>
<p>Nevertheless, it is crucial to interpret this segmentation performance within the context of our study&#x2019;s primary objective: predicting yield components from the temporal dynamics of panicle coverage. The results demonstrate that our framework, even with an mIoU of 0.82, was sufficiently robust to capture the overall trend of panicle development. The high predictive power of the final yield models (R&#xb2; up to 0.89) strongly supports this, suggesting that capturing the holistic temporal pattern of the canopy is more critical for yield prediction than achieving perfect segmentation of every individual panicle.</p>
<p>While the selected models like DeepLabv3+ and LinkNet provided robust performance for this study&#x2019;s objectives, we acknowledge that the field of deep learning is rapidly evolving. Future research should therefore focus on improving segmentation accuracy to an even higher standard. A critical next step would be to significantly expand the current dataset to include more diverse genetic backgrounds, environmental conditions, and growth stages. The automated image acquisition platform based on a fixed tower, as implemented in this study, offers an efficient and direct pathway for building the large-scale, longitudinal datasets required to effectively train and validate more advanced architectures. Furthermore, exploring these advanced architectures, such as Vision Transformers (ViT) which have shown promise in handling occlusion (<xref ref-type="bibr" rid="B26">Lu et&#xa0;al., 2024</xref>), will be a key priority.</p>
<p>While conventional single-time-point analyses might offer a snapshot correlation, for instance, between panicle number and coverage at the heading stage, they cannot capture the temporal dynamics of grain filling and senescence. The proposed framework, by contrast, not only estimates maximum coverage (<italic>K</italic>) but also quantifies the rates of growth (<italic>g</italic>) and decline (<italic>a</italic>). This allows for a deeper understanding of how the <italic>entire</italic> developmental trajectory, including the speed of maturation and senescence, impacts final yield components like the filled grain ratio (FGR)&#x2014;an insight largely inaccessible through static measurements.</p>
<p>This study revealed that dynamic parameters derived from time-series panicle coverage are powerful predictors for rice yield. The maximum panicle coverage (<italic>K</italic>) emerged as the strongest predictor for Yield and GN (<italic>r</italic> = 0.87 and 0.85, respectively). This is physiologically sound, as a greater panicle area allows for a higher number of spikelets and increased light interception, ultimately boosting photosynthetic capacity and assimilate production, which aligns with previous findings on the importance of canopy architecture (<xref ref-type="bibr" rid="B15">Gu et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B20">Ji et&#xa0;al., 2023</xref>). Interestingly, <italic>K</italic> showed a moderate negative correlation with TGW (<italic>r</italic> = -0.47), which can be interpreted as the well-known &#x201c;yield component compensation effect&#x201d;. This suggests that when a higher number of grains is secured per unit area, the photosynthates distributed to each grain become relatively limited, leading to a tendency for reduced individual grain weight.</p>
<p>Critically, the introduction of a piecewise function to model the temporal dynamics of panicle coverage provides valuable insights into rice growth and development, allowing us to capture the dynamic processes that influence yield. The strong negative correlations between timing parameters (<italic>d<sub>0</sub>
</italic>, <italic>d<sub>1</sub>
</italic>) and FGR (<italic>r</italic> = -0.71 and -0.69, respectively) provide critical insights. These results imply that a faster progression to the peak growth and senescence stages is beneficial for grain filling. This could be because rapid panicle development allows the critical grain-filling period to occur under optimal weather conditions, avoiding late-season stresses like high temperatures or insufficient solar radiation that might otherwise hinder full grain development. This highlights the potential of using temporal dynamic parameters to assess the adaptation of the crop to environmental conditions.</p>
<p>Furthermore, the parameters &#x2018;<italic>g</italic>&#x2019; (growth rate) and &#x2018;<italic>a</italic>&#x2019; (decline rate) provide additional physiological insights. The growth rate &#x2018;g&#x2019; likely reflects the initial vigor and uniformity of panicle exsertion, with a higher &#x2018;g&#x2019; value indicating efficient nutrient translocation at the beginning of the reproductive stage. More intriguingly, the decline rate &#x2018;<italic>a</italic>&#x2019; can be interpreted as a proxy for the grain-filling process. As grains successfully fill and accumulate weight, the panicles begin to droop, which is a key visual indicator of a heavy and well-developing sink. This physical change in canopy architecture, specifically the change in panicle angle, reduces the panicle area visible from the nadir-view camera. Therefore, a higher value for &#x2018;<italic>a</italic>&#x2019; may not represent degradation, but rather the positive outcome of effective assimilate partitioning that leads to heavy grains and successful ripening.</p>
<p>The results also demonstrated the significant influence of environmental and genetic factors on panicle coverage dynamics (e.g., <xref ref-type="bibr" rid="B42">Yang et al., 2022</xref>), as statistically confirmed by our analysis of variance (<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Table&#xa0;3</bold>
</xref>). As shown in <xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>, nitrogen fertilization levels, transplantation dates, and varietal differences had a measurable, statistically significant effect on the parameters of the piecewise function, ultimately affecting the yield components. This underscores the need for tailored management practices that account for these interacting factors.</p>
<p>However, this study has several limitations. First, because the data were collected over a relatively short period and from a single experimental location, it may not adequately reflect the variability introduced by various environmental factors such as weather, pests, and diseases. Future research should consider this variability through multi-environment and long-term studies. Second, because only RGB images were used, additional research is needed to integrate multispectral and thermal imaging to analyze the physiological traits and stress responses (<xref ref-type="bibr" rid="B14">Gitelson et&#xa0;al., 1996</xref>; <xref ref-type="bibr" rid="B12">Dorigo et&#xa0;al., 2007</xref>; <xref ref-type="bibr" rid="B11">&#xc7;olak et&#xa0;al., 2015</xref>; <xref ref-type="bibr" rid="B29">Park et&#xa0;al., 2021</xref>). Additionally, 3D point cloud data, derived from drone imagery, can provide comprehensive information on the canopy structure and panicle architecture, potentially improving the accuracy of the yield component predictions (<xref ref-type="bibr" rid="B41">Wu et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B33">Song et&#xa0;al., 2024</xref>). Lastly, the model utilized in this study may be optimized for specific varieties and cultivation conditions, requiring further research to validate its generalization performance across diverse production systems.</p>
<p>Beyond precision agriculture, this panicle coverage-based framework holds promise for plant breeding applications. Drone-based image analysis can facilitate large-scale phenotyping and the identification of superior varieties with enhanced nitrogen use efficiency or stress tolerance (<xref ref-type="bibr" rid="B43">Zhang and Kovacs, 2012</xref>; <xref ref-type="bibr" rid="B16">Guan et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B5">Bak et&#xa0;al., 2023</xref>). Specifically, the ability to rapidly and non-destructively estimate parameters such as <italic>K</italic>, <italic>g</italic>, <italic>d<sub>0</sub>
</italic>, and <italic>d<sub>1</sub>
</italic> can accelerate the selection of genotypes with desirable growth characteristics.</p>
<p>In conclusion, this study provides a robust and adaptable framework for image-based rice phenotyping, with the potential to significantly improve both agricultural management and crop improvement efforts. By combining deep learning with time-series analysis, the proposed framework serves as a powerful tool for understanding and predicting rice yield, paving the way for more sustainable and efficient rice production.</p>
</sec>
<sec id="s5" sec-type="conclusions">
<label>5</label>
<title>Conclusions</title>
<p>This study established and validated a deep learning-based framework for accurate rice panicle segmentation and yield component prediction using time-series RGB imagery. The combination of semantic segmentation (particularly with DeepLabv3+ and LinkNet models) and a piecewise function to characterize the panicle coverage dynamics demonstrated high efficacy. The maximum panicle coverage (<italic>K</italic>) and time of maximum growth rate (<italic>d<sub>0</sub>
</italic>) derived from the piecewise function were the key predictors of the yield and yield components. Nonlinear regression models (RFR and XGBR) exhibited superior predictive performance relative to PLSR. The proposed framework offers a practical and easy-to-use approach for high-throughput phenotyping of rice, with significant potential for application to both precision agriculture (optimizing nitrogen management and planting strategies) and plant breeding (by accelerating the evaluation and selection of superior genotypes). Future research will focus on expanding the framework to incorporate additional environmental factors and imaging modalities and validating the approach across multiple locations and growing seasons.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>H-JB: Conceptualization, Data curation, Formal Analysis, Methodology, Software, Validation, Visualization, Writing &#x2013; original draft. E-JK: Writing &#x2013; original draft. J-HL: Writing &#x2013; original draft. SC: Writing &#x2013; review &amp; editing. DK: Writing &#x2013; original draft. W-JI: Writing &#x2013; original draft. WH: Writing &#x2013; review &amp; editing. J-KC: Writing &#x2013; review &amp; editing. N-JC: Writing &#x2013; review &amp; editing. W-GS: Project administration, Supervision, Writing &#x2013; review &amp; editing.</p>
</sec>
<sec id="s8" sec-type="funding-information">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research and/or publication of this article. This research was funded by the Rural Development Administration (RDA) of South Korea (grant number PJ01739902) and was supported by 2024 the RDA Fellowship Program of the National Institute of Crop Science (NICS), Rural Development Administration, Republic of Korea.</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>The authors are grateful to Professor Mo Youngjun of Jeonbuk National University for his valuable comments/suggestions.</p>
</ack>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s10" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declare that no Generative AI was used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s12" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fpls.2025.1611653/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fpls.2025.1611653/full#supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="Image1.tif" id="SF1" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;1</label>
<caption>
<p>Image acquisition site and equipment. <bold>(A)</bold> An aerial view of the experimental fields at the National Institute of Crop Science in Wanju-gun, Republic of Korea. <bold>(B)</bold> The fixed tower-mounted camera used for time-series imaging. <bold>(C)</bold> The handheld camera used for acquiring images for model training and validation.</p>
</caption>
</supplementary-material>
  <supplementary-material xlink:href="Image2.tif" id="SF2" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;2</label>
<caption>
<p>Architectures of semantic-segmentation networks evaluated in this study. <bold>(A)</bold> U-Net, <bold>(B)</bold> LinkNet, <bold>(C)</bold> PSPNet, <bold>(D)</bold> FPN, and <bold>(E)</bold> DeepLabv3+; each architecture uses a distinct approach to feature extraction and processing.</p>
</caption>
</supplementary-material>
  <supplementary-material xlink:href="Image3.tif" id="SF3" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;3</label>
<caption>
<p>Representative time-series RGB images of rice canopies under different experimental treatments in 2023 <bold>(A)</bold> and 2024 <bold>(B)</bold>. Images were taken on various dates after transplantation, as indicated on the left. Treatments include different nitrogen-fertilization levels (0N, 9N, 18N), late transplantation (LT), and different cultivars (NP: Nampyeong, DJ: Dongjin-1, SD: Shindongjin, SI: Saeilmi) under both field and soil-bin conditions. The &#x201c;F&#x201d; and &#x201c;S&#x201d; prefixes denote field and soil-bin experiments, respectively. The number before the hyphen represents the year.</p>
</caption>
</supplementary-material>
  <supplementary-material xlink:href="Image4.tif" id="SF4" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;4</label>
<caption>
<p>Model Training and Validation Curves. The plots show training loss, validation loss, and validation accuracy over 200 epochs for all five evaluated semantic segmentation models, each combined with ResNet-50 and ResNet-101 backbones. The consistent decrease in loss and stabilization of accuracy indicate successful model convergence.</p>
</caption>
</supplementary-material>
  <supplementary-material xlink:href="Image5.tif" id="SF5" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;5</label>
<caption>
<p>Qualitative comparison of segmentation results from different models. The representative input images (&#x201c;Image&#x201d;), ground truth segmentations (&#x201c;GT&#x201d;), and segmentation outputs of U-Net, LinkNet, PSPNet, FPN, and DeepLabv3+ (all using the ResNet-101 backbone) are shown.</p>
</caption>
</supplementary-material>
  <supplementary-material xlink:href="Image6.tif" id="SF6" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;6</label>
<caption>
<p>Detailed Regression Analysis for Yield Prediction. Regression plots compare the actual and predicted values for all yield and yield components using the four regression models (PLSR, RFR, GBR, and XGBR). Each plot shows the relationship between actual (x-axis) and predicted values (y-axis), with the red line indicating a perfect 1:1 relationship and the blue line representing the fitted regression.</p>
</caption>
</supplementary-material>
  <supplementary-material xlink:href="Image7.tif" id="SF7" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;7</label>
<caption>
<p>SHAP analysis for additional yield components. The figure shows the SHAP (SHapley Additive exPlanations) value analysis for the Random Forest Regressor (RFR) model, detailing the contribution of each piecewise function parameter (<italic>K, g, d<sub>0</sub>, d<sub>1</sub>
</italic>, and <italic>a</italic>) to the prediction of the number of grains per panicle (GNP), 1000-grain weight (TGW), and filled grain ratio (FGR). For each component, the left plot is a SHAP summary plot where each point represents a single observation, the x-axis indicates the impact on model output, and the color represents the feature value (high=red, low=blue). The right bar plot ranks the features by their mean absolute SHAP value, indicating their overall importance to the model&#x2019;s prediction.</p>
</caption>
</supplementary-material>
<supplementary-material xlink:href="Table1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document">
<label>Supplementary Table&#xa0;1</label>
<caption>
<p>Layer architecture of ResNet backbone networks.</p>
</caption>
</supplementary-material>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Abourabia</surname> <given-names>I.</given-names>
</name>
<name>
<surname>Ounacer</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Ellghomari</surname> <given-names>M. Y.</given-names>
</name>
<name>
<surname>Azzouazi</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2024</year>). &#x201c;<article-title>Enhancing deep learning-based semantic segmentation approaches for smart agriculture</article-title>,&#x201d; in <source>Engineering Applications of Artificial Intelligence</source>. Eds. <person-group person-group-type="editor">
<name>
<surname>Chakir</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Andry</surname> <given-names>J. F.</given-names>
</name>
<name>
<surname>Ullah</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Bansal</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Ghazouani</surname> <given-names>M.</given-names>
</name>
</person-group> (<publisher-loc>Cham, Switzerland</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>395</fpage>&#x2013;<lpage>406</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/978-3-031-50300-9_21</pub-id>
</citation></ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Alzubaidi</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Humaidi</surname> <given-names>A. J.</given-names>
</name>
<name>
<surname>Al-Dujaili</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Duan</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Al-Shamma</surname> <given-names>O.</given-names>
</name>
<etal/>
</person-group>. (<year>2021</year>). <article-title>Review of deep learning: Concepts, CNN architectures, challenges, applications, future directions</article-title>. <source>J. Big. Data</source> <volume>8</volume>, <elocation-id>53</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s40537-021-00444-8</pub-id>, PMID: <pub-id pub-id-type="pmid">33816053</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bak</surname> <given-names>H.-J.</given-names>
</name>
<name>
<surname>Kwon</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Im</surname> <given-names>W.-J.</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>J.-H.</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>E.-J.</given-names>
</name>
<name>
<surname>Chung</surname> <given-names>N.-J.</given-names>
</name>
<etal/>
</person-group>. (<year>2024</year>a). <article-title>Evaluation of planting distance in rice paddies using deep learning-based drone imagery</article-title>. <source>Korean. J. Crop Sci.</source> <volume>69</volume>, <fpage>154</fpage>&#x2013;<lpage>162</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.7740/kjcs.2024.69.3.154</pub-id>
</citation></ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bak</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Sang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Kwon</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Im</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>E.</given-names>
</name>
<etal/>
</person-group>. (<year>2024</year>b). <article-title>Classification of nitrogen treatments in rice at different growth stages using UAV-based multispectral imaging</article-title>. <source>Korean. J. Agric. For. Meteorol.</source> <volume>26</volume>, <fpage>219</fpage>&#x2013;<lpage>227</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.5532/KJAFM.2024.26.4.219</pub-id>
</citation></ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bak</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Sang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Chang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Kwon</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Im</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>J.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <article-title>Estimation of rice heading date of paddy rice from slanted and top-view images using deep learning classification model</article-title>. <source>Korean. J. Agric. For. Meteorol.</source> <volume>25</volume>, <fpage>337</fpage>&#x2013;<lpage>345</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.5532/KJAFM.2023.25.4.337</pub-id>
</citation></ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Benti</surname> <given-names>N. E.</given-names>
</name>
<name>
<surname>Chaka</surname> <given-names>M. D.</given-names>
</name>
<name>
<surname>Semie</surname> <given-names>A. G.</given-names>
</name>
<name>
<surname>Warkineh</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Soromessa</surname> <given-names>T.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Transforming agriculture with machine learning, deep learning, and IoT: Perspectives from Ethiopia&#x2014;Challenges and opportunities</article-title>. <source>Discov. Agric.</source> <volume>2</volume>, <fpage>63</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s44279-024-00066-7</pub-id>
</citation></ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Breiman</surname> <given-names>L.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>Random forests</article-title>. <source>Mach. Learn.</source> <volume>45</volume>, <fpage>5</fpage>&#x2013;<lpage>32</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1023/A:1010933404324</pub-id>
</citation></ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chaurasia</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Culurciello</surname> <given-names>E.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>LinkNet: Exploiting encoder representations for efficient semantic segmentation</article-title>. <source>arXiv. preprint. arXiv:1707.03718</source>. Available online at: <uri xlink:href="https://arxiv.org/abs/1707.03718">https://arxiv.org/abs/1707.03718</uri> (Accessed <access-date>November 17, 2024</access-date>).</citation></ref>
<ref id="B9">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Guestrin</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>XGBoost: A scalable tree boosting system</article-title>,&#x201d; in <conf-name>Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</conf-name>, (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>785</fpage>&#x2013;<lpage>794</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1145/2939672.2939785</pub-id>
</citation></ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>L.-C.</given-names>
</name>
<name>
<surname>Papandreou</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Kokkinos</surname> <given-names>I.</given-names>
</name>
<name>
<surname>Murphy</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Yuille</surname> <given-names>A. L.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>DeepLab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell.</source> <volume>40</volume>, <fpage>834</fpage>&#x2013;<lpage>848</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/tpami.2017.2699184</pub-id>, PMID: <pub-id pub-id-type="pmid">28463186</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>&#xc7;olak</surname> <given-names>Y. B.</given-names>
</name>
<name>
<surname>Yazar</surname> <given-names>A.</given-names>
</name>
<name>
<surname>&#xc7;olak</surname> <given-names>&#x130;.</given-names>
</name>
<name>
<surname>Ak&#xe7;a</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Duraktekin</surname> <given-names>G.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Evaluation of crop water stress index (CWSI) for eggplant under varying irrigation regimes using surface and subsurface drip systems</article-title>. <source>Agric. Agric. Sci. Proc.</source> <volume>4</volume>, <fpage>372</fpage>&#x2013;<lpage>382</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aaspro.2015.03.042</pub-id>
</citation></ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dorigo</surname> <given-names>W. A.</given-names>
</name>
<name>
<surname>Zurita-Milla</surname> <given-names>R.</given-names>
</name>
<name>
<surname>de Wit</surname> <given-names>A. J. W.</given-names>
</name>
<name>
<surname>Brazile</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Singh</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Schaepman</surname> <given-names>M. E.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>A review on reflective remote sensing and data assimilation techniques for enhanced agroecosystem modeling</article-title>. <source>Int. J. Appl. Earth Observ. Geoinform.</source> <volume>9</volume>, <fpage>165</fpage>&#x2013;<lpage>193</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jag.2006.05.003</pub-id>
</citation></ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Friedman</surname> <given-names>J. H.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>Greedy function approximation: A gradient boosting machine</article-title>. <source>Ann. Statist.</source> <volume>29</volume>, <fpage>1189</fpage>&#x2013;<lpage>1232</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1214/aos/1013203451</pub-id>
</citation></ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gitelson</surname> <given-names>A. A.</given-names>
</name>
<name>
<surname>Kaufman</surname> <given-names>Y. J.</given-names>
</name>
<name>
<surname>Merzlyak</surname> <given-names>M. N.</given-names>
</name>
</person-group> (<year>1996</year>). <article-title>Use of a green channel in remote sensing of global vegetation from EOS-MODIS</article-title>. <source>Remote Sens. Environ.</source> <volume>58</volume>, <fpage>289</fpage>&#x2013;<lpage>298</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S0034-4257(96)00072-7</pub-id>
</citation></ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gu</surname> <given-names>H. M.</given-names>
</name>
<name>
<surname>You</surname> <given-names>O. J.</given-names>
</name>
<name>
<surname>Park</surname> <given-names>J. H.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Physiological and ecological comparison of rice cultivars grown in low fertilized condition</article-title>. <source>J. Pract. Agric. Fish. Res.</source> <volume>20</volume>, <fpage>175</fpage>&#x2013;<lpage>185</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.23097/JPAF.2018.20(1):175</pub-id>
</citation></ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guan</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Fukami</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Matsunaka</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Okami</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Tanaka</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Nakano</surname> <given-names>H.</given-names>
</name>
<etal/>
</person-group>. (<year>2019</year>). <article-title>Assessing correlation of high-resolution NDVI with fertilizer application level and yield of rice and wheat crops using small UAVs</article-title>. <source>Remote Sens.</source> <volume>11</volume>, <elocation-id>112</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs11020112</pub-id>
</citation></ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guo</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Dong</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Ye</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Ma</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>B.</given-names>
</name>
<etal/>
</person-group>. (<year>2021</year>). <article-title>Wheat yellow rust detection using UAV-based hyperspectral technology</article-title>. <source>Remote Sens.</source> <volume>13</volume>, <elocation-id>123</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs13010123</pub-id>
</citation></ref>
<ref id="B18">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>He</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Ren</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>Deep residual learning for image recognition</article-title>,&#x201d; in <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</source> (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>770</fpage>&#x2013;<lpage>778</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/CVPR.2016.90</pub-id>
</citation></ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jangra</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Chaudhary</surname> <given-names>V.</given-names>
</name>
<name>
<surname>Yadav</surname> <given-names>R. C.</given-names>
</name>
<name>
<surname>Yadav</surname> <given-names>N. R.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>High-throughput phenotyping: A platform to accelerate crop improvement</article-title>. <source>Phenomics</source> <volume>1</volume>, <fpage>31</fpage>&#x2013;<lpage>53</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s43657-020-00007-6</pub-id>, PMID: <pub-id pub-id-type="pmid">36939738</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ji</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Xiao</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Gu</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>H.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <article-title>Translocation and distribution of carbon-nitrogen in relation to rice yield and grain quality as affected by high temperature at early panicle initiation stage</article-title>. <source>Rice Sci.</source> <volume>30</volume>, <fpage>598</fpage>&#x2013;<lpage>612</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.rsci.2023.06.003</pub-id>
</citation></ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>C. K.</given-names>
</name>
<name>
<surname>Sang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Shin</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Cho</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Seo</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Introduction to empirical approach to estimate rice yield and comparison with remote sensing approach</article-title>. <source>Korean. J. Remote Sens.</source> <volume>33</volume>, <fpage>701</fpage>&#x2013;<lpage>717</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.7780/kjrs.2017.33.5.2.12</pub-id>
</citation></ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lei</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Shen</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Fu</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Deep learning implementation of image segmentation in agricultural applications: A comprehensive review</article-title>. <source>Artif. Intell. Rev.</source> <volume>57</volume>, <fpage>149</fpage>&#x2013;<lpage>167</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10462-024-10775-6</pub-id>
</citation></ref>
<ref id="B23">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Lin</surname> <given-names>T. Y.</given-names>
</name>
<name>
<surname>Doll&#xe1;r</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Girshick</surname> <given-names>R.</given-names>
</name>
<name>
<surname>He</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Hariharan</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Belongie</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Feature pyramid networks for object detection</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>, <conf-loc>Honolulu, HI, USA</conf-loc>, <conf-date>21-26 July 2017</conf-date>, <fpage>2117</fpage>&#x2013;<lpage>2125</lpage>. Available online at: <uri xlink:href="https://arxiv.org/abs/1612.03144">https://arxiv.org/abs/1612.03144</uri> (Accessed <access-date>November 20, 2024</access-date>).</citation></ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lin</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>C.-T.</given-names>
</name>
<name>
<surname>Adams</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Kouzani</surname> <given-names>A. Z.</given-names>
</name>
<name>
<surname>Jiang</surname> <given-names>R.</given-names>
</name>
<name>
<surname>He</surname> <given-names>L.</given-names>
</name>
<etal/>
</person-group>. (<year>2023</year>). <article-title>Self-supervised leaf segmentation under complex lighting conditions</article-title>. <source>Pattern Recognit.</source> <volume>135</volume>, <elocation-id>109021</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.patcog.2022.109021</pub-id>
</citation></ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Plant diseases and pests detection based on deep learning: A review</article-title>. <source>Plant Methods</source> <volume>17</volume>, <elocation-id>22</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13007-021-00722-9</pub-id>, PMID: <pub-id pub-id-type="pmid">33627131</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Shen</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Xie</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Shu</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>S.</given-names>
</name>
<etal/>
</person-group>. (<year>2024</year>). <article-title>Phenotyping of panicle number and shape in rice breeding materials based on unmanned aerial vehicle imagery</article-title>. <source>Plant Phenomics.</source> <volume>6</volume>, <elocation-id>0265</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.34133/plantphenomics.0265</pub-id>, PMID: <pub-id pub-id-type="pmid">39449974</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Madokoro</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Takahashi</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Yamamoto</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Nix</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Chiyonobu</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Saruta</surname> <given-names>K.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Semantic segmentation of agricultural images based on style transfer using conditional and unconditional generative adversarial networks</article-title>. <source>Appl. Sci.</source> <volume>12</volume>, <elocation-id>7785</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/app12157785</pub-id>
</citation></ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mohammadzadeh Babr</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Faghihabdolahi</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Risti&#x107;-Durrant</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Michels</surname> <given-names>K.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Deep learning-based occlusion handling of overlapped plants for robotic grasping</article-title>. <source>Appl. Sci.</source> <volume>12</volume>, <elocation-id>3655</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/app12073655</pub-id>
</citation></ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Park</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Ryu</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Fuentes</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Chung</surname> <given-names>H.</given-names>
</name>
<name>
<surname>O&#x2019;Connell</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Dependence of CWSI-based plant water stress estimation with diurnal acquisition times in a nectarine orchard</article-title>. <source>Remote Sens.</source> <volume>13</volume>, <elocation-id>2775</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/rs13142775</pub-id>
</citation></ref>
<ref id="B30">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ronneberger</surname> <given-names>O.</given-names>
</name>
<name>
<surname>Fischer</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Brox</surname> <given-names>T.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>U-Net: Convolutional networks for biomedical image segmentation</article-title>. <source>Medical Image Computing and Computer-Assisted Intervention &#x2013; MICCAI 2015</source>, eds. <person-group person-group-type="editor">
<name>
<surname>Navab</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Hornegger</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Wells</surname> <given-names>W. M.</given-names>
</name>
<name>
<surname>Frangi</surname> <given-names>A. F.</given-names>
</name>
</person-group> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>), <volume>9351</volume>, <fpage>234</fpage>&#x2013;<lpage>241</lpage>. Available online at: <uri xlink:href="https://arxiv.org/abs/1505.04597">https://arxiv.org/abs/1505.04597</uri> (Accessed <access-date>November 16, 2024</access-date>).</citation></ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Russell</surname> <given-names>B. C.</given-names>
</name>
<name>
<surname>Torralba</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Murphy</surname> <given-names>K. P.</given-names>
</name>
<name>
<surname>Freeman</surname> <given-names>W. T.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>LabelMe: A database and web-based tool for image annotation</article-title>. <source>Int. J. Comput. Vis.</source> <volume>77</volume>, <fpage>157</fpage>&#x2013;<lpage>173</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11263-007-0090-8</pub-id>
</citation></ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shorten</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Khoshgoftaar</surname> <given-names>T. M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A survey on image data augmentation for deep learning</article-title>. <source>J. Big. Data</source> <volume>6</volume>, <fpage>60</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s40537-019-0197-0</pub-id>
</citation></ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Song</surname> <given-names>E.</given-names>
</name>
<name>
<surname>Shao</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Dai</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Lu</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Estimation of plant height and biomass of rice using unmanned aerial vehicle</article-title>. <source>Agronomy</source> <volume>14</volume>, <elocation-id>145</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy14010145</pub-id>
</citation></ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Srivastava</surname> <given-names>A. K.</given-names>
</name>
<name>
<surname>Safaei</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Khaki</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Lopez</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Zeng</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Ewert</surname> <given-names>F.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Winter wheat yield prediction using convolutional neural networks from environmental and phenological data</article-title>. <source>Sci. Rep.</source> <volume>12</volume>, <fpage>3215</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-022-06249-w</pub-id>, PMID: <pub-id pub-id-type="pmid">35217689</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Stepanov</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Dubrovin</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Sorokin</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Function fitting for modeling seasonal normalized difference vegetation index time series and early forecasting of soybean yield</article-title>. <source>Crop J.</source> <volume>10</volume>, <fpage>1452</fpage>&#x2013;<lpage>1459</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cj.2021.12.013</pub-id>
</citation></ref>
<ref id="B36">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Vishal</surname> <given-names>M. K.</given-names>
</name>
<name>
<surname>Tamboli</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Patil</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Saluja</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Banerjee</surname> <given-names>B.</given-names>
</name>
<name>
<surname>Sethi</surname> <given-names>A.</given-names>
</name>
<etal/>
</person-group>. (<year>2020</year>). &#x201c;<article-title>Image-based phenotyping of diverse rice (Oryza sativa L.) genotypes</article-title>,&#x201d; in <source>Proc. International Conference on Learning Representations (ICLR)</source>. arXiv:2004.02498. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2004.02498</pub-id>
</citation></ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Lyu</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Ren</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Paddy rice imagery dataset for panicle segmentation</article-title>. <source>Agronomy</source> <volume>11</volume>, <elocation-id>1542</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy11081542</pub-id>
</citation></ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Lv</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Liang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>G.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Field rice panicle detection and counting based on deep learning</article-title>. <source>Front. Plant Sci.</source> <volume>13</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2022.966495</pub-id>, PMID: <pub-id pub-id-type="pmid">36035660</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wei</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Tian</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Ren</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Gao</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Ji</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Kong</surname> <given-names>Q.</given-names>
</name>
<etal/>
</person-group>. (<year>2024</year>). <article-title>A precise plot-level rice yield prediction method based on panicle detection</article-title>. <source>Agronomy</source> <volume>14</volume>, <elocation-id>1618</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy14081618</pub-id>
</citation></ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wold</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Sj&#xf6;str&#xf6;m</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Eriksson</surname> <given-names>L.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>PLS-regression: A basic tool of chemometrics</article-title>. <source>Chemom. Intell. Lab. Syst.</source> <volume>58</volume>, <fpage>109</fpage>&#x2013;<lpage>130</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S0169-7439(01)00155-1</pub-id>
</citation></ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Yu</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Ye</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhai</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Duan</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>L.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Panicle-3D: A low-cost 3D-modeling method for rice panicles based on deep learning, shape from silhouette, and supervoxel clustering</article-title>. <source>Crop J.</source> <volume>10</volume>, <fpage>1386</fpage>&#x2013;<lpage>1398</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cj.2022.02.007</pub-id>
</citation></ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Xu</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Z.</given-names>
</name>
<name>
<surname>Meng</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Song</surname> <given-names>X.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>). <article-title>Remote sensing prescription for rice nitrogen fertilizer recommendation based on improved NFOA model</article-title>. <source>Agronomy</source> <volume>12</volume>, <elocation-id>1804</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy12081804</pub-id>
</citation></ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Kovacs</surname> <given-names>J. M.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>The application of small unmanned aerial systems for precision agriculture: A review</article-title>. <source>Precis. Agric.</source> <volume>13</volume>, <fpage>693</fpage>&#x2013;<lpage>712</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11119-012-9274-5</pub-id>
</citation></ref>
<ref id="B44">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Shi</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Qi</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>X.</given-names>
</name>
<name>
<surname>Jia</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Pyramid scene parsing network</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>, <conf-loc>Honolulu, HI, USA</conf-loc>, <conf-date>21-26 July 2017</conf-date>, <fpage>2881</fpage>&#x2013;<lpage>2890</lpage>. Available online at: <uri xlink:href="https://arxiv.org/abs/1612.01105">https://arxiv.org/abs/1612.01105</uri> (Accessed <access-date>November 9, 2024</access-date>).</citation></ref>
</ref-list>
</back>
</article>