<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2025.1730366</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Deep learning-based methods for phenotypic trait extraction in rice panicles</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Wang</surname><given-names>Zhiao</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3251529/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Ruihang</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Wei</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Ma</surname><given-names>Xiaoding</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1653873/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Yan</surname><given-names>Shen</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2252578/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Maomao</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2931328/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Hu</surname><given-names>Binhua</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3260776/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Tang</surname><given-names>Ming</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhou</surname><given-names>Guomin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Wang</surname><given-names>Jian</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zhang</surname><given-names>Jianhua</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3164007/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Agricultural Information Institute, Chinese Academy of Agricultural Sciences/National Agricultural Science Data Center</institution>, <city>Beijing</city>,&#xa0;<country country="cn">China</country></aff>
<aff id="aff2"><label>2</label><institution>Sanya Nanfan Research Institute, Chinese Academy of Agricultural Sciences</institution>, <city>Sanya</city>, <state>Hainan</state>,&#xa0;<country country="cn">China</country></aff>
<aff id="aff3"><label>3</label><institution>Institute of Crop Sciences, Chinese Academy of Agricultural Sciences</institution>, <city>Beijing</city>,&#xa0;<country country="cn">China</country></aff>
<aff id="aff4"><label>4</label><institution>Rice Research Institute, Jiangxi Academy of Agricultural Sciences/Jiangxi Crop Germplasm Resources Research Center</institution>, <city>Nanchang</city>,&#xa0;<country country="cn">China</country></aff>
<aff id="aff5"><label>5</label><institution>Biotechnology and Nuclear Technology Research Institute, Sichuan Academy of Agricultural Sciences</institution>, <city>Chengdu</city>,&#xa0;<country country="cn">China</country></aff>
<aff id="aff6"><label>6</label><institution>Heilongjiang Academy of Agricultural Sciences</institution>, <city>Harbin</city>, <state>Heilongjiang</state>,&#xa0;<country country="cn">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Jianhua Zhang, <email xlink:href="mailto:zhangjianhua@caas.cn">zhangjianhua@caas.cn</email>; Xiaoding Ma, <email xlink:href="mailto:maxiaoding@caas.cn">maxiaoding@caas.cn</email>; Shen Yan, <email xlink:href="mailto:yanshen@caas.cn">yanshen@caas.cn</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-12">
<day>12</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>16</volume>
<elocation-id>1730366</elocation-id>
<history>
<date date-type="received">
<day>22</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>20</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>15</day>
<month>11</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Wang, Li, Li, Ma, Yan, Li, Hu, Tang, Zhou, Wang and Zhang.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Wang, Li, Li, Ma, Yan, Li, Hu, Tang, Zhou, Wang and Zhang</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-12">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Key rice panicle traits (grain number, panicle length, grain dimensions, maturity) determine yield and quality, and high-precision/high-throughput measurement is critical for rice breeding. Traditional methods are labor-intensive, time-consuming, and prone to errors.</p>
</sec>
<sec>
<title>Methods</title>
<p>A dataset of 5300 rice panicle images (loose/normal/dense types;&#xa0;milk/dough/full maturity/over-ripe stages) was constructed, with 3290 for training, 940 for validation, and 470 for testing. A deep learning pipeline integrating object detection, dynamic DFS pruning, and linear regression was developed.</p>
</sec>
<sec>
<title>Results</title>
<p>The panicle length extraction achieved R&#xb2;=0.9583, RMSE=5.69 mm. Grain counting R&#xb2; values were 0.9799 (loose), 0.9551 (normal), 0.9278 (dense). Grain length R&#xb2;=0.8823, grain width MAPE=6.64%. The OPG-YOLOv8 model enhanced small object detection accuracy.</p>
</sec>
<sec>
<title>Discussion</title>
<p>This study provides a comprehensive, automated tool for rice panicle phenotyping, addressing occlusion challenges and bridging the gap between advanced models and breeding applications.</p>
</sec>
</abstract>
<kwd-group>
<kwd>deep learning</kwd>
<kwd>rice</kwd>
<kwd>phenotypic trait</kwd>
<kwd>panicle traits</kwd>
<kwd>precision extraction</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>National Key Research and Development Program of China</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100012166</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<award-group id="gs2">
<funding-source id="sp2">
<institution-wrap>
<institution>Natural Science Foundation of Hainan Province</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100004761</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<funding-statement>The author(s) declared that financial support was received for&#xa0;this work and/or its publication. This work was supported by&#xa0;the National Key Research and Development Program (2022YFF0711805, 2022YFD1600302); Hainan Provincial Natural Science Foundation (325MS155); Science and Technology Special Project Funding of Sanya Yazhou Bay Science and Technology City (SCKJ-JYRC-2023-45); Nanfan Special Projects of the National Nanfan Research Institute, Chinese Academy of Agricultural Sciences in Sanya (YBXM2409, YBXM2410, YBXM2430, YBXM2508, YBXM2509); and Special Projects of Basic Scientific Research Operating Expenses for Public Welfare Research Institutes at the Central Level (JBYW-AII-2024-05, JBYW-AII-2025-05, Y2025YC90). Sichuan Science and Technology Program 2021YFYZ0027.</funding-statement>
</funding-group>
<counts>
<fig-count count="8"/>
<table-count count="6"/>
<equation-count count="15"/>
<ref-count count="31"/>
<page-count count="16"/>
<word-count count="8118"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Technical Advances in Plant Science</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Rice is widely cultivated globally (<xref ref-type="bibr" rid="B20">Tang et&#xa0;al., 2023</xref>), accounting for approximately 30% of the total grain planting area and serving&#xa0;as the staple food for over half of the world&#x2019;s population (<xref ref-type="bibr" rid="B29">Yu et&#xa0;al., 2025</xref>). In China, rice is one of the most important food crops, occupying 30% of the national grain planting area and contributing 40% of the total grain yield (<xref ref-type="bibr" rid="B30">Zhang et&#xa0;al., 2024</xref>). Cultivating high-yield and high-quality rice varieties is an effective approach to addressing food security challenges (<xref ref-type="bibr" rid="B1">Ayumi et&#xa0;al., 2023</xref>). Rice&#xa0;panicle phenotypic traits are directly related to yield characteristics, making their acquisition a critical step in rice breeding research. These traits include panicle morphology and grain characteristics, such as panicle length, grain number per panicle, grain length, grain width, and maturity (<xref ref-type="bibr" rid="B21">Thesma et&#xa0;al., 2024</xref>). Therefore, rapid and accurate extraction of rice panicle phenotypic traits plays a pivotal role in rice breeding.</p>
<p>Traditional methods for measuring panicle traits, such as manual measurement with rulers and calipers, can evaluate rice panicle characteristics (<xref ref-type="bibr" rid="B26">Xiong, 2023</xref>), but these methods are labor-intensive, time-consuming, prone to grain damage, and susceptible to errors (<xref ref-type="bibr" rid="B13">Peng and Xie, 2020</xref>). In recent years, the integration of computer vision and artificial intelligence has emerged as a transformative solution in crop phenomics (<xref ref-type="bibr" rid="B31">Zhao et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B27">Yang et&#xa0;al., 2020</xref>). Specifically, deep learning has advanced high-throughput phenotyping technologies (<xref ref-type="bibr" rid="B11">Lu et&#xa0;al., 2023</xref>). Various deep learning methods, including seed counting, size determination, and panicle segmentation, have demonstrated impressive accuracy in crop trait detection (<xref ref-type="bibr" rid="B25">Wu et&#xa0;al., 2020</xref>). Similar deep learning-based approaches have also achieved success in other major crops, such as wheat ear counting (<xref ref-type="bibr" rid="B14">Pound et&#xa0;al., 2017</xref>; <xref ref-type="bibr" rid="B6">Ghosal et&#xa0;al., 2019</xref>). Current research efforts focus on extracting panicle traits from threshed rice panicles. For instance, <xref ref-type="bibr" rid="B10">Liu et&#xa0;al. (2017)</xref> developed a mobile application for rice and wheat grain counting with an error rate below 2%. <xref ref-type="bibr" rid="B19">Tan et&#xa0;al. (2019)</xref> proposed a method combining watershed and corner detection algorithms with neural network classification, achieving an average accuracy of 94.63%. However, the threshing process may damage rice grains, compromising result accuracy. Extracting traits from intact rice panicles is crucial. To address this, <xref ref-type="bibr" rid="B7">Gong et&#xa0;al. 
(2018)</xref> introduced a wavelet-based method to correct panicle area and edge contours, requiring manual panicle shaping and branch fixation for precise counting, with an average accuracy of 94%. Similarly, <xref ref-type="bibr" rid="B11">Lu et&#xa0;al. (2023)</xref> proposed a high-precision analysis method using visible light scanning and deep learning, achieving an R&#xb2; of 0.99 between actual and detected grain counts. Despite these advancements, manual shaping remains time-consuming and costly, hindering large-scale phenotyping.</p>
<p>Recent studies have made progress in non-destructive extraction of rice panicle traits. For example, <xref ref-type="bibr" rid="B18">Sun et&#xa0;al. (2024b)</xref> proposed a high-throughput, non-destructive method (EOPT) for efficient and accurate panicle trait extraction, achieving an average grain counting accuracy of 93.57%, a mean absolute percentage error (MAPE) of 6.62%, and high precision in grain and panicle length measurements. <xref ref-type="bibr" rid="B17">Sun et&#xa0;al. (2024a)</xref> introduced an innovative method integrating object detection, image classification, and regression equations for accurate grain counting in natural morphology, with a counting accuracy of 92.60% and an MAPE of 7.69%. <xref ref-type="bibr" rid="B25">Wu et&#xa0;al. (2020)</xref> utilized transfer learning and Faster R-CNN for wheat grain counting across multiple scenarios, achieving an average accuracy of 91%. <xref ref-type="bibr" rid="B12">Misra et&#xa0;al. (2020)</xref> introduced SpikeSegNet for wheat spike detection, reaching a counting accuracy of 95%. While existing studies have made significant progress in non-destructive rice panicle trait extraction, they still face certain challenges. Many methods focus on the extraction of a single trait, with insufficient comprehensive and simultaneous analysis of multiple key phenotypic traits. Additionally, most existing models remain at the algorithm level, lacking an end-to-end and automated integrated platform that covers the entire process from image acquisition to data analysis&#x2014;and this limits their large-scale application in practical breeding work. Thus, the main challenge and contribution of this study lie in designing a general model capable of efficiently and accurately extracting multi-dimensional phenotypic traits, and encapsulating it in a user-friendly web-based system. 
This thereby provides one-stop technical support for breeding experts and truly advances the intelligent process of rice breeding.</p>
<p>Rice panicle structures vary significantly among cultivars, ranging from compact to loose morphologies (<xref ref-type="bibr" rid="B12">Misra et&#xa0;al., 2020</xref>). The primary challenge in rice panicle phenotyping lies in designing a universal model for efficient and accurate extraction of diverse panicle traits. In this study, we propose a deep learning-based method integrating object detection, dynamic DFS pruning, and linear regression. The panicle length extraction module combines image preprocessing with skeletonization and dynamic DFS pruning for length calculation. The grain counting module employs the OPG-YOLOv8 algorithm to enhance small object detection accuracy, incorporating panicle sparsity and regression models for grain number prediction. The grain length, width, and maturity extraction modules filter samples based on grain detection results, extract grain dimensions through a series of operations, and quantify yellowness to determine maturity. Finally, a web platform built on the Django framework and Python language provides an integrated service from data acquisition to cloud-based intelligent analysis, offering technical support for rice breeding and advancing intelligent and digitalized breeding processes.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Dataset construction and methods for extracting rice panicle phenotypic traits</title>
<sec id="s2_1">
<label>2.1</label>
<title>Dataset construction</title>
<sec id="s2_1_1">
<label>2.1.1</label>
<title>Image data acquisition and processing</title>
<p>The rice samples used in this study were collected from the rice germplasm resources team at the Institute of Crop Sciences, Chinese Academy of Agricultural Sciences. Field data collection was conducted in September 2024 at the Yichun Gao&#x2019;an Base in Nanchang, Jiangxi Province (28.3&#xb0;N, 115.1&#xb0;E), where phenotypic data were acquired from 1,772 rice germplasm accessions at maturity, including 1,338 japonica and 434 indica varieties, with a total of 4,700 panicle images collected. Supplementary validation data were collected from the Modern Agricultural Science and Technology Innovation Demonstration Park of Sichuan Academy of Agricultural Sciences (30.8&#xb0;N, 104.2&#xb0;E), an independent cross-region validation base&#x2014;covering 200 local rice germplasm accessions (120 japonica and 80 indica varieties) with a total of 600 panicle images (exclusively for generalization verification, not involved in model training). Post-harvest panicle imaging was performed indoors under LED lighting (<xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref>). A smartphone mounted at a fixed distance of 30&#xa0;cm from the samples captured images at a resolution of 3024&#xd7;4032 pixels, with automatic ISO and shutter speed adjustment. Panicles were placed on a uniform black background alongside a 30 mm-diameter red calibration board to enable pixel-to-physical size conversion. A white numbered plate corresponding to field plot IDs ensured data consistency with planting records.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Rice panicle image acquisition workflow. <bold>(A)</bold> The imaging setup. <bold>(B)</bold> A raw image captured with the setup, featuring a red circular calibration board (30&#xa0;mm in diameter) used as a size reference. <bold>(C)</bold> The final cropped panicle image used for trait extraction.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1730366-g001.tif">
<alt-text content-type="machine-generated">Panel A depicts a smartphone mounted above a black surface capturing an image of a plant stem and seed heads. A ruler, a red disc, and numbered tags are aligned beside it. Panel B shows a closer view with two tags, a red disc, and a plant stem next to a ruler. Panel C provides a close-up of the plant stem and a red disc against the black background.</alt-text>
</graphic></fig>
<p>All captured images underwent a manual preliminary screening conducted by two trained researchers to ensure data quality. The screening criteria were to discard any image exhibiting: (1) significant motion blur where grain edges were not clearly visible; (2) poor focus across the majority of the panicle; (3) severe overexposure or underexposure that obscured grain details. This qualitative but standardized filtering step was crucial for removing unsuitable samples before further processing. Remaining images were cropped to isolate panicle regions and renumbered, yielding 4,700 high-quality images for subsequent processing.</p>
</sec>
<sec id="s2_1_2">
<label>2.1.2</label>
<title>Manual measurement of rice panicle traits</title>
<p>When using deep learning to obtain rice panicle traits, manually measured data of rice panicle traits are required to provide accurate real-world data for model training and testing. Therefore, when manually measuring rice panicle traits, we tried our best to keep the panicles intact, avoiding breaking or damaging them, and placed the panicles flat on the table. (1) Regarding the measurement of panicle length, 400 rice panicle samples were selected. A ruler with an accuracy of 1&#xa0;mm was used to measure the length from the base to the top of the panicle parallel to the panicle to obtain the panicle length data. (2) For the counting of grains on the panicle, the number of grains on 600 rice panicles was manually counted. (3) When measuring the length and width of panicle grains, 240 rice panicle samples were selected, and a vernier caliper was used to measure the grains on the panicles. (4) In terms of evaluating the maturity of rice panicles, three rice breeding experts manually evaluated the maturity of rice panicle images to determine the maturity level of the panicles. (5) For the identification of rice panicle types, three rice breeding experts manually classified 3000 selected rice panicle images, with the classification criteria and typical panicle morphologies shown in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>. The first type was &#x201c;loose&#x201d;, where the grains had slight mutual occlusion; the second type had the characteristic of &#x201c;normal&#x201d;, with a large amount of mutual occlusion between grains; the third type was &#x201c;dense&#x201d;, with severe mutual occlusion between grains, and the appearance was similar to a rod.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>The three panicle types in this study. <bold>(A)</bold> Type 1 Panicles. <bold>(B)</bold> Type 2 Panicles. <bold>(C)</bold> Type 3 Panicles.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1730366-g002.tif">
<alt-text content-type="machine-generated">Three images of rice stems labeled A, B, and C. The stem in A is green with unripe grains. B shows partially ripened brown grains. C features a fully ripened stem with dried grains. All against a black background.</alt-text>
</graphic></fig>
</sec>
<sec id="s2_1_3">
<label>2.1.3</label>
<title>Construction of the dataset of rice panicle traits</title>
<p>In this study, the 4,700 rice panicle images were partitioned according to their specific applications. A subset of images was annotated using LabelImg for distinct analytical purposes, with detailed allocations presented in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>. (1) A total of 1,000 images were designated for grain detection and performance evaluation across different models, specifically allocated as 700 for training the grain detection model, 200 for validation, and 100 for testing. (2) Among these, 3,000 images were utilized for panicle category detection, comprising 2,100 for training, 600 for validation, and 300 for testing. (3) From the 2,100 manually measured sample images, 400 were employed to validate the panicle length extraction algorithm, 660 for regression modeling, 240 for verifying the length-width extraction algorithm, and 1,200 for panicle grain maturity analysis.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Details of dataset and images used in this study.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Dataset type</th>
<th valign="middle" align="center">Sampled quantity (Images)</th>
<th valign="middle" align="center">Specific usage and subdivision allocation</th>
<th valign="middle" align="center">Notes</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Original filtered image set</td>
<td valign="middle" align="left">5300</td>
<td valign="middle" align="left">High-quality rice panicle images obtained after deblurring and cropping, providing basic data for all sub-experiments</td>
<td valign="middle" align="left">None</td>
</tr>
<tr>
<td valign="middle" align="left">Panicle type classification sub-dataset</td>
<td valign="middle" align="left">3000</td>
<td valign="middle" align="left">For classifying rice panicle types (loose/normal/dense): 2100 images for training, 600 for validation, 300 for testing</td>
<td valign="middle" align="left">Randomly sampled from the original filtered image set</td>
</tr>
<tr>
<td valign="middle" align="left">Object detection and multi-model comparison sub-dataset</td>
<td valign="middle" align="left">1000</td>
<td valign="middle" align="left">Annotation for grain object detection (700 images for training, 200 for validation, 100 for testing); Multi-model performance comparison experiments</td>
<td valign="middle" align="left">Randomly sampled from the original filtered image set; dual-purpose for two experiments</td>
</tr>
<tr>
<td valign="middle" align="left">Phenotypic trait extraction validation sub-dataset</td>
<td valign="middle" align="left">2100</td>
<td valign="middle" align="left">Validation of panicle length extraction algorithm (400 images); Validation of grain length and width extraction (240 images); Grain maturity analysis (1200 images)</td>
<td valign="middle" align="left">Randomly sampled from the original filtered image set; covers multi-trait validation</td>
</tr>
<tr>
<td valign="middle" align="left">Independent cross-region validation dataset</td>
<td valign="middle" align="left">600</td>
<td valign="middle" align="left">Independent cross-region generalization verification</td>
<td valign="middle" align="left">Only used for independent validation of trait extraction accuracy</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Methods for extracting rice panicle phenotypic traits</title>
<p>Rice panicle phenotypic traits such as grain number per panicle,&#xa0;panicle length, grain length, and grain width are core indicators for evaluating rice yield and quality, and their accurate&#xa0;extraction is crucial for rice breeding selection. This section focuses on three key traits and proposes corresponding extraction methods, covering the entire process from image preprocessing to trait quantification, ensuring the efficiency and precision of extraction while maintaining compatibility with the constructed dataset.</p>
<p>To accurately and automatically obtain key phenotypic traits from rice panicles, we developed a comprehensive deep learning-based pipeline. A high-level overview of this four-stage workflow is presented in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>, illustrating the process from image acquisition to final data output. The underlying technical implementation, including the specific algorithms and the detailed architecture of our proposed OPG-YOLOv8 network, is detailed in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>. Our model is composed of four primary modules: (1) an image pre-processing and calibration module; (2) a grain counting module; (3) a panicle length extraction module; and (4) a module for extracting grain length, width, and maturity. The specific methods employed within each module are described in the following sections.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>A high-level overview of the proposed workflow for rice panicle phenotypic trait extraction. The pipeline consists of four main stages: <bold>(A)</bold> Image Acquisition and Preprocessing, <bold>(B)</bold> AI-Powered Analysis, <bold>(C)</bold> Multi-Trait Quantification, and <bold>(D)</bold> Platform Interface and Data Export. This figure provides a conceptual map of the entire process.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1730366-g003.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a four-stage process. Stage A involves capturing high-resolution images, calibrating size, and cropping them. Stage B runs the OPG-YOLOv8 model to classify panicle density and detect individual grains. Stage C calculates corrected grain count, extracts panicle length, quantifies grain dimensions, and assesses grain maturity. Stage D visualizes results, displays trait data in tables, and exports data.</alt-text>
</graphic></fig>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Detailed technical diagram of the trait extraction pipeline and model architecture. This figure provides a granular view of the algorithms and components referenced in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>. <bold>(A)</bold> Image preprocessing and calibration steps. <bold>(B)</bold> The grain counting module. <bold>(C)</bold> The panicle length extraction module. <bold>(D)</bold> The grain dimension and maturity extraction module. <bold>(E)</bold> The detailed architecture of our modified OPG-YOLOv8 network, highlighting the integration of CBAM modules. <bold>(F)</bold> Algorithmic flowchart for panicle length extraction. <bold>(G)</bold> Algorithmic flowchart for grain length and width extraction.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1730366-g004.tif">
<alt-text content-type="machine-generated">Flowchart depicting various stages of a grain analysis process with sections A to G. It includes image preprocessing, classification, feature extraction, and maturity evaluation using deep learning techniques. The diagram highlights methods like image binarization, noise removal, and skeleton extraction, with references to algorithms and regression analysis. Arrows guide the flow between stages, illustrating the progression from input images to final detection and evaluation of grains.</alt-text>
</graphic></fig>
<sec id="s2_2_1">
<label>2.2.1</label>
<title>The OPG-YOLOv8 model for grain detection</title>
<p>Accurate detection of individual rice grains is the foundation for multiple trait extraction tasks, including grain counting, dimension measurement, and maturity analysis. To achieve this with high accuracy and speed, we proposed an improved OPG-YOLOv8 detection algorithm, with its architecture detailed in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4E</bold></xref>. This algorithm enhances the standard YOLOv8 model by primarily optimizing its neck and head structures (<xref ref-type="bibr" rid="B9">Jocher et&#xa0;al., 2023</xref>).</p>
<p>Specifically, we introduced the CBAM (Convolutional Block Attention Module) to suppress background noise in shallow-layer features (<xref ref-type="bibr" rid="B23">Woo et&#xa0;al., 2018</xref>). The CBAM is an attention mechanism composed of a channel attention module (CAM) and a spatial attention module (SAM). By selectively weighting the channel and spatial information, it significantly enhances the feature representation ability for small targets like rice grains. Furthermore, a higher-resolution feature map is introduced in the neck for multi-scale feature fusion to compensate for information loss during feature transmission. Concurrently, an additional detection head is added to the head of OPG-YOLOv8, which utilizes the high-resolution feature map for prediction, effectively improving the detection accuracy of small targets. This optimized model serves as the core engine for all subsequent grain-level phenotypic analyses.</p>
</sec>
<sec id="s2_2_2">
<label>2.2.2</label>
<title>A two-stage method for accurate grain counting via density classification and regression</title>
<p>Under natural conditions, the mutual occlusion of grains within a panicle poses a significant challenge to accurate counting based solely on object detection. Denser panicles can have a substantial number of grains hidden from view, leading to systematic underestimation. To address this challenge, we developed a two-stage method that integrates density classification with regression modeling to correct the initial count.</p>
<p>The first stage involves classifying the overall panicle morphology to estimate the potential level of grain occlusion. Based on expert evaluation, we defined three density categories reflecting the degree of mutual grain occlusion: &#x201c;loose&#x201d; (slight occlusion), &#x201c;normal&#x201d; (moderate occlusion), and &#x201c;dense&#x201d; (severe occlusion), with typical examples shown in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>. The OPG-YOLOv8 model was trained on a dedicated dataset to automatically recognize the visual patterns of these types and assign an input panicle image to the most appropriate category. This classification provides the crucial contextual information needed for the subsequent correction step.</p>
<p>The OPG-YOLOv8 model was trained on a dedicated dataset to automatically recognize and assign an input panicle image to one of these three categories.</p>
<p>In the second stage, the classification result from Stage 1 is used to apply a targeted correction to an initial grain count. First, the OPG-YOLOv8 object detection model is used to obtain a preliminary predicted count of all visible grains in the panicle (denoted as <inline-formula>
<mml:math display="inline" id="im1"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula>). Then, based on the panicle&#x2019;s assigned density category, the corresponding pre-established linear regression equation is selected to calculate the final, corrected grain count (denoted as <inline-formula>
<mml:math display="inline" id="im2"><mml:mrow><mml:msub><mml:mi>Y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula>).</p>
<p>This regression model was built to empirically correct for the systematic underestimation of grain counts caused by occlusion in denser panicles (<xref ref-type="disp-formula" rid="eq1">Equations 1</xref>&#x2013;<xref ref-type="disp-formula" rid="eq3">3</xref>). The derivation and validation process was as follows: a dedicated regression modeling sub-dataset of 660 panicle images (220 for each density category) with manually verified, ground-truth grain counts was utilized. First, the OPG-YOLOv8 object detection model was applied to this dataset to obtain the initial predicted count of visible grains (denoted as X<sub>i</sub>) for each image.</p>
<p>Subsequently, for each density category, a simple linear regression analysis was performed by fitting these predicted counts (X<sub>i</sub>) against their corresponding manual ground-truth counts using the least-squares method. The resulting equations represent the best-fit linear models that map the visible grain count to the estimated total grain count for each respective category. The equations for the three density classes (1-loose, 2-normal, 3-dense) are presented as follows:</p>
<disp-formula id="eq1"><label>(1)</label>
<mml:math display="block" id="M1"><mml:mrow><mml:msub><mml:mi>Y</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mn>1.136</mml:mn><mml:msub><mml:mi>X</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>+</mml:mo><mml:mn>12.67</mml:mn></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq2"><label>(2)</label>
<mml:math display="block" id="M2"><mml:mrow><mml:msub><mml:mi>Y</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mn>1.555</mml:mn><mml:msub><mml:mi>X</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mn>6.191</mml:mn></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq3"><label>(3)</label>
<mml:math display="block" id="M3"><mml:mrow><mml:msub><mml:mi>Y</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>=</mml:mo><mml:mn>1.821</mml:mn><mml:msub><mml:mi>X</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mn>29.2</mml:mn></mml:mrow></mml:math>
</disp-formula>
<p>In this framework, X<sub>i</sub> represents the initial predicted grain count from the object detection model for a panicle in category <italic>i</italic>, while Y<sub>i</sub> represents the final, more accurate corrected grain count. The validation of these models is demonstrated by the high coefficient of determination (R&#xb2;) values presented in the Results section (<xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>), which confirm a strong goodness of fit and a significant linear relationship between the predicted and true values. This two-stage approach allows the system to systematically compensate for hidden grains based on the panicle&#x2019;s structure, significantly improving the overall counting accuracy.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Validate the extraction results of rice panicle phenotypic traits.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1730366-g005.tif">
<alt-text content-type="machine-generated">Scatter plots showing predicted versus ground truth values for various measurements: panicle length, class modeling (three classes), grain length, and grain width. Each plot includes a trend line with metrics such as Mean Absolute Error (MAE), Mean Absolute Percentage Error (MAPE), Root Mean Square Error (RMSE), and R-squared (R&#xb2;) values. Data points are represented as orange dots.</alt-text>
</graphic></fig>
</sec>
<sec id="s2_2_3">
<label>2.2.3</label>
<title>Method for extracting panicle length</title>
<p>To accurately measure the panicle length, this study proposed a panicle length extraction algorithm based on the dynamic pruning strategy of the depth-first search (DFS) algorithm. The calculation process of this method is shown in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4F</bold></xref>. First, the image is grayscaled, and then it is filtered to remove noise, eliminating interferences caused by the variability of lighting conditions and the physical limitations of the camera sensor. Subsequently, threshold segmentation is performed to separate the panicle part from the background. Finally, morphological closing operations are carried out to fill holes and connect edges, prominently highlighting the main characteristics of the panicle.</p>
<p>The skeletonization function in the skimage library (<xref ref-type="bibr" rid="B24">Wu et&#xa0;al., 2019</xref>) is used to extract the central axis of the panicle. To adhere to standard agronomic measurement practices, the panicle length was defined as the distance from the panicle neck to the furthest tip of the panicle. In our algorithm, the panicle neck is automatically identified as the lowest branching point on the extracted skeleton. This ensures that the main panicle stalk below this point is consistently excluded from the length calculation.</p>
<p>Following skeletonization, the DFS pruning algorithm (<xref ref-type="bibr" rid="B16">Song et&#xa0;al., 2019</xref>) is employed for path planning to depict the main path of the panicle. The dynamic pruning strategy of the DFS algorithm constructs a two-dimensional matrix from the pre-processed panicle image. One end of the skeleton is selected as the starting point, and the DFS algorithm is used to traverse the graph structure of the panicle (<xref ref-type="bibr" rid="B28">Yao et&#xa0;al., 2017</xref>). During the search process, the length of the current path is recorded. When a new node is searched, the length of the current path is compared with the recorded optimal length. If the length of the current path is less than the optimal length, the search continues; if it is greater than or equal to the optimal length, pruning is performed, and the search for this path is no longer continued. According to the recorded path information, the main path of the panicle can be depicted on the original image or a new image, and then the actual panicle length is calculated based on the pixel-to-physical size conversion factor (<xref ref-type="bibr" rid="B8">Hu et&#xa0;al., 2016</xref>).</p>
</sec>
<sec id="s2_2_4">
<label>2.2.4</label>
<title>Method for extracting grain length and width</title>
<p>Based on the position information of the grains in the panicle detected by the OPG-YOLOv8 model proposed in this study, the bounding box information of the rice panicle grains is determined according to the detection results, and then the length and width are extracted. However, due to the occlusion of grains in the panicle, it may happen that the detection box does not completely contain the rice panicle grains. Therefore, the detection results need to be screened. When screening the grains, Conf (confidence threshold) is used to measure the confidence level of the model&#x2019;s prediction of the detected target. Its value range is usually between 0 and 1. The higher the value, the more confident the model is about the accuracy of the detection result. Conf is used to determine whether the prediction box accurately covers the target grain. Only the boxes that exceed the set threshold are considered valid samples. After repeated screening and verification in this experiment, the rice panicle grains with a confidence threshold greater than 0.8 are further measured for length and width. After segmenting the selected grains, the length and width of the grains are extracted. The extraction process is shown in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4G</bold></xref>. It is important to note that the initial object detection model provides a bounding box primarily for locating each grain, which is not used for direct measurement. This is because initial bounding boxes can sometimes fail to capture the entire grain, particularly slender tips or awns, due to occlusion or morphological variations, which could introduce measurement inaccuracies.</p>
<p>To ensure high-precision measurement, our method employs a more robust two-step process. First, the coordinates of the initial bounding box are used to isolate a region of interest (ROI) containing the target grain. Second, within this ROI, we perform a precise threshold segmentation and particle filtering to extract the exact pixel morphology of the grain body. Finally, the minimum bounding rectangle is calculated based on this accurate segmentation mask, not the initial detection box. This segmentation-based approach ensures that the final length and width measurements accurately reflect the true dimensions of the grain by systematically overcoming potential inaccuracies in the initial detection phase.</p>
</sec>
<sec id="s2_2_5">
<label>2.2.5</label>
<title>Method for extracting panicle grain maturity</title>
<p>To objectively quantify grain maturity, a method based on the color characteristics of the panicle was developed. While the RGB color space from digital images captures fundamental color information, it is not perceptually uniform, meaning the numerical distance between two RGB values does not directly correspond to the human-perceived color difference. To establish a more robust and human-centric metric, a multi-step workflow was designed to convert the initial RGB data into the Hunter Lab color space, which is approximately uniform for human color perception. This ensures that the final calculated yellowness index (YI) provides a consistent and meaningful measure of maturity.</p>
<p>The yellowness extraction for grains leverages the detection results from the OPG-YOLOv8 model. For each detected grain, the calculation process begins with its average RGB values normalized via <xref ref-type="disp-formula" rid="eq4">Equation 4</xref>, and proceeds as follows:</p>
<disp-formula id="eq4"><label>(4)</label>
<mml:math display="block" id="M4"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>r</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mi>R</mml:mi><mml:mrow><mml:mn>255</mml:mn></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mi>g</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mi>G</mml:mi><mml:mrow><mml:mn>255</mml:mn></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mi>b</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mi>B</mml:mi><mml:mrow><mml:mn>255</mml:mn></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<p>Since the human eye&#x2019;s perception of color is nonlinear, and the RGB color space is a linear space based on physical devices, in order to more accurately simulate the visual characteristics of the human eye, gamma correction is required and implemented via <xref ref-type="disp-formula" rid="eq5">Equation 5</xref>. The specific formula is as follows:</p>
<disp-formula id="eq5"><label>(5)</label>
<mml:math display="block" id="M5"><mml:mrow><mml:mtext>If&#xa0;r</mml:mtext><mml:mo>&gt;</mml:mo><mml:mn>0.04045</mml:mn><mml:mtext>&#xa0;</mml:mtext><mml:mo>,</mml:mo><mml:mtext>then&#xa0;r</mml:mtext><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>r</mml:mtext><mml:mo>+</mml:mo><mml:mn>0.055</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">/</mml:mo><mml:mn>1.055</mml:mn><mml:msup><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mn>2.4</mml:mn></mml:mrow></mml:msup><mml:mtext>&#xa0;</mml:mtext><mml:mo>,</mml:mo><mml:mtext>else&#xa0;r</mml:mtext><mml:mo>=</mml:mo><mml:mtext>r</mml:mtext><mml:mo stretchy="false">/</mml:mo><mml:mn>12.92</mml:mn></mml:mrow></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>(the same approach applies to g and b)</p>
<p>After gamma correction, the normalized and corrected RGB values are converted to CIE XYZ values using the following formula (<xref ref-type="disp-formula" rid="eq6">Equation 6</xref>):</p>
<disp-formula id="eq6"><label>(6)</label>
<mml:math display="block" id="M6"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mtext>X</mml:mtext><mml:mo>=</mml:mo><mml:mn>0.4124</mml:mn><mml:mo>*</mml:mo><mml:mtext>r</mml:mtext><mml:mo>+</mml:mo><mml:mn>0.3576</mml:mn><mml:mo>*</mml:mo><mml:mtext>g</mml:mtext><mml:mo>+</mml:mo><mml:mn>0.1805</mml:mn><mml:mo>*</mml:mo><mml:mtext>b</mml:mtext></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mtext>Y</mml:mtext><mml:mo>=</mml:mo><mml:mn>0.2126</mml:mn><mml:mo>*</mml:mo><mml:mtext>r</mml:mtext><mml:mo>+</mml:mo><mml:mn>0.7152</mml:mn><mml:mo>*</mml:mo><mml:mtext>g</mml:mtext><mml:mo>+</mml:mo><mml:mn>0.0722</mml:mn><mml:mo>*</mml:mo><mml:mtext>b</mml:mtext></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mtext>Z</mml:mtext><mml:mo>=</mml:mo><mml:mn>0.0193</mml:mn><mml:mo>*</mml:mo><mml:mtext>r</mml:mtext><mml:mo>+</mml:mo><mml:mn>0.1192</mml:mn><mml:mo>*</mml:mo><mml:mtext>g</mml:mtext><mml:mo>+</mml:mo><mml:mn>0.9505</mml:mn><mml:mo>*</mml:mo><mml:mtext>b</mml:mtext></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<p>In the CIE XYZ color space, X, Y, and Z represent distinct color components. Based on the obtained CIE XYZ values, the Hunter Lab parameters are calculated. In the Hunter Lab color space, L denotes Lightness, which reflects the brightness of the color calculated by <xref ref-type="disp-formula" rid="eq7">Equation 7</xref>; a represents Chromaticity in the red-green axis, indicating the color&#x2019;s shift along the red-green direction; and b signifies Chromaticity in the yellow-blue axis, representing the color&#x2019;s shift along the yellow-blue direction. The formulas for calculating these parameters are as follows:</p>
<disp-formula id="eq7"><label>(7)</label>
<mml:math display="block" id="M7"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mtext>a</mml:mtext><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>175</mml:mn><mml:mo>*</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>X</mml:mtext><mml:mo>&#x2212;</mml:mo><mml:mtext>Y</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">/</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msqrt><mml:mtext>Y</mml:mtext></mml:msqrt></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mtext>b</mml:mtext><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>70</mml:mn><mml:mo>*</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>Y</mml:mtext><mml:mo>&#x2212;</mml:mo><mml:mtext>Z</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">/</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msqrt><mml:mtext>Y</mml:mtext></mml:msqrt></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mtext>L</mml:mtext><mml:mo>=</mml:mo><mml:mn>10</mml:mn><mml:msqrt><mml:mtext>Y</mml:mtext></mml:msqrt></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<p>These transformations from the RGB color space to the Hunter Lab space follow the standard colorimetric principles and formulas established by the International Commission on Illumination (CIE) (<xref ref-type="bibr" rid="B3">CIE, 2004</xref>). Finally, the yellowness value is calculated based on the aforementioned data results, using the following formula:</p>
<disp-formula id="eq8"><label>(8)</label>
<mml:math display="block" id="M8"><mml:mrow><mml:mtext>YI</mml:mtext><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>128</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mtext>L</mml:mtext><mml:mo>+</mml:mo><mml:mn>100</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mtext>a</mml:mtext><mml:mo>+</mml:mo><mml:mn>30</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mtext>b</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">/</mml:mo><mml:mn>100</mml:mn></mml:mrow></mml:math>
</disp-formula>
<p>Through this formula <xref ref-type="disp-formula" rid="eq8">Equation 8</xref>, the information of lightness (L) and chromaticity (a, b) is integrated, enabling the precise quantitative calculation of yellowness. This yields a numerical value that accurately reflects the degree of yellow in the color.</p>
<p>Based on the comparison between the rice maturity results judged by rice breeding experts and the average yellowness values, this study established a clear standard for the classification of rice maturity stages, as presented in <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>:</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Criteria for the classification of rice maturity stages.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Growth stage of rice</th>
<th valign="middle" align="center">Range of average yellowness value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Milk Stage</td>
<td valign="middle" align="center">Less than 50</td>
</tr>
<tr>
<td valign="middle" align="center">Dough Stage</td>
<td valign="middle" align="center">50 - 75</td>
</tr>
<tr>
<td valign="middle" align="center">Full Maturity Stage</td>
<td valign="middle" align="center">75 - 100</td>
</tr>
<tr>
<td valign="middle" align="center">Over-ripe Stage</td>
<td valign="middle" align="center">Greater than 100</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Experimental setup and training details</title>
<p>All deep learning training was conducted on a Windows operating system equipped with an 8-core Intel(R) Core i9-11900K CPU (3.50 GHz) and an NVIDIA GeForce RTX 3080 GPU. The CUDA version used was 12.6. The software used for model training was PyCharm 2023.3 Community Edition (JetBrains, Czech Republic), and Origin 2024 (OriginLab Corporation, USA) was used for fitting regression equations. The specific training hyperparameters for each compared model are detailed in <xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Training parameters for different models.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Hyperparameters</th>
<th valign="middle" align="left">OPG-YOLOv8</th>
<th valign="middle" align="left">YOLOv8</th>
<th valign="middle" align="left">YOLOv7</th>
<th valign="middle" align="left">Mask R-CNN</th>
<th valign="middle" align="left">EfficientDet</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">batch_size</td>
<td valign="middle" align="left">8</td>
<td valign="middle" align="left">8</td>
<td valign="middle" align="left">8</td>
<td valign="middle" align="left">2</td>
<td valign="middle" align="left">32</td>
</tr>
<tr>
<td valign="middle" align="left">momentum</td>
<td valign="middle" align="left">0.937</td>
<td valign="middle" align="left">0.937</td>
<td valign="middle" align="left">0.937</td>
<td valign="middle" align="left">0.9</td>
<td valign="middle" align="left">0.93</td>
</tr>
<tr>
<td valign="middle" align="left">learning rate</td>
<td valign="middle" align="left">0.01</td>
<td valign="middle" align="left">0.01</td>
<td valign="middle" align="left">0.01</td>
<td valign="middle" align="left">0.001</td>
<td valign="middle" align="left">0.002</td>
</tr>
<tr>
<td valign="middle" align="left">optimizer</td>
<td valign="middle" align="left">SGD</td>
<td valign="middle" align="left">SGD</td>
<td valign="middle" align="left">SGD</td>
<td valign="middle" align="left">SGD</td>
<td valign="middle" align="left">SGD</td>
</tr>
<tr>
<td valign="middle" align="left">weight_decay</td>
<td valign="middle" align="left">0.0005</td>
<td valign="middle" align="left">0.0005</td>
<td valign="middle" align="left">0.0005</td>
<td valign="middle" align="left">0.0005</td>
<td valign="middle" align="left">0.0001</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Performance evaluation metrics</title>
<p>The performance of the proposed models was assessed using a set of standard evaluation metrics, tailored for object detection and regression tasks.</p>
<p>For the grain detection models, performance was evaluated based on mean Average Precision (mAP), Parameters (Params), Floating-point Operations (FLOPs), and Frames Per Second (FPS). The mAP, a standard metric for evaluating the accuracy of object detectors, was calculated at an Intersection over Union (IoU) threshold of 0.5 (mAP50) (<xref ref-type="bibr" rid="B5">Everingham et&#xa0;al., 2010</xref>). Params and FLOPs were used to measure model complexity and computational cost, while FPS was used to evaluate inference speed. For the regression tasks, which include panicle length, grain dimension, and grain count estimations, the following metrics were used to measure the correlation and error between predicted values and ground-truth values (<xref ref-type="bibr" rid="B2">Botchkarev, 2018</xref>):</p>
<list list-type="bullet">
<list-item>
<p>Coefficient of Determination (R&#xb2;): Measures how well the predicted values fit the actual values. A value closer to 1 indicates a better fit.</p></list-item>
<list-item>
<p>Mean Absolute Error (MAE): Measures the average absolute difference between predicted and actual values.</p></list-item>
<list-item>
<p>Mean Absolute Percentage Error (MAPE): Expresses the MAE as a percentage of the actual values, useful for understanding the error in relative terms.</p></list-item>
<list-item>
<p>Root Mean Square Error (RMSE): Represents the standard deviation of the prediction errors.</p></list-item>
</list>
<p>The mathematical formulas for these metrics are defined as follows (<xref ref-type="disp-formula" rid="eq9">Equations 9</xref>&#x2013;<xref ref-type="disp-formula" rid="eq15">15</xref>):</p>
<disp-formula id="eq9"><label>(9)</label>
<mml:math display="block" id="M9"><mml:mrow><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:mi>P</mml:mi><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo stretchy="false">/</mml:mo><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>*</mml:mo><mml:mo>&#x2211;</mml:mo><mml:mo>&#x200b;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>d</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">/</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>*</mml:mo><mml:mn>100</mml:mn><mml:mo>%</mml:mo></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq10"><label>(10)</label>
<mml:math display="block" id="M10"><mml:mrow><mml:mi>P</mml:mi><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>m</mml:mi><mml:mi>s</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mo>*</mml:mo><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>*</mml:mo><mml:msub><mml:mi>K</mml:mi><mml:mi>h</mml:mi></mml:msub><mml:mo>*</mml:mo><mml:msub><mml:mi>K</mml:mi><mml:mi>w</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq11"><label>(11)</label>
<mml:math display="block" id="M11"><mml:mrow><mml:mi>F</mml:mi><mml:mi>L</mml:mi><mml:mi>O</mml:mi><mml:mi>P</mml:mi><mml:mi>s</mml:mi><mml:mo>=</mml:mo><mml:mi>P</mml:mi><mml:mi>a</mml:mi><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>m</mml:mi><mml:mi>s</mml:mi><mml:mo>*</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mo>*</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq12"><label>(12)</label>
<mml:math display="block" id="M12"><mml:mrow><mml:mi>m</mml:mi><mml:mi>A</mml:mi><mml:mi>P</mml:mi><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>C</mml:mi></mml:msubsup><mml:mrow><mml:msub><mml:mrow><mml:mi>A</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mi>i</mml:mi></mml:msub><mml:mo stretchy="false">/</mml:mo><mml:mi>C</mml:mi></mml:mrow></mml:mstyle></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq13"><label>(13)</label>
<mml:math display="block" id="M13"><mml:mrow><mml:mi>R</mml:mi><mml:mi>M</mml:mi><mml:mi>S</mml:mi><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>&#x2211;</mml:mo><mml:mo>&#x200b;</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>d</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">/</mml:mo><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msqrt></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq14"><label>(14)</label>
<mml:math display="block" id="M14"><mml:mrow><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo stretchy="false">/</mml:mo><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>*</mml:mo><mml:mo>&#x2211;</mml:mo><mml:mo>&#x200b;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>d</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq15"><label>(15)</label>
<mml:math display="block" id="M15"><mml:mrow><mml:msup><mml:mi>R</mml:mi><mml:mn>2</mml:mn></mml:msup><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mo>&#x2211;</mml:mo><mml:mo>&#x200b;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>d</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo stretchy="false">/</mml:mo><mml:mo>&#x2211;</mml:mo><mml:mo>&#x200b;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mo>&#x2329;</mml:mo><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x232a;</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>Among these evaluation indices, <inline-formula>
<mml:math display="inline" id="im3"><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> represents the number of input channels, <inline-formula>
<mml:math display="inline" id="im4"><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> represents the number of output channels, <inline-formula>
<mml:math display="inline" id="im5"><mml:mrow><mml:msub><mml:mi>K</mml:mi><mml:mrow><mml:mi>h</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>K</mml:mi><mml:mi>w</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> represents the size of the convolutional kernel, <inline-formula>
<mml:math display="inline" id="im6"><mml:mrow><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> represents the height and width of the output feature map, <inline-formula>
<mml:math display="inline" id="im7"><mml:mi>C</mml:mi></mml:math></inline-formula> represents the total number of categories, <inline-formula>
<mml:math display="inline" id="im8"><mml:mrow><mml:mi>A</mml:mi><mml:mi>P</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:math></inline-formula> represents the <inline-formula>
<mml:math display="inline" id="im9"><mml:mrow><mml:mi>A</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:math></inline-formula> value of the <inline-formula>
<mml:math display="inline" id="im10"><mml:mi>i</mml:mi></mml:math></inline-formula>-th category, <inline-formula>
<mml:math display="inline" id="im11"><mml:mtext>n</mml:mtext></mml:math></inline-formula> represents the number of samples, <inline-formula>
<mml:math display="inline" id="im12"><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> represents the true value, which may be the true number of grains. <inline-formula>
<mml:math display="inline" id="im13"><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>d</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> represents the predicted value, that is, the number of grains in the rice panicle detected by the object detection model.</p>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Experimental results and analysis</title>
<sec id="s3_1">
<label>3.1</label>
<title>Evaluation of the extraction results of rice panicle phenotypic traits</title>
<p>The performance of the OPG-YOLOv8 model in predicting rice panicle types is presented in <xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>. Among them, there are 213 samples of Class 1, with 195 correctly predicted, and the accuracy rate is 91.55%; there are 178 samples of Class 2, with 154 correctly predicted, and the accuracy rate is 86.52%; there are 247 actual samples of Class 3, with 238 correctly predicted, and the accuracy rate is as high as 96.36%. The model has the best prediction effect for Class 3 rice panicles, while the prediction accuracy of Class 2 is relatively low, possibly because the characteristics of this type of rice panicle are easily confused with those of other categories.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Prediction results of different types of rice panicles.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Panicle types</th>
<th valign="middle" align="left">Number of true samples</th>
<th valign="middle" align="left">Number of correctly predicted samples</th>
<th valign="middle" align="left">Accuracy</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Class 1</td>
<td valign="middle" align="left">213</td>
<td valign="middle" align="left">195</td>
<td valign="middle" align="left">91.55%</td>
</tr>
<tr>
<td valign="middle" align="left">Class 2</td>
<td valign="middle" align="left">178</td>
<td valign="middle" align="left">154</td>
<td valign="middle" align="left">86.52%</td>
</tr>
<tr>
<td valign="middle" align="left">Class 3</td>
<td valign="middle" align="left">247</td>
<td valign="middle" align="left">238</td>
<td valign="middle" align="left">96.36%</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The accuracy and robustness of the panicle length extraction algorithm were evaluated. The results showed a strong correlation between predicted and actual lengths, with an R&#xb2; of 0.9583, an RMSE of 5.69&#xa0;mm, an MAE of 4.91&#xa0;mm, and a MAPE of 2.03%.</p>
<p>The performance of the two-stage grain counting method was evaluated across the three panicle density categories, and the results are detailed in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>. The model demonstrated a strong linear relationship between predicted and true values for all types. For the &#x2018;loose&#x2019; panicles (Class 1), the model achieved the highest accuracy with an R&#xb2; of 0.9799 and an RMSE of 8.69. As panicle density increased, the R&#xb2; values remained high for &#x2018;normal&#x2019; (0.9551) and &#x2018;dense&#x2019; (0.9278) categories, confirming the method&#x2019;s effectiveness.</p>
<p>However, a corresponding increase in error metrics was observed with density, with the RMSE reaching 16.37 for the &#x2018;dense&#x2019; category. This trend highlights the inherent challenge of severe grain occlusion in denser panicles, a factor that increases prediction variance. Despite this, the Mean Absolute Percentage Error (MAPE) for the &#x2018;dense&#x2019; category remained low at 7.51%, indicating that the model maintains a high level of relative accuracy even in the most challenging cases.</p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Cross-region generalization verification results</title>
<p>To evaluate the generalization capability and robustness of our proposed model across different geographical regions and genetic backgrounds, we conducted tests on an independent, cross-region validation dataset. This dataset comprises 600 panicle images from the Modern Agricultural Science and Technology Innovation Demonstration Park of Sichuan Academy of Agricultural Sciences, which were not used in any part of the model training process. The performance of the model on the key phenotypic traits is summarized and compared with the original validation set in <xref ref-type="table" rid="T5"><bold>Table&#xa0;5</bold></xref>.</p>
<table-wrap id="T5" position="float">
<label>Table&#xa0;5</label>
<caption>
<p>Performance comparison of the model on the original and cross-region validation datasets.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Trait category</th>
<th valign="middle" align="left">Metric</th>
<th valign="middle" align="left">Original validation set</th>
<th valign="middle" align="left">Cross-region validation set</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" rowspan="2" align="left">Panicle length</td>
<td valign="middle" align="left">R&#xb2;</td>
<td valign="middle" align="left">0.9583</td>
<td valign="middle" align="left">0.9412</td>
</tr>
<tr>
<td valign="middle" align="left">RMSE (mm)</td>
<td valign="middle" align="left">5.69</td>
<td valign="middle" align="left">6.15</td>
</tr>
<tr>
<td valign="top" rowspan="2" align="left">Grain count</td>
<td valign="middle" align="left">Average R&#xb2;</td>
<td valign="middle" align="left">0.9543</td>
<td valign="middle" align="left">0.9255</td>
</tr>
<tr>
<td valign="middle" align="left">Average MAE</td>
<td valign="middle" align="left">10.52</td>
<td valign="middle" align="left">12.88</td>
</tr>
<tr>
<td valign="middle" align="left">Grain length</td>
<td valign="middle" align="left">R&#xb2;</td>
<td valign="middle" align="left">0.8823</td>
<td valign="middle" align="left">0.8655</td>
</tr>
<tr>
<td valign="middle" align="left">Grain width</td>
<td valign="middle" align="left">R&#xb2;</td>
<td valign="middle" align="left">0.5959</td>
<td valign="middle" align="left">0.5731</td>
</tr>
<tr>
<td valign="middle" align="left">Maturity stage</td>
<td valign="middle" align="left">Classification Accuracy (%)</td>
<td valign="middle" align="left">92.0%</td>
<td valign="middle" align="left">89.5%</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The results demonstrate that the model maintains strong performance and high accuracy on this entirely new dataset. For panicle length extraction, the R&#xb2; value reached 0.9412, showing only a marginal decrease compared to the original validation set (0.9583) and indicating the high robustness of the measurement algorithm. In the critical task of grain counting, the model achieved an average R&#xb2; of 0.9255, confirming the effectiveness of the two-stage counting method in handling novel panicle morphologies. Similarly, metrics for grain dimension extraction and maturity classification remained at a high level, with grain length R&#xb2; at 0.8655 and maturity classification accuracy at 89.5%.</p>
<p>While there was an expected, slight decline in performance across all metrics, this can be attributed to subtle variations in panicle morphology, imaging conditions, or genetic backgrounds characteristic of the new region. Overall, these results strongly validate that our phenotyping model possesses excellent generalization ability, making it a reliable and applicable tool for broader practical breeding applications beyond the initial training environment.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Comparative experiment of grain detection models</title>
<p><xref ref-type="table" rid="T6"><bold>Table&#xa0;6</bold></xref> presents the evaluation results of different models. In terms of detection accuracy, OPG-YOLOv8 demonstrates outstanding performance, achieving a mAP50 of 99.10%, which surpasses models such as YOLOv8 and YOLOv7. This indicates that OPG-YOLOv8 excels in target recognition and localization precision at an IoU threshold of 0.5. Regarding the R&#xb2; metric, although OPG-YOLOv8&#x2019;s score of 0.8 is slightly lower than YOLOv7&#x2019;s 0.86, it still reflects strong data-fitting capability, comparable to YOLOv8&#x2019;s 0.84 and Mask R-CNN&#x2019;s 0.81, and superior to EfficientDet&#x2019;s 0.74. In terms of error metrics, OPG-YOLOv8 achieves an MAE of 57.4, which, although higher than YOLOv7&#x2019;s 48.1, remains within an acceptable range for practical applications. Moreover, its overall accuracy advantage mitigates the relative impact of this metric to some extent. The MAPE of 30.47% is higher than that of other comparative models. The RMSE of 69, while relatively high, indicates a greater dispersion of prediction errors; however, combined with its high precision, it remains within acceptable limits. From the perspective of model complexity, OPG-YOLOv8 has 59.6M parameters and 113.2G FLOPs, demonstrating relatively low computational complexity and offering advantages in hardware resource utilization. The inference speed of OPG-YOLOv8 reaches 154 FPS, significantly higher than that of the other models. Overall, OPG-YOLOv8 exhibits superior comprehensive performance in object detection tasks, achieving an excellent balance between accuracy, computational complexity, and inference speed, and holds a clear advantage over other models.</p>
<table-wrap id="T6" position="float">
<label>Table&#xa0;6</label>
<caption>
<p>Comparison of different object detection networks on the validation set.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Models</th>
<th valign="middle" align="left">mAP50</th>
<th valign="middle" align="left">R<sup>2</sup></th>
<th valign="middle" align="left">MAE</th>
<th valign="middle" align="left">MAPE</th>
<th valign="middle" align="left">RMSE</th>
<th valign="middle" align="left">Params</th>
<th valign="middle" align="left">FLOPs</th>
<th valign="middle" align="left">FPS</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">OPG-YOLOv8</td>
<td valign="middle" align="left">99.10%</td>
<td valign="middle" align="left">0.8</td>
<td valign="middle" align="left">57.4</td>
<td valign="middle" align="left">30.47%</td>
<td valign="middle" align="left">69</td>
<td valign="middle" align="left">59.6M</td>
<td valign="middle" align="left">113.2G</td>
<td valign="middle" align="left">154</td>
</tr>
<tr>
<td valign="middle" align="left">YOLOv8</td>
<td valign="middle" align="left">97.40%</td>
<td valign="middle" align="left">0.84</td>
<td valign="middle" align="left">52.2</td>
<td valign="middle" align="left">28.41%</td>
<td valign="middle" align="left">64</td>
<td valign="middle" align="left">68.7M</td>
<td valign="middle" align="left">180.3G</td>
<td valign="middle" align="left">133</td>
</tr>
<tr>
<td valign="middle" align="left">YOLOv7</td>
<td valign="middle" align="left">96.10%</td>
<td valign="middle" align="left">0.86</td>
<td valign="middle" align="left">48.1</td>
<td valign="middle" align="left">25.27%</td>
<td valign="middle" align="left">62</td>
<td valign="middle" align="left">53.9M</td>
<td valign="middle" align="left">213.6G</td>
<td valign="middle" align="left">87</td>
</tr>
<tr>
<td valign="middle" align="left">Mask R-CNN</td>
<td valign="middle" align="left">76.04%</td>
<td valign="middle" align="left">0.81</td>
<td valign="middle" align="left">54.8</td>
<td valign="middle" align="left">27.86%</td>
<td valign="middle" align="left">72</td>
<td valign="middle" align="left">43.2M</td>
<td valign="middle" align="left">247.5G</td>
<td valign="middle" align="left">43</td>
</tr>
<tr>
<td valign="middle" align="left">EfficientDet</td>
<td valign="middle" align="left">84.32%</td>
<td valign="middle" align="left">0.74</td>
<td valign="middle" align="left">42.8</td>
<td valign="middle" align="left">24.71%</td>
<td valign="middle" align="left">58</td>
<td valign="middle" align="left">37.4M</td>
<td valign="middle" align="left">289.1G</td>
<td valign="middle" align="left">35</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Evaluation experiment of the maturity of panicle grains</title>
<p>The maturity of rice grains was assessed based on the mean yellowness values of panicles. A total of 1,200 panicle images were selected as experimental samples, and the average yellowness was statistically analyzed for each image. As shown in <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>, representative panicle images and their corresponding yellowness histograms are presented for four maturity stages: over-ripe, fully ripe, wax-ripe, and milk-ripe. The figure demonstrates marked variations in both the yellowness histograms and mean values across different maturity stages, confirming the discriminative capacity of yellowness metrics in characterizing grain maturation progression.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Histogram of grain maturity statistics for rice panicles. <bold>(A)</bold> Rice panicles at the milk-ripe stage. <bold>(B)</bold> Rice panicles at the dough stage. <bold>(C)</bold> Full-ripe rice panicles. <bold>(D)</bold> Overripe rice panicles.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1730366-g006.tif">
<alt-text content-type="machine-generated">Clustered images depicting rice plants at different stages of ripeness labeled A, B, C, and D, each paired with a histogram showing the pixel distribution of yellowness values. Averages are 21, 53, 84, and 124, respectively. Each graph illustrates changes in degree of yellow as the plants mature.</alt-text>
</graphic></fig>
<p>The model&#x2019;s performance in classifying rice panicle maturity stages (over-ripe, fully ripe, wax-ripe, and milk-ripe) was evaluated using a confusion matrix. As shown in <xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7</bold></xref>, the model&#x2019;s maturity prediction was evaluated using 200 images per stage (totaling 800 samples) randomly selected from a dataset of 1,200 expert-annotated panicle images. The confusion matrix analysis revealed distinct classification accuracies across stages: over-ripe samples achieved 183 correct predictions (91.5% accuracy), with 13 misclassified as fully ripe; fully ripe panicles yielded 185 correct identifications (92.5% accuracy), though 9 were erroneously labeled as over-ripe; wax-ripe panicles attained 178 correct classifications (89.0% accuracy), with 6 and 14 misclassifications to over-ripe and fully ripe stages, respectively; while milk-ripe panicles demonstrated the highest accuracy (189 correct predictions, 94.5%), with only 4 and 7 misclassifications to fully ripe and wax-ripe stages. The diagonal values (correct classification rates) highlight the model&#x2019;s capability to discern maturity phases, particularly excelling in milk-ripe stage recognition. Notably, inter-stage confusion predominantly occurred between adjacent maturity phases (e.g., over-ripe vs. fully ripe), suggesting challenges in distinguishing subtle transitional features. These results validate the model&#x2019;s effectiveness in maturity stage identification while underscoring potential optimization avenues for boundary case refinement.</p>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Confusion matrix for predicting the maturity of rice panicles.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1730366-g007.tif">
<alt-text content-type="machine-generated">Confusion matrix with reference stages on the y-axis and prediction stages on the x-axis: Over-ripe, Full maturity, Dough, and Milk. Numbers indicate classification counts. The diagonal shows high accuracy: 183 Over-ripe, 185 Full maturity, 178 Dough, and 189 Milk. Color gradient indicates frequency, with darker shades representing higher counts.</alt-text>
</graphic></fig>
</sec>
<sec id="s3_5">
<label>3.5</label>
<title>Research and development of a precision extraction system for rice panicle traits</title>
<p>To enable the practical application of the OPG-YOLOv8 detection model and panicle trait extraction algorithms, a &#x201c;Rice Panicle Trait Extraction System&#x201d; (as shown in <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8</bold></xref>) was developed based on the Django framework and Python. The system incorporates a web-based interface module that supports multi-user, multi-role remote operations and data management. It provides an integrated workflow from data acquisition to cloud-based intelligent analysis, processing each image within an average of 5 seconds. The system rapidly and accurately extracts key phenotypic traits, including panicle length, grain length, grain width, length-to-width ratio, and maturity stage. Equipped with data-driven phenotyping algorithms, this platform delivers precise and efficient technical support for modern agricultural breeding research, bridging the gap between advanced computer vision models and real-world agronomic applications. The system is currently deployed as an internal web-based platform. To facilitate broader academic use, a demonstration version or open-access availability is under consideration for future development. The source code for the system&#x2019;s core algorithms is included in the project&#x2019;s GitHub repository, as mentioned in the Data Availability Statement.</p>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>The user interface of the rice panicle trait extraction system. <bold>(A)</bold> The main interface for image upload and batch processing. <bold>(B)</bold> The results interface displaying the multi-trait extraction outputs.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-16-1730366-g008.tif">
<alt-text content-type="machine-generated">A software interface for extracting rice panicle traits is displayed. The interface shows options to upload and edit images, with fields for naming and describing each image. Below, rice traits extraction and grain maturity analysis are presented through images and a histogram. A table lists analysis results, including grain number and dimensions, with an option to export data as a CSV file.</alt-text>
</graphic></fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>In this study, we developed and validated a deep learning-based system for the automated, high-throughput extraction of multiple, agronomically important phenotypic traits in rice panicles. The discussion below contextualizes our key findings, addresses their practical implications for rice breeding, and outlines the limitations and future directions of this work.</p>
<sec id="s4_1">
<label>4.1</label>
<title>Accuracy and implications of panicle and grain trait extraction</title>
<p>Our system demonstrated high precision in measuring panicle length (R&#xb2; = 0.9583), a key architectural trait often positively correlated with the total number of grains a panicle can hold. The robustness of our image pre-processing workflow combined with a dynamic DFS pruning strategy proved highly effective. This automated and accurate measurement provides rice breeders with a reliable tool for rapidly selecting genotypes with high-yield potential.</p>
<p>A cornerstone of our study is the novel two-stage method for grain counting, which integrates object detection with density-based regression. This approach effectively mitigates the common issue of underestimation caused by severe grain occlusion, a significant limitation in previous studies. The ability to rapidly and accurately screen thousands of genetic lines for grain number per panicle&#x2014;a primary component of final yield&#x2014;can dramatically accelerate the selection of elite genotypes. While automated panicle type recognition greatly enhances efficiency, the universality of this classification model could be further improved by incorporating a more diverse training set, including panicles from a wider range of genetic backgrounds and environmental conditions.</p>
<p>The system also provides valuable data on grain dimensions and maturity, which are fundamental to rice quality. While the extraction of grain length was satisfactory, accurately measuring grain width proved more challenging (R&#xb2; = 0.5959). This limitation likely stems from the inherent difficulty of representing a complex 3D grain shape using a 2D minimum bounding rectangle. Future research could explore multi-view imaging or 3D reconstruction techniques to capture grain plumpness more accurately. Nevertheless, the automated measurement of length and width is highly valuable, as these traits collectively determine grain shape and size, which are critical quality characteristics influencing milling yield and consumer preference. Similarly, our objective maturity assessment via a quantified yellowness index offers breeders a powerful, data-driven tool to select for uniform ripening and optimize harvest timing, thereby maximizing both grain quality and yield.</p>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Performance and limitations of the OPG-YOLOv8 model</title>
<p>The OPG-YOLOv8 model, which forms the core of our grain&#xa0;detection module, demonstrated superior performance in terms of both detection accuracy (mAP50&#xa0;=&#xa0;99.10%) and inference speed (154 FPS) compared to other standard models. This highlights its potential for real-time or near-real-time applications. However, this enhanced performance comes with relatively high computational resource requirements, which presents a challenge for deploying the model on low-power, embedded agricultural devices at the edge. Future work could explore model compression techniques, such as pruning and quantization (<xref ref-type="bibr" rid="B4">Deng et&#xa0;al., 2020</xref>), to create a lightweight version of OPG-YOLOv8 without a significant loss of accuracy, thereby broadening its applicability.</p>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Toward integrated and field-ready phenotyping</title>
<p>The current research successfully demonstrates the integration of multi-trait extraction within a single platform. However, the analysis of each trait is still largely independent. A promising future direction is to explore multi-task learning frameworks (<xref ref-type="bibr" rid="B15">Ruder, 2017</xref>), where a single model is trained to predict all traits simultaneously. Such an approach could leverage the inherent correlations between traits (e.g., panicle length and grain number) to improve the overall accuracy and efficiency of the system.</p>
<p>Ultimately, the goal is to move from controlled, laboratory environments to real-world field conditions (<xref ref-type="bibr" rid="B22">Tsaftaris and Scharr, 2019</xref>). This will require significant efforts in collecting diverse, field-based datasets and developing algorithms robust to challenges like variable illumination, complex backgrounds, and occlusion from leaves. Integrating multi-modal data, such as depth information from 3D sensors, could also be a key step in overcoming these hurdles. By addressing these challenges, this technology can be further enhanced as a powerful decision-support tool, helping to accelerate the development of improved rice varieties and contribute to global food security.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusions">
<label>5</label>
<title>Conclusion</title>
<p>In this study, a precise and automated model for rice panicle trait extraction based on deep learning was successfully constructed, and a corresponding web-based system was developed. This work effectively addresses the critical need for efficient and high-throughput phenotyping in rice breeding and production. The primary innovations and conclusions of this research are summarized as follows:</p>
<list list-type="order">
<list-item>
<p>A high-accuracy, multi-trait extraction pipeline was established. The proposed deep learning pipeline demonstrated strong performance in quantifying key agronomic traits. Notably, the panicle length extraction method achieved a high R&#xb2; of 0.9583. For grain counting, a novel two-stage method combining density classification and regression modeling effectively corrected for occlusion errors, achieving R&#xb2; values up to 0.9799 and significantly improving accuracy over direct detection.</p></list-item>
<list-item>
<p>An optimized object detection model (OPG-YOLOv8) was developed for superior performance. By integrating an attention mechanism (CBAM) and multi-scale fusion, the OPG-YOLOv8 model achieved an excellent balance of accuracy (mAP50 of 99.10%), computational efficiency (113.2G FLOPs), and speed (154 FPS), outperforming other comparative models. This provides a robust and efficient core engine for grain-level analysis.</p></list-item>
<list-item>
<p>An integrated and practical phenotyping system was implemented. The algorithms were encapsulated in a user-friendly, web-based &#x201c;Rice Panicle Trait Extraction System.&#x201d; This system translates complex computer vision models into a practical tool for breeders, bridging the gap between advanced research and real-world application by providing an end-to-end workflow from image upload to data export.</p></list-item>
</list>
<p>While this study has achieved its primary objectives, future work should focus on enhancing the model&#x2019;s adaptability to complex field environments and further exploring the relationships among traits to develop more comprehensive and universally applicable phenotyping models. In summary, this research provides a powerful and efficient tool that can significantly promote the intelligent and digital breeding process of rice.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material. Further inquiries can be directed to the corresponding authors.</p></sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>ZW: Conceptualization, Data curation, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &amp; editing. RL: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Resources, Software, Writing &#x2013; review &amp; editing. WL: Data curation, Funding acquisition, Investigation, Writing &#x2013; review &amp; editing. XM: Funding acquisition, Resources, Writing &#x2013; review &amp; editing. SY: Funding acquisition, Resources, Validation, Writing &#x2013; review &amp; editing. ML: Funding acquisition, Resources, Writing &#x2013; review &amp; editing. BH: Funding acquisition, Resources, Writing &#x2013; review &amp; editing. MT: Funding acquisition, Resources, Writing &#x2013; review &amp; editing. GZ:&#xa0;Funding acquisition, Resources, Writing &#x2013; review &amp; editing. JW: Funding acquisition, Resources, Writing &#x2013; review &amp; editing. JZ: Funding acquisition, Resources, Writing &#x2013; review &amp; editing.</p></sec>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s10" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declare that no Generative AI was used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If&#xa0;you identify any issues, please contact us.</p></sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ayumi</surname> <given-names>A.</given-names></name>
<name><surname>Motoyuki</surname> <given-names>A.</given-names></name>
<name><surname>Yutaka</surname> <given-names>S.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Designing rice panicle architecture via developmental regulatory genes</article-title>. <source>Breed. Sci.</source> <volume>73</volume>, <fpage>86</fpage>&#x2013;<lpage>94</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1270/jsbbs.22075</pub-id>, PMID: <pub-id pub-id-type="pmid">37168816</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Botchkarev</surname> <given-names>A.</given-names></name>
</person-group> (<year>2018</year>). 
<article-title>A new typology of performance metrics to measure errors in machine learning regression algorithms</article-title>. <source>arXiv preprint arXiv:1809.03006</source> <volume>14</volume>, <fpage>045</fpage>&#x2013;<lpage>076</lpage>.
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="book">
<person-group person-group-type="author"><collab>CIE</collab>
</person-group> (<year>2004</year>). <source>Colorimetry, 3rd Edition (CIE Publication 15:2004)</source> (<publisher-loc>Vienna</publisher-loc>: 
<publisher-name>CIE Central Bureau</publisher-name>).
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Deng</surname> <given-names>L.</given-names></name>
<name><surname>Li</surname> <given-names>G.</given-names></name>
<name><surname>Han</surname> <given-names>S.</given-names></name>
<name><surname>Shi</surname> <given-names>X.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>Model compression and hardware acceleration for neural networks: A comprehensive survey</article-title>. <source>Proc. IEEE</source> <volume>108</volume>, <fpage>485</fpage>&#x2013;<lpage>532</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/JPROC.2020.2976475</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Everingham</surname> <given-names>M.</given-names></name>
<name><surname>Van Gool</surname> <given-names>L.</given-names></name>
<name><surname>Williams</surname> <given-names>C. K. I.</given-names></name>
<name><surname>Winn</surname> <given-names>J.</given-names></name>
<name><surname>Zisserman</surname> <given-names>A.</given-names></name>
<name><surname>Bishop</surname> <given-names>C. M.</given-names></name>
<etal/>
</person-group>. (<year>2010</year>). 
<article-title>The PASCAL visual object classes (VOC) challenge</article-title>. <source>Int. J. Comput. Vision</source> <volume>88</volume>, <fpage>303</fpage>&#x2013;<lpage>338</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11263-009-0275-4</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ghosal</surname> <given-names>S.</given-names></name>
<name><surname>Zheng</surname> <given-names>B.</given-names></name>
<name><surname>Chapman</surname> <given-names>S. C.</given-names></name>
<name><surname>Potgieter</surname> <given-names>J.</given-names></name>
<name><surname>Nair</surname> <given-names>R.</given-names></name>
<name><surname>Singh</surname> <given-names>A.</given-names></name>
<etal/>
</person-group>. (<year>2019</year>). 
<article-title>A weakly supervised deep learning framework for sorghum head detection and counting</article-title>. <source>Plant Phenomics</source> <volume>2019</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.34133/2019/1525874</pub-id>, PMID: <pub-id pub-id-type="pmid">33313521</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gong</surname> <given-names>L.</given-names></name>
<name><surname>Lin</surname> <given-names>K.</given-names></name>
<name><surname>Wang</surname> <given-names>T.</given-names></name>
<name><surname>Chen</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>S.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2018</year>). 
<article-title>Image-based on-panicle rice grain counting with a prior edge wavelet correction model</article-title>. <source>Agronomy</source> <volume>8</volume>, <fpage>91</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agronomy8060091</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Hu</surname> <given-names>H.</given-names></name>
<name><surname>Wang</surname> <given-names>H.</given-names></name>
<name><surname>Li</surname> <given-names>J.</given-names></name>
<name><surname>Zhang</surname> <given-names>Y.</given-names></name>
<name><surname>Liu</surname> <given-names>X.</given-names></name>
<name><surname>Chen</surname> <given-names>W.</given-names></name>
<etal/>
</person-group>. (<year>2016</year>). &#x201c;
<article-title>An efficient pruning strategy for approximate string matching over suffix tree</article-title>,&#x201d; in <source>Knowledge and Information Systems</source> (<publisher-loc>Berlin, Germany</publisher-loc>: 
<publisher-name>Springer</publisher-name>) <volume>49</volume>, <fpage>121</fpage>&#x2013;<lpage>141</lpage>.
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Jocher</surname> <given-names>G.</given-names></name>
<name><surname>Chaurasia</surname> <given-names>A.</given-names></name>
<name><surname>Qiu</surname> <given-names>J.</given-names></name>
</person-group> (<year>2023</year>). <source>YOLO by Ultralytics</source>. <publisher-loc>San Francisco, CA, USA</publisher-loc>: 
<publisher-name>Ultralytics</publisher-name>.
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>T.</given-names></name>
<name><surname>Chen</surname> <given-names>W.</given-names></name>
<name><surname>Wang</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
<name><surname>Li</surname> <given-names>J.</given-names></name>
<name><surname>Zhao</surname> <given-names>H.</given-names></name>
<etal/>
</person-group>. (<year>2017</year>). 
<article-title>Rice and wheat grain counting method and software development based on Android system</article-title>. <source>Comput. Electron. Agric.</source> <volume>141</volume>, <fpage>302</fpage>&#x2013;<lpage>309</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2017.08.011</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lu</surname> <given-names>Y.</given-names></name>
<name><surname>Wang</surname> <given-names>J.</given-names></name>
<name><surname>Fu</surname> <given-names>L.</given-names></name>
<name><surname>Yu</surname> <given-names>L.</given-names></name>
<name><surname>Liu</surname> <given-names>Q.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>High-throughput and separating-free phenotyping method for on-panicle rice grains based on deep learning</article-title>. <source>Front. Plant Sci.</source> <volume>14</volume>, <elocation-id>1219584</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2023.1219584</pub-id>, PMID: <pub-id pub-id-type="pmid">37790779</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Misra</surname> <given-names>T.</given-names></name>
<name><surname>Arora</surname> <given-names>A.</given-names></name>
<name><surname>Marwaha</surname> <given-names>S.</given-names></name>
<name><surname>Singh</surname> <given-names>A.</given-names></name>
<name><surname>Kaur</surname> <given-names>P.</given-names></name>
<name><surname>Sharma</surname> <given-names>N.</given-names></name>
<etal/>
</person-group>. (<year>2020</year>). 
<article-title>SpikeSegNet-a deep learning approach utilizing encoder-decoder network with hourglass for spike segmentation and counting in wheat plant from visual imaging</article-title>. <source>Plant Methods</source> <volume>16</volume>, <fpage>40</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13007-020-00582-9</pub-id>, PMID: <pub-id pub-id-type="pmid">32206080</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Peng</surname> <given-names>Y.</given-names></name>
<name><surname>Xie</surname> <given-names>X.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>Application of phenomics in rice research</article-title>. <source>Chin. J. Rice Sci.</source> <volume>34</volume>, <fpage>300</fpage>&#x2013;<lpage>306</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.16819/j.1001-7216.2020.9083</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pound</surname> <given-names>M. P.</given-names></name>
<name><surname>Atkinson</surname> <given-names>J. A.</given-names></name>
<name><surname>Townsend</surname> <given-names>A. J.</given-names></name>
<name><surname>Pridmore</surname> <given-names>T. P.</given-names></name>
<name><surname>French</surname> <given-names>A. P.</given-names></name>
<name><surname>Moore</surname> <given-names>S.</given-names></name>
<etal/>
</person-group>. (<year>2017</year>). 
<article-title>Deep machine learning provides state-of-the-art performance in object detection for automated wheat ear counting</article-title>. <source>Plant Methods</source> <volume>13</volume>, <fpage>1</fpage>&#x2013;<lpage>10</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13007-017-0201-5</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ruder</surname> <given-names>S.</given-names></name>
</person-group> (<year>2017</year>). 
<article-title>An overview of multi-task learning in deep neural networks</article-title>. <source>arXiv preprint arXiv:1706.05098</source>.
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Song</surname> <given-names>T.</given-names></name>
<name><surname>Pang</surname> <given-names>S.</given-names></name>
<name><surname>Hao</surname> <given-names>S.</given-names></name>
<name><surname>Zhang</surname> <given-names>Y.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Wang</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2019</year>). 
<article-title>A parallel image skeletonizing method using spiking neural P systems with weights</article-title>. <source>Neural Process. Lett.</source> <volume>50</volume>, <fpage>1485</fpage>&#x2013;<lpage>1502</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11063-018-9947-9</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sun</surname> <given-names>J.</given-names></name>
<name><surname>Jia</surname> <given-names>H.</given-names></name>
<name><surname>Ren</surname> <given-names>Z.</given-names></name>
<name><surname>Cui</surname> <given-names>J.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>X.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>a). 
<article-title>Accurate rice grain&#xa0;counting in natural morphology: A method based on image classification and&#xa0;object detection</article-title>. <source>Comput. Electron. Agric.</source> <volume>227</volume>, <fpage>109490</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2024.109490</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sun</surname> <given-names>J.</given-names></name>
<name><surname>Ren</surname> <given-names>Z.</given-names></name>
<name><surname>Cui</surname> <given-names>J.</given-names></name>
<name><surname>Jia</surname> <given-names>H.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>X.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>b). 
<article-title>A high-throughput method for accurate extraction of intact rice panicle traits</article-title>. <source>Plant Phenomics</source> <volume>6</volume>, <fpage>0213</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.34133/plantphenomics.0213</pub-id>, PMID: <pub-id pub-id-type="pmid">39091338</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Tan</surname> <given-names>S.</given-names></name>
<name><surname>Ma</surname> <given-names>X.</given-names></name>
<name><surname>Mai</surname> <given-names>Z.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
<name><surname>Wang</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2019</year>). 
<article-title>Segmentation and counting algorithm for touching hybrid rice grains</article-title>. <source>Comput. Electron. Agric.</source> <volume>162</volume>, <fpage>493</fpage>&#x2013;<lpage>504</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2019.04.030</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Tang</surname> <given-names>Z.</given-names></name>
<name><surname>Chen</surname> <given-names>Z.</given-names></name>
<name><surname>Gao</surname> <given-names>Y.</given-names></name>
<name><surname>Xue</surname> <given-names>R.</given-names></name>
<name><surname>Geng</surname> <given-names>Z.</given-names></name>
<name><surname>Li</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). 
<article-title>A strategy for the acquisition and analysis of image-based phenome in rice during the whole growth period</article-title>. <source>Plant Phenomics</source> <volume>5</volume>, <fpage>0058</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.34133/plantphenomics.0058</pub-id>, PMID: <pub-id pub-id-type="pmid">37304154</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Thesma</surname> <given-names>V.</given-names></name>
<name><surname>Rains</surname> <given-names>C. G.</given-names></name>
<name><surname>Velni</surname> <given-names>M. J.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Development of a low-cost distributed computing pipeline for high-throughput cotton phenotyping</article-title>. <source>Sensors</source> <volume>24</volume>, <fpage>970</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s24030970</pub-id>, PMID: <pub-id pub-id-type="pmid">38339687</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Tsaftaris</surname> <given-names>S. A.</given-names></name>
<name><surname>Scharr</surname> <given-names>H.</given-names></name>
</person-group> (<year>2019</year>). 
<article-title>Sharing the data: the grand challenge of image analysis in plant sciences</article-title>. <source>GigaScience</source> <volume>8</volume>, <fpage>giz065</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/gigascience/giz065</pub-id>, PMID: <pub-id pub-id-type="pmid">31220249</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Woo</surname> <given-names>S.</given-names></name>
<name><surname>Park</surname> <given-names>J.</given-names></name>
<name><surname>Lee</surname> <given-names>J. Y.</given-names></name>
<name><surname>Kweon</surname> <given-names>I. S.</given-names></name>
</person-group> (<year>2018</year>). &#x201c;
<article-title>Cbam: Convolutional block attention module</article-title>,&#x201d; in <source>Proceedings of the European conference on computer vision (ECCV)</source> (<publisher-loc>Berlin, Germany</publisher-loc>: 
<publisher-name>Springer</publisher-name>) <fpage>3</fpage>&#x2013;<lpage>19</lpage>.
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wu</surname> <given-names>W.</given-names></name>
<name><surname>Liu</surname> <given-names>T.</given-names></name>
<name><surname>Zhou</surname> <given-names>P.</given-names></name>
<name><surname>Zhang</surname> <given-names>Y.</given-names></name>
<name><surname>Li</surname> <given-names>J.</given-names></name>
<name><surname>Zhao</surname> <given-names>H.</given-names></name>
<etal/>
</person-group>. (<year>2019</year>). 
<article-title>Image analysis-based recognition and quantification of grain number per panicle in rice</article-title>. <source>Plant Methods</source> <volume>15</volume>, <fpage>122</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13007-019-0510-0</pub-id>, PMID: <pub-id pub-id-type="pmid">31695727</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wu</surname> <given-names>W.</given-names></name>
<name><surname>Yang</surname> <given-names>T.</given-names></name>
<name><surname>Li</surname> <given-names>R.</given-names></name>
<name><surname>Zhang</surname> <given-names>Y.</given-names></name>
<name><surname>Li</surname> <given-names>J.</given-names></name>
<name><surname>Zhao</surname> <given-names>H.</given-names></name>
<etal/>
</person-group>. (<year>2020</year>). 
<article-title>Detection and enumeration of wheat grains based on a deep learning method under various scenarios and scales</article-title>. <source>J. Integr. Agric.</source> <volume>19</volume>, <fpage>1998</fpage>&#x2013;<lpage>2008</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S2095-3119(19)62803-0</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Xiong</surname> <given-names>B.</given-names></name>
</person-group> (<year>2023</year>) <source>Measurement of rice grain traits based on image processing</source> (<publisher-loc>Shanghai, China</publisher-loc>: 
<publisher-name>Doctoral dissertation, Shanghai Normal University</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.27312/d.cnki.gshsu.2023.000423</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yang</surname> <given-names>W.</given-names></name>
<name><surname>Feng</surname> <given-names>H.</given-names></name>
<name><surname>Zhang</surname> <given-names>X.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
<name><surname>Wang</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2020</year>). 
<article-title>Crop phenomics and high-throughput phenotyping: Past decades, current challenges, and future perspectives</article-title>. <source>Mol. Plant</source> <volume>13</volume>, <fpage>187</fpage>&#x2013;<lpage>214</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.molp.2020.01.008</pub-id>, PMID: <pub-id pub-id-type="pmid">31981735</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yao</surname> <given-names>B.</given-names></name>
<name><surname>Feng</surname> <given-names>H.</given-names></name>
<name><surname>Gao</surname> <given-names>Y.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
<name><surname>Wang</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2017</year>). 
<article-title>Dynamic pruning search algorithm based on mandatory node sets</article-title>. <source>Comput. Eng. Appl.</source> <volume>53</volume>, <fpage>57</fpage>&#x2013;<lpage>62</lpage>.
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yu</surname> <given-names>X.</given-names></name>
<name><surname>Wu</surname> <given-names>W.</given-names></name>
<name><surname>Fu</surname> <given-names>D.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
<name><surname>Wang</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Genome-wide prediction of rice plant and grain morphological traits based on deep learning</article-title>. <source>Mol. Plant Breed</source>. Available online at: <uri xlink:href="http://kns.cnki.net/kcms/detail/46.1068.S.20241121.1138.004.html">http://kns.cnki.net/kcms/detail/46.1068.S.20241121.1138.004.html</uri> (Accessed <date-in-citation content-type="access-date">November 21, 2024</date-in-citation>).
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>C.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Ding</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
<name><surname>Li</surname> <given-names>J.</given-names></name>
<name><surname>Zhao</surname> <given-names>H.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Rice yield prediction under environmental factors based on deep learning</article-title>. <source>Appl. Electronic Technique</source> <volume>50</volume>, <fpage>81</fpage>&#x2013;<lpage>86</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.16157/j.issn.0258-7998.234657</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhao</surname> <given-names>C.</given-names></name>
<name><surname>Zhang</surname> <given-names>Y.</given-names></name>
<name><surname>Du</surname> <given-names>J.</given-names></name>
<name><surname>Guo</surname> <given-names>X.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>L.</given-names></name>
<etal/>
</person-group>. (<year>2019</year>). 
<article-title>Crop phenomics: current status and perspectives</article-title>. <source>Front. Plant Sci.</source> <volume>10</volume>, <elocation-id>714</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2019.00714</pub-id>, PMID: <pub-id pub-id-type="pmid">31214228</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3068311">Yu Nishizawa</ext-link>, Kagoshima University, Japan</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/55398">Zou Yu</ext-link>, Nanjing Agricultural University, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3111958">Zedong Geng</ext-link>, Huazhong Agricultural University, China</p></fn>
</fn-group>
</back>
</article>