<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2026.1765317</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>XooNet: a high-throughput UAV-based approach for field screening of bacterial blight-resistant germplasm in wild rice</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Pan</surname><given-names>Pan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2361109/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Guo</surname><given-names>Wenlong</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Mingxia</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Haochun</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Yang</surname><given-names>Jingxi</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Guo</surname><given-names>Zhihao</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhao</surname><given-names>Huibo</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Yu</surname><given-names>Guoping</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Maomao</given-names></name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2931328/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Yi</surname><given-names>Long</given-names></name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zheng</surname><given-names>Xiaoming</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="aff" rid="aff9"><sup>9</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/829734/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhou</surname><given-names>Guomin</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff10"><sup>10</sup></xref>
<xref ref-type="aff" rid="aff11"><sup>11</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zhang</surname><given-names>Jianhua</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3164007/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>National Nanfan Research Institute, Chinese Academy of Agricultural Sciences</institution>, <city>Sanya</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>Agricultural Information Institute, Chinese Academy of Agricultural Sciences</institution>, <city>Beijing</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff3"><label>3</label><institution>National Agriculture Science Data Center</institution>, <city>Beijing</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff4"><label>4</label><institution>College of Agronomy and Biotechnology, China Agricultural University</institution>, <city>Beijing</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff5"><label>5</label><institution>Yazhouwan National Laboratory</institution>, <city>Sanya</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff6"><label>6</label><institution>China National Rice Research Institute</institution>, <city>Hangzhou</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff7"><label>7</label><institution>Rice Research Institute, Jiangxi Academy of Agricultural Sciences</institution>, <city>Nanchang</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff8"><label>8</label><institution>Jiangxi Provincial Crop Germplasm Resources Research Center</institution>, <city>Nanchang</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff9"><label>9</label><institution>Institute of Crop Science, Chinese Academy of Agricultural Sciences</institution>, <city>Beijing</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff10"><label>10</label><institution>Nanjing Institute of Agricultural Mechanization, Ministry of Agriculture and Rural Affairs</institution>, <city>Nanjing</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff11"><label>11</label><institution>Institute of Western Agriculture, Chinese Academy of Agricultural Sciences</institution>, <city>Changji</city>, <state>Xinjiang</state>,&#xa0;<country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Jianhua Zhang, <email xlink:href="mailto:zhangjianhua@caas.cn">zhangjianhua@caas.cn</email>; Guoping Yu, <email xlink:href="mailto:yuguoping@caas.cn">yuguoping@caas.cn</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-20">
<day>20</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1765317</elocation-id>
<history>
<date date-type="received">
<day>11</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>19</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Pan, Guo, Li, Li, Yang, Guo, Zhao, Yu, Li, Yi, Zheng, Zhou and Zhang.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Pan, Guo, Li, Li, Yang, Guo, Zhao, Yu, Li, Yi, Zheng, Zhou and Zhang</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-20">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Bacterial blight (BB) poses a significant threat to rice production, necessitating efficient screening of resistant wild rice germplasm to facilitate breeding. Traditional methods are labor-intensive and subjective, while existing UAV-based approaches suffer from high costs or incomplete solutions. This study introduces XooNet, a novel UAV-based method for automated BB resistance screening in wild rice, which classifies wild rice into several levels based on BB resistance. To facilitate this method, a high-precision and lightweight oriented bounding box (OBB) detection algorithm for BB in wild rice has been developed. Experimental results show that the screening method achieved an accuracy of 97.5%. After applying the LAMP pruning strategy to balance performance and efficiency, the detection model achieved an accuracy of 93.1% with a significantly reduced parameter size of 1.4M and a computational complexity of 3.5 GFLOPs. This approach will facilitate the high-throughput screening of extensive wild rice germplasm for BB resistance, thereby expediting the discovery of valuable wild rice genetic resources.</p>
</abstract>
<kwd-group>
<kwd>bacterial blight</kwd>
<kwd>deep learning</kwd>
<kwd>disease-resistant breeding</kwd>
<kwd>germplasm screening</kwd>
<kwd>UAV</kwd>
<kwd>wild rice</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by Project of Sanya Yazhou Bay Science and Technology City (No. SCKJ-JYRC-2023-45, SKJC-JYRC-2023-47), and The National Key Research and Development Program of China (No. 2021YFD1200503, 2022YFF0711805), and Hainan Provincial Natural Science Foundation (325MS155), and Jiangxi Province Modern Seed Industry Development Special Project, and Key R&amp;D Projects in Hainan Province (ZDYF2024KJTPY018), and The Special Fund of Chinese Central Government for Basic Scientific Research Operations in Commonweal Research Institutes (JBYW-AII-2024-05, JBYW-AII-2025-05, Y2025YC90), and Nanfan special project, CAAS Grant Nos. YBXM2409, YBXM2410, YBXM2430, YBXM2508, YBXM2509, YBXM2562, YBXM2448, YBXM2527, and the Hainan Seed Industry Laboratory (Grant No. B25H1JC14).</funding-statement>
</funding-group>
<counts>
<fig-count count="8"/>
<table-count count="7"/>
<equation-count count="7"/>
<ref-count count="40"/>
<page-count count="17"/>
<word-count count="9470"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Sustainable and Intelligent Phytoprotection</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Rice (<italic>Oryza sativa</italic>) is one of the most important staple crops globally, feeding nearly half of the world&#x2019;s population (<xref ref-type="bibr" rid="B16">Londo et&#xa0;al., 2006</xref>; <xref ref-type="bibr" rid="B18">Muthayya et&#xa0;al., 2014</xref>; <xref ref-type="bibr" rid="B4">Fukagawa and Ziska, 2019</xref>). The spread of bacterial blight (BB) severely impacts both rice yield and quality, with potential reductions exceeding 70% in severe cases (<xref ref-type="bibr" rid="B5">Gnanamanickam et&#xa0;al., 1999</xref>; <xref ref-type="bibr" rid="B11">Kumar et&#xa0;al., 2023</xref>; <xref ref-type="bibr" rid="B25">Sabar et&#xa0;al., 2024</xref>). Currently, fungicides such as thiazole zinc, tebuconazole, and prochloraz are primarily used to control this disease (<xref ref-type="bibr" rid="B28">Shcherbakova, 2019</xref>; <xref ref-type="bibr" rid="B6">Gupta and Gupta, 2025</xref>). However, these chemical treatments pose risks of environmental contamination and chemical residues (<xref ref-type="bibr" rid="B32">Thompson and Darwish, 2019</xref>). The most cost-effective and efficient approach to controlling BB is to identify and utilize resistance genes for breeding disease-resistant rice varieties (<xref ref-type="bibr" rid="B26">Sahu et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B8">Jain et&#xa0;al., 2025</xref>). Selecting materials with BB resistance is essential for identifying disease resistance genes and breeding disease-resistant rice varieties (<xref ref-type="bibr" rid="B37">Zhang et&#xa0;al., 2020</xref>).</p>
<p>Wild rice, through prolonged natural selection, has accumulated numerous resistance genes that enable adaptation to harsh environments, making it a valuable source for identifying BB resistance genes (<xref ref-type="bibr" rid="B40">Ziyi et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B38">Zheng et&#xa0;al., 2024</xref>; <xref ref-type="bibr" rid="B22">Pan et&#xa0;al., 2025</xref>). Key disease-resistant genes such as <italic>Xa21</italic> (<xref ref-type="bibr" rid="B30">Song et&#xa0;al., 1995</xref>), <italic>Xa23</italic> (<xref ref-type="bibr" rid="B33">Wang et&#xa0;al., 2015</xref>), and <italic>Xa27</italic> (<xref ref-type="bibr" rid="B35">Xu et&#xa0;al., 2024</xref>) have been discovered in wild rice and successfully incorporated into rice breeding, significantly enhancing resistance to BB (<xref ref-type="bibr" rid="B1">Amante-Bordeos et&#xa0;al., 1992</xref>).</p>
<p>Currently, screening of BB-resistant germplasm in wild rice is typically conducted by breeding researchers who monitor and record disease progression daily in wild rice disease fields. This method is inefficient, subjective, lacks data consistency, and is not suitable for large-scale field applications (<xref ref-type="bibr" rid="B31">Tannidi et&#xa0;al., 2025</xref>). Furthermore, the continuous evolution of <italic>Xoo</italic> physiologic races and changes in geographic, ecological, and cultivation conditions can lead to a decline in the resistance of highly resistant varieties, requiring ongoing screening efforts (<xref ref-type="bibr" rid="B15">Li et&#xa0;al., 2025</xref>; <xref ref-type="bibr" rid="B36">Zhang and Li, 2025</xref>). This demands significant time, effort, and resources from researchers (<xref ref-type="bibr" rid="B19">Oliveira-Garcia et&#xa0;al., 2024</xref>; <xref ref-type="bibr" rid="B34">Wang et&#xa0;al., 2025</xref>). Therefore, there is a pressing need for the development of automated, high-throughput methods for screening BB resistance in wild rice (<xref ref-type="bibr" rid="B24">Pan et&#xa0;al., 2023b</xref>).</p>
<p>In the past five years, the rapid development of drones and deep learning has led to studies utilizing UAVs for screening rice disease resistance. <xref ref-type="bibr" rid="B20">Pan et&#xa0;al. (2023a)</xref> developed the Xoo-YOLO detection model for detecting BB in wild rice. Although the model achieved high detection accuracy, it only focused on disease target detection and proposed future applications for screening resistant materials, but this has not yet been realized. Additionally, the model still has room for optimization in terms of size and computational complexity. Another study by <xref ref-type="bibr" rid="B2">Bai et&#xa0;al. (2023)</xref> analyzed spectral data to develop a UAV-based remote sensing model for screening rice germplasm with BB resistance. This model effectively meets the needs of large-scale screening and achieves high accuracy. However, due to the use of hyperspectral equipment, which is costly (over $80,000) and requires operators with high technical expertise, it has not been widely adopted by breeding laboratories.</p>
<p>Although the previous studies have certain limitations, such as high equipment costs, operational complexity, and the lack of an end-to-end screening solution, they have demonstrated the feasibility of using UAVs for screening BB-resistant wild rice materials. This approach has proven effective for large-scale screening with high accuracy (<xref ref-type="bibr" rid="B27">Shaodan et&#xa0;al., 2023</xref>; <xref ref-type="bibr" rid="B17">Lu et&#xa0;al., 2025</xref>; <xref ref-type="bibr" rid="B39">Zhu et&#xa0;al., 2025</xref>). To address the deficiencies in existing studies that limit their large-scale application in breeding, this paper proposes a novel UAV-based screening method for wild rice BB resistance, aiming to accelerate the utilization of wild rice in rice breeding for BB resistance.</p>
<p>The main contributions of this paper are as follows:</p>
<list list-type="simple">
<list-item>
<p>(1) A UAV-based screening method for wild rice BB resistance, XooNet, is proposed.</p></list-item>
<list-item>
<p>(2) A high-precision and lightweight OBB detection algorithm for BB in wild rice is developed, suitable for this screening method.</p></list-item>
<list-item>
<p>(3) The proposed detection model and screening method are evaluated for performance to verify their effectiveness and reliability.</p></list-item>
</list>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Material preparation and image acquisition</title>
<p>The wild rice germplasm was provided by the Yazhou Bay National Laboratory and the Institute of Crop Science, Chinese Academy of Agricultural Sciences, and is stored at the National Wild Rice Germplasm Repository in Sanya, Hainan Province. Samples were collected from regions including Guangxi, Hainan, and Yunnan, encompassing both ordinary and medicinal wild rice varieties. Disease plots were established on February 18, 2023, and March 3, 2024, at the Potianyang base in Yazhou District, Sanya City (Latitude 18&#xb0;23&#x2032;36&#x2033;, Longitude 109&#xb0;9&#x2032;52&#x2033;). Based on their growth status, tillering, and population distribution, 120 and 50 representative and diverse wild rice samples were transplanted. After transplanting, compound fertilizer was applied one week later, with regular water changes, field cleaning, and pest control carried out.</p>
<p>After tillering reached the reproductive stage, wild rice was inoculated with the bacterial blight pathogen <italic>Xanthomonas oryzae</italic> pv. <italic>oryzae</italic> (strain PXO99, a highly virulent, widely pathogenic Philippine race 6) following the Rice Bacterial Blight Resistance Identification Technical Standards. The bacteria, strain PXO99, known for its high virulence and broad pathogenicity, were cultured on PSA medium (10 g/L tryptone, 10 g/L sucrose, 1 g/L monosodium glutamate, 15 g/L agar, pH 7.0) for 2 days, then washed with sterile water and diluted to an OD<sub>600</sub> of 0.6. Using scissors dipped in the bacterial solution, leaves of individual wild rice plants were cut 3&#x2013;5 cm from the leaf tip, with at least 7&#x2013;10 leaves inoculated per sample. Jingang 30, a susceptible variety, was used as a positive control, with inoculation considered valid only if the control exhibited symptoms.</p>
<p>RGB images of rice infected with BB were captured at different time points using DJI Mini 2 (DJI Innovations, Shenzhen, China) and DJI Mini 4 Pro (DJI Innovations, Shenzhen, China) UAVs. The flight altitude was set between 0.6 m and 1.5 m above the wild rice field, with the camera gimbal pitch adjusted to -90&#xb0; to -60&#xb0;. The images had a resolution of 1920&#xd7;1080 pixels and were saved in JPG format. Image acquisition occurred between 5 and 21 days post-infection, during the morning (8:00&#x2013;11:00) and afternoon (16:30&#x2013;18:30), under weather conditions such as sunny, cloudy, and overcast, with ambient temperatures ranging from 22 &#xb0;C to 30 &#xb0;C. All images were taken under natural field conditions with ambient lighting, without the use of a flash. The images may contain varying degrees of obstruction, water surface reflection, overexposure, as well as noise from weeds, dead leaves, bird droppings, and other field debris. Each image contained between 1/4 and 2 wild rice plants. In total, 2,035 images were captured. Their quality and accuracy for representing BB symptoms were verified by two wild rice germplasm identification experts, confirming that the depicted lesions were definitively BB.</p>
<p>Data processing of the UAV images was carried out to ensure high-quality input for the model. To mitigate noise, overexposure, and occlusion, specific preprocessing techniques were applied. A Gaussian blur with a kernel size of 5&#xd7;5 and a standard deviation (&#x3c3;) of 1.5 was applied to reduce high-frequency noise. Morphological opening operations using a 3&#xd7;3 rectangular structuring element were employed to clean small debris and refine the boundaries of detected lesions. Additionally, Z-score normalization was performed to standardize lighting conditions across the images by subtracting the mean and dividing by the standard deviation of the dataset, thereby reducing the effect of varying weather and light conditions during UAV capture.</p>
<p><xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref> illustrates the process of material preparation and image acquisition.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>The process of material preparation and image acquisition.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1765317-g001.tif">
<alt-text content-type="machine-generated">Illustration showing a drone capturing images of rice plants at specific angles and distances for a period of 5 to 21 days without flash, followed by manual cutting of rice plants and transferring samples to a laboratory flask for further analysis.</alt-text>
</graphic></fig>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Image preprocessing and dataset construction</title>
<p>To meet the input and training requirements of deep learning models, this study employed an image cropping strategy, dividing the raw UAV-captured images into multiple 640&#xd7;640 pixel blocks. A sliding window method with a fixed step size was used to ensure that each cropped region covered different parts of the original image, creating independent training samples. Additionally, to enhance dataset diversity and model robustness, data augmentation techniques were applied, including random rotations, horizontal and vertical flips, scaling, and color adjustments. This resulted in a total of 12,210 images. The dataset, comprising images collected from both the 2023 and 2024 growing seasons, was pooled together and randomly shuffled. It was then split into training (9,768 images), validation (1,221 images), and testing (1,221 images) sets in an 8:1:1 ratio. This random splitting strategy ensures that the model learns from a diverse range of environmental conditions present in both years. The BB lesion areas on wild rice were manually annotated using the open-source software roLabelImg, with the annotation information saved in .xml format.</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>BB detection model</title>
<sec id="s2_3_1">
<label>2.3.1</label>
<title>Overall model</title>
<p>YOLOv11, introduced in 2024, is an advanced object detection model that offers various versions, including the standard YOLOv11, YOLOv11-OBB, YOLOv11-Seg for segmentation tasks, and YOLOv11-Pose for pose estimation (<xref ref-type="bibr" rid="B9">Jegham et&#xa0;al., 2024</xref>; <xref ref-type="bibr" rid="B23">Pan et&#xa0;al., 2024b</xref>; <xref ref-type="bibr" rid="B7">Hidayatullah et&#xa0;al., 2025</xref>). Additionally, it comes in multiple size variants: n, s, l, and x, with each variant optimized for different levels of computational efficiency and performance (<xref ref-type="bibr" rid="B10">Khanam and Hussain, 2024</xref>).</p>
<p>The UAV captures images from above, often presenting targets at various angles. Additionally, bacterial blight (BB) typically manifests as elongated lesions. Considering these factors, the YOLOv11-OBB model was selected. To optimize model efficiency and reduce computational complexity, the smallest variant, the n model, was chosen as the foundation for improvements. As shown in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>, the improved model&#x2019;s overall structure consists of three main components: Backbone, Neck, and Head.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Structure of the BB detection model.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1765317-g002.tif">
<alt-text content-type="machine-generated">A flowchart illustrates a deep learning model architecture for detecting rice leaf blight in field images. The process starts with a raw image input, passes through multiple Conv2d, C3k2FC, and specialized layers, then follows concatenation, upsampling, and block connections. Outputs are indicated as “LiteHead” with separate branches. Example images show original rice plants and detected blight regions outlined in blue with confidence scores appearing as text labels.</alt-text>
</graphic></fig>
<p>The key improvements are outlined below:</p>
<list list-type="order">
<list-item>
<p>C3k2FC Module in Backbone: Integrates PConv and CGLU into the C3k2 structure to reduce redundancy and enhance efficiency.</p></list-item>
<list-item>
<p>SPPF_LSKA Module in Backbone: Incorporates LSKA post-pooling to improve multi-scale feature aggregation and reduce noise.</p></list-item>
<list-item>
<p>SlimNeck in Neck: Utilizes GSConv and VoVGSCSP to optimize feature fusion while minimizing FLOPs.</p></list-item>
<list-item>
<p>LiteHead in Detection Head: Employs separated Batch Normalization (BN) and dynamic anchors for improved handling of rotations.</p></list-item>
<list-item>
<p>LAMP Pruning: Implements adaptive global pruning to compress model parameters while maintaining accuracy.</p></list-item>
</list>
</sec>
<sec id="s2_3_2">
<label>2.3.2</label>
<title>C3k2FC</title>
<p>The C3k2 module in YOLOv11-OBB, based on the Cross-Stage Partial (CSP) structure, splits the input feature map into two parallel branches. This design enhances feature reuse and gradient flow by repeating bottleneck units, supporting flexible configurations of layers and channels to balance model depth and computational efficiency. However, this module suffers from redundant convolution operations, increasing computational cost and complicating deployment on resource-constrained UAV embedded systems.</p>
<p>To address these issues, this paper integrates the FasterNet Block&#x2019;s Partial Convolution (PConv) and the TransNext CGLU module to enhance the C3k2 module in both the Backbone and Neck networks (<xref ref-type="bibr" rid="B3">Chen et&#xa0;al., 2023</xref>; <xref ref-type="bibr" rid="B21">Pan et&#xa0;al., 2024a</xref>; <xref ref-type="bibr" rid="B29">Shi, 2024</xref>). This modification aims to achieve a lightweight design, reduce computational redundancy, and minimize parameter size, making it more suitable for UAV embedded systems with limited resources. The CGLU module, an evolution of the GLU gated linear unit, extends local spatial perception by inserting a 3&#xd7;3 depthwise convolution before the activation function in the gating branch. The module consists of two linear transformation paths: one path applies a 3&#xd7;3 depthwise convolution followed by a gated activation function to strengthen local feature capture, while the other generates the baseline output. These paths are merged via element-wise multiplication, adjusting feature weights, and the final output is produced through linear projection and a residual connection with the input. This design optimizes the gating signal using local information from the depthwise convolutions, adjusting inter-channel dependencies. The FasterNet Block simplifies the network using an inverted residual structure, starting with a 1&#xd7;1 pointwise convolution for channel expansion and feature compression. It then inserts the PConv module, which applies a 3&#xd7;3 standard convolution to only a quarter of the input channels for spatial feature mixing, while the remaining channels are passed through identity mapping to reduce full-channel computation. Following this, a normalization layer and ReLU activation function maintain feature distribution and non-linearity, concluding with another 1&#xd7;1 pointwise convolution for channel restoration and output integration.</p>
<p>In this study, we replace the two 1&#xd7;1 pointwise convolutions in the FasterNet Block with the CGLU module, forming an improved FC structure. We also integrate PConv into the depthwise convolution section of the C3k2 bottleneck, resulting in the improved C3k2FC module, as shown in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>. This structure optimizes feature extraction through partial channel convolutions and gating mechanisms, reducing floating-point operations and memory access. The FC Block, with the PConv module, extracts multi-scale features from the input image while maintaining low computational overhead. Combined with the CGLU module, the design supports dynamic feature weight allocation. The improved C3k2FC module integrates these components to capture both detailed and global information, supporting wild rice BB detection and localization from UAV perspectives, and is suitable for edge device deployment.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Structure of C3k2FC.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1765317-g003.tif">
<alt-text content-type="machine-generated">Flowchart diagram of a neural network model architecture showing three main sections: the leftmost pipeline includes Conv2d, Split, fully connected (FC) layers, Concat, and another Conv2d; the central section labeled FC includes layers PConv, CGLU, DropPath, Add, and Conv2d; the rightmost section expands CGLU as a flow of Linear, Depthwise Convolution (DWConv), Activation, Multiply, Linear, and Add, with interconnected pathways for feature processing.</alt-text>
</graphic></fig>
</sec>
<sec id="s2_3_3">
<label>2.3.3</label>
<title>SPPF_LSKA</title>
<p>The SPPF module in YOLOv11, based on the spatial pyramid pooling-fast (SPPF) structure, aggregates multi-scale features by applying maximum pooling operations of different sizes in parallel. These features are then fused through channel concatenation and convolution to extract global context. The module aims to reduce information loss and enhance adaptability to targets of varying scales, supporting efficient feature representation. However, the pooling operations in this module have limitations, leading to inefficient feature aggregation in complex scenes and the potential introduction of background noise. This is especially problematic when dealing with UAV-captured rice field images that involve varying lighting, leaf occlusion, and resolution fluctuations, which may cause fine-grained details to be overlooked or irrelevant features to be amplified, resulting in false detections and degraded performance.</p>
<p>To address these challenges, we propose the integration of the Large Separable Kernel Attention (LSKA) mechanism to improve the SPPF module in the Backbone network, achieving a lightweight design while enhancing multi-scale feature extraction and local attention capabilities. The LSKA module, based on separable convolutions, decomposes large kernel convolutions into 1D separable operations in the horizontal and vertical directions to simulate large receptive field attention. This approach, combined with depth-wise separable convolutions, reduces parameter count.</p>
<p>The SPPF_LSKA module integrates the LSKA mechanism (<xref ref-type="bibr" rid="B12">Lau et&#xa0;al., 2024</xref>) after the pooling layers of the traditional SPPF, as shown in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>. The structure first extracts multi-resolution features through multiple maximum pooling branches, then applies the LSKA module to perform attention weighting on the concatenated features. The horizontal and vertical branches of the LSKA handle spatial information, and the depthwise separable convolution further optimizes channel mixing. Finally, the fused features are output through a 1&#xd7;1 convolution to enhance both global and local semantic representations.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Structure of SPPF_LSKA.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1765317-g004.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a neural network architecture. The left path includes sequential blocks: Conv2d, three MaxPool layers, Concat, LSKA, and Conv 1×1. LSKA, highlighted and expanded on the right, contains sequential steps: two DWConv layers, two yellow DW-D-Conv layers, and a Conv2d layer. Black arrows indicate workflow direction.</alt-text>
</graphic></fig>
<p>This study replaces the original SPPF in YOLOv11-OBB with the improved SPPF_LSKA module, optimizing multi-scale feature integration through attention mechanisms to support wild rice BB detection and localization from UAV perspectives.</p>
</sec>
<sec id="s2_3_4">
<label>2.3.4</label>
<title>SlimNeck</title>
<p>The standard convolution (SC) component in YOLOv11-OBB captures features by processing multi-channel data through parallel multi-kernel operations. While this promotes deep interactions between channels, it also leads to parameter inflation and a significant increase in floating-point operations (FLOPs), which limits real-time performance, especially on resource-constrained UAV embedded systems. On the other hand, efficient architectures like MobileNet and ShuffleNet alleviate the computational burden by using depthwise separable convolution (DSC). However, DSC isolates channel data during computation, which significantly reduces feature integration and extraction effectiveness. This limitation is problematic for wild rice BB detection from UAV perspectives, where noise from lighting changes, leaf occlusion, and resolution fluctuations can lead to missed or mislocalized small lesions.</p>
<p>To accelerate computation without compromising detection accuracy, this study integrates the GSConv composite convolution unit into the Neck structure of YOLOv11-OBB. The GSConv unit incorporates a Shuffle mechanism to merge the channel association data derived from SC with the spatial output from DSC (<xref ref-type="bibr" rid="B14">Li et&#xa0;al., 2022</xref>). Compared to DSC alone, GSConv reduces computational burden while retaining potential correlations, optimizing both accuracy and response time&#x2014;ideal for crop disease detection on edge hardware.</p>
<p>The core components of the GSConv unit include Conv, DWConv, Concat, and Shuffle operations. The architecture is constructed through the following process: The input feature matrix with C1&#x200b; channels is first processed by applying DSC to half of the channels to capture spatial details, while SC is applied to the other half for channel fusion. The outputs from both sides are then concatenated along the channel axis. Subsequently, a Shuffle operation is applied to the concatenated result to promote random channel reorganization and enhance data interaction. The final output matrix contains C2 channels. The VoVGSCSP, an iterative bottleneck combination for GSConv, splits the input matrix channels into two segments. One segment undergoes Conv preprocessing and is passed through a series of GS bottleneck units to extract features, while the other segment serves as a residual path with only a single Conv transformation. This unit, leveraging the CSP framework, enhances feature reuse, employs a one-shot aggregation method to minimize data loss, and integrates batch normalization and SiLU activation to maintain feature consistency.</p>
<p>Finally, this study reconstructs the Neck framework of YOLOv11-OBB using GSConv and VoVGSCSP to form the SlimNeck architecture. These modifications reduce the model&#x2019;s computational burden, enabling faster inference and data processing while enhancing multi-resolution feature integration. This optimization strikes an effective balance between speed and accuracy.</p>
</sec>
<sec id="s2_3_5">
<label>2.3.5</label>
<title>LiteHead</title>
<p>The detection head of YOLOv11-OBB uses a shared convolution mechanism to process multi-scale feature outputs for boundary box predictions. While this promotes parameter reuse, the use of unified Batch Normalization (BN) can lead to inaccurate moving averages when there are significant statistical differences between features at different levels. This can negatively affect training stability and generalization performance. Additionally, the static anchor box design struggles to adapt to the rotation and scale variations of BB lesions, under the challenges posed by UAVs, such as leaf occlusion, resolution fluctuations, and changing lighting conditions. This often results in localization errors or missed small targets, limiting overall detection performance.</p>
<p>To address these issues, this paper introduces the LiteHead detection head. This optimization is designed to improve computational efficiency, reduce parameter size and operational load, and make the system more suitable for resource-constrained UAV embedded systems. The LiteHead detection head mitigates BN-related issues by separately processing statistical differences between features at different levels, ensuring independent sliding averages and preventing bias accumulation in shared parameters. Additionally, the dynamic anchor box generator adapts the scale, angle, and aspect ratio of anchors based on input features, enhancing the alignment with rotating targets.</p>
<p>The core components of the LiteHead detection head include: a shared convolution layer for multi-task reuse to reduce redundant parameters; a separated BN module that applies BN independently to each detection branch, optimizing feature distribution and speeding up convergence; and a dynamic anchor box generator that computes anchor parameters in real-time using feature map statistics, supporting the prediction of rotated bounding boxes (OBBs). The workflow is as follows: multi-scale feature maps are processed by the shared convolution to extract boundary box regression and classification scores; separated BN standardizes each layer&#x2019;s output; and the dynamic anchor box module generates adaptive anchors based on feature clustering or gradient guidance, ultimately outputting the OBB predictions.</p>
<p>In this study, LiteHead is integrated into the detection head of YOLOv11-OBB by replacing the standard shared convolution and static anchor boxes. This design optimizes the feature processing pipeline, reduces floating-point operations and memory access, and enhances the ability to capture rotational details and global information. The LiteHead architecture is well-suited for BB detection and localization in UAV-captured rice field images.</p>
</sec>
<sec id="s2_3_6">
<label>2.3.6</label>
<title>LAMP</title>
<p>Despite the introduction of lightweight modules, the YOLOv11-OBB model still faces parameter redundancy and computational intensity, resulting in increased floating-point operations (GFLOPs) and memory requirements. This issue is particularly problematic for resource-constrained UAV embedded systems, where it can lead to inference delays and deployment bottlenecks.</p>
<p>To address these challenges, this study introduces Layer-adaptive Magnitude-based Pruning (LAMP) to further optimize the YOLOv11-OBB model (<xref ref-type="bibr" rid="B13">Lee et&#xa0;al., 2020</xref>). LAMP performs global pruning to reduce both parameter size and computational overhead, making the model more suitable for the computational constraints of UAV platforms. LAMP is a magnitude-based pruning strategy that calculates a layer-adaptive importance score (LAMP score) to determine pruning thresholds. This avoids performance degradation due to uniform sparsity and dynamically adjusts the sparsity of each layer based on the statistical characteristics of the weight distribution. As shown in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>, early layers retain more connections to capture fine-grained features, while later layers undergo more aggressive pruning to reduce complexity.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Process of LAMP.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1765317-g005.tif">
<alt-text content-type="machine-generated">Diagram illustrating a neural network process with labeled blocks: an initial image passes through a channel, then undergoes sparsification involving weights and nodes, followed by pruning, and results in a final processed image.</alt-text>
</graphic></fig>
<p>The LAMP pruning process proceeds in four stages: pre-training the complete model to obtain initial weights; calculating the global LAMP scores to sort all weights and set thresholds according to the target sparsity; applying magnitude-based pruning layer-by-layer to remove connections below the threshold; and finally fine-tuning the pruned model to restore accuracy, typically using techniques like knowledge distillation or learning rate scheduling.</p>
<p>Compared to traditional magnitude pruning, LAMP improves accuracy retention after pruning by using the layer-adaptive mechanism. This allows for significant reductions in parameters and GFLOPs without sacrificing detection performance.</p>
<p>In this study, LAMP is applied to the Backbone, Neck, and Detection Head of YOLOv11-OBB, resulting in a lightweight variant through iterative pruning and fine-tuning. This optimized structure enhances resource utilization and supports the capture of both fine details and global information, making it well-suited for wild rice BB detection and localization from UAV perspectives.</p>
</sec>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Screening method</title>
<p>The continuous movement of UAVs during video capture often results in partial occlusion of wild rice plants or the redundancy of the same BB target appearing across multiple frames. Consequently, relying solely on static detection from individual images can lead to missed, incorrect, or duplicate counts, thereby compromising the accuracy and stability of the screening process. To address these challenges, we propose an integrated screening workflow. As illustrated in <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>, this method consists of the following seven steps:</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>The overall framework of the proposed XooNet method.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1765317-g006.tif">
<alt-text content-type="machine-generated">Flowchart illustration depicting an automated plant disease evaluation system with three stages: image capture using drones, disease detection and tracking with annotated plant images and filter prediction steps, and screening and grading involving disease rate calculation, grade classification, and germplasm screening for resistance or susceptibility.</alt-text>
</graphic></fig>
<sec id="s2_4_1">
<label>2.4.1</label>
<title>Image capture and transmission</title>
<p>The user controls the UAV to fly over the wild rice disease field, adjusting the camera&#x2019;s focus and gimbal angle, and then activates the recording function. The captured video data is transmitted to the edge computing device via either wireless or wired connection.</p>
</sec>
<sec id="s2_4_2">
<label>2.4.2</label>
<title>Disease target detection</title>
<p>The video recorded by the UAV is processed through the BB detection model proposed in this paper, which detects and generates corresponding Oriented Bounding Boxes.</p>
</sec>
<sec id="s2_4_3">
<label>2.4.3</label>
<title>Kalman filter prediction</title>
<p>A multi-object tracking model is introduced, utilizing the Kalman Filter to predict the location of targets. The Kalman Filter is a recursive estimation algorithm that uses statistical inference based on historical data to estimate the current position of a target. This prediction provides prior information for disease detection in subsequent frames, reducing bias due to occlusion or detection errors.</p>
<p>The Kalman filter state update equation is expressed in <xref ref-type="disp-formula" rid="eq1">Equation 1</xref>:</p>
<disp-formula id="eq1"><label>(1)</label>
<mml:math display="block" id="M1"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:msub><mml:mtext>x</mml:mtext><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2223;</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>F</mml:mi><mml:mo>&#xb7;</mml:mo><mml:msub><mml:mtext>x</mml:mtext><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2223;</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mi>B</mml:mi><mml:mo>&#xb7;</mml:mo><mml:msub><mml:mtext>u</mml:mtext><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math display="inline" id="im1"><mml:mrow><mml:msub><mml:mtext>x</mml:mtext><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2223;</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> represents the predicted state at time t, F is the state transition matrix, B is the control input matrix, and <inline-formula>
<mml:math display="inline" id="im2"><mml:mrow><mml:msub><mml:mtext>u</mml:mtext><mml:mtext>t</mml:mtext></mml:msub></mml:mrow></mml:math></inline-formula> is the control vector. This prediction helps improve the accuracy of the detection in the following frames.</p>
</sec>
<sec id="s2_4_4">
<label>2.4.4</label>
<title>Matching detection and prediction</title>
<p>The Hungarian Algorithm is used to match the current frame&#x2019;s detection results with the Kalman Filter&#x2019;s predicted positions. The matching score, representing the similarity between targets, is calculated as shown in <xref ref-type="disp-formula" rid="eq2">Equation 2</xref>:</p>
<disp-formula id="eq2"><label>(2)</label>
<mml:math display="block" id="M2"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x3bb;</mml:mi><mml:mo>&#xb7;</mml:mo><mml:msub><mml:mrow><mml:mtext>IoU</mml:mtext></mml:mrow><mml:mrow><mml:mi>O</mml:mi><mml:mi>B</mml:mi><mml:mi>B</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mi>B</mml:mi><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mi>B</mml:mi><mml:mi>j</mml:mi><mml:mi>t</mml:mi></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>&#x3bb;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#xb7;</mml:mo><mml:msub><mml:mrow><mml:mtext>Sim</mml:mtext></mml:mrow><mml:mrow><mml:mtext>cos</mml:mtext></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mi>F</mml:mi><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mi>F</mml:mi><mml:mi>j</mml:mi><mml:mi>t</mml:mi></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<p>where IoU_OBB represents the Intersection over Union calculated based on the Oriented Bounding Boxes (OBB) to accurately account for the rotation of the long, narrow lesions. <inline-formula>
<mml:math display="inline" id="im3"><mml:mrow><mml:msub><mml:mrow><mml:mtext>Sim</mml:mtext></mml:mrow><mml:mrow><mml:mtext>cos</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> represents the Cosine Similarity (calculated as 1 &#x2212; Cosine Distance) between the re-identification (ReID) feature vectors. The feature vectors (<italic>F</italic>) are 128-dimensional embeddings extracted using a lightweight CNN branch integrated into the detection head. &#x3bb; is a weight parameter balancing spatial overlap and appearance consistency; based on empirical testing on the validation set, &#x3bb; was set to 0.7.</p>
</sec>
<sec id="s2_4_5">
<label>2.4.5</label>
<title>Disease counting</title>
<p>The BB targets in the tracking list, each with a unique identifier, are counted for BB occurrences.</p>
</sec>
<sec id="s2_4_6">
<label>2.4.6</label>
<title>Disease grade classification</title>
<p>Based on the disease count and the number of disease inoculations in wild rice, the disease incidence rate is calculated using <xref ref-type="disp-formula" rid="eq3">Equation 3</xref>:</p>
<disp-formula id="eq3"><label>(3)</label>
<mml:math display="block" id="M3"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mtext>Disease&#xa0;Incidence&#xa0;Rate&#xa0;</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mtext>&#xa0;Disease&#xa0;Count&#xa0;</mml:mtext></mml:mrow><mml:mrow><mml:mtext>&#xa0;Number&#xa0;of&#xa0;Inoculations&#xa0;</mml:mtext></mml:mrow></mml:mfrac><mml:mo>&#xd7;</mml:mo><mml:mn>100</mml:mn><mml:mo>%</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<p>The incidence rate is then used to classify wild rice germplasm into BB-resistant categories, as shown in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Classification criteria for BB-resistant wild rice germplasm.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Disease grade</th>
<th valign="middle" align="center">Incidence rate (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">L1</td>
<td valign="middle" align="center">0 &#x2264; p &#x2264; 20</td>
</tr>
<tr>
<td valign="middle" align="center">L2</td>
<td valign="middle" align="center">20 &lt; p &#x2264; 50</td>
</tr>
<tr>
<td valign="middle" align="center">L3</td>
<td valign="middle" align="center">50 &lt; p &#x2264; 80</td>
</tr>
<tr>
<td valign="middle" align="center">L4</td>
<td valign="middle" align="center">80 &lt; p &#x2264; 100</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2_4_7">
<label>2.4.7</label>
<title>Germplasm screening</title>
<p>Wild rice classified as L1 or L2 is selected as prime candidates for resistance to BB.</p>
</sec>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Configuration of experimental environment</title>
<p>The experiments were conducted on a Dell tower workstation (Dell, Inc., Round Rock, Texas, USA) running Windows 11. The system was powered by a 12th Gen Intel(R) Core(TM) i5-12500 processor with a clock speed of 3.00 GHz, supported by 64GB of RAM and a 1TB solid-state drive. For GPU-accelerated computations, an NVIDIA GeForce RTX 3080 graphics card (NVIDIA Corporation, Santa Clara, California, USA) with 10GB of video memory was used. The software environment included Python version 3.8.17, along with PyTorch 1.13.0, Torchvision 0.14.0, and CUDA 11.7.</p>
<p>During the model calibration process, hyperparameter optimization was conducted on the validation set. The hyperparameters tested included the learning rate, batch size, and optimizer settings. The Adam optimizer was used with an initial learning rate of 1e-3, a maximum learning rate of 1e-5, a momentum coefficient of 0.937, and a weight decay parameter of 5e-4. The batch size was set to 8, and the input images were resized to a resolution of 640&#xd7;640 pixels.</p>
<p>For model validation, 5-fold cross-validation was performed to assess the model&#x2019;s robustness. The dataset was divided into 5 subsets, and the model was trained and evaluated 5 times, each time using a different subset as the validation set and the remaining subsets for training. This process provided a reliable estimation of the model&#x2019;s performance and ensured that the model generalizes well to unseen data.</p>
<p>The experimental procedure involved 300 epochs for each training run. The training parameters and dataset were kept consistent across all models throughout the training phase to ensure a fair comparison.</p>
<p>In the experiment on performance in edge computing devices, we first converted the model&#x2019;s weight file to ONNX format, then optimized and added model nodes. Next, we used TensorRT to accelerate the model and deployed it on the Nvidia Jetson Nano. This compact, high-performance AI embedded development board, developed by Nvidia, features a quad-core ARM A57 CPU (1.43 GHz), a 128-core Maxwell GPU, and 4 GB of memory. The operating system used is Ubuntu 18.04, with the environment configured to JetPack 4.6, CUDA 10.2, cuDNN 8.2, and TensorRT 8.0.</p>
</sec>
<sec id="s2_6">
<label>2.6</label>
<title>Field application test method</title>
<p>To evaluate the performance of the proposed method, video data of 120 wild rice plants inoculated with BB, captured by UAVs in March 2023, were selected. The data were collected at 8, 10, 13, and 15 days post-inoculation. The results from the proposed method were compared with the manual counts performed by breeding experts on-site, providing a systematic assessment of the method&#x2019;s accuracy.</p>
<p>To validate the effectiveness and stability of the screening method, a field application test was conducted on July 1, 2024, at the Potianyang Experimental Base in Yazhou District, Sanya City, Hainan Province. This test assessed the disease condition of BB in 50 different wild rice samples. The participants included eight individuals: rice disease resistance breeding researchers, experts, local farmers responsible for managing the wild rice base, and graduate students in related fields. During the test, participants were divided into two groups: the validation group, composed of four rice disease resistance breeding researchers and local farmers, performed manual counting to assess the disease condition of the wild rice; the test group, consisting of two graduate students, operated the UAV to collect wild rice images from a height of 0.6 to 1.5 meters and utilized the proposed method for disease condition evaluation. The tests were conducted in the field, with no more than a 30-minute interval between the counting processes of both groups. Two rice disease resistance breeding experts supervised the testing procedures and results.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="s3_1">
<label>3.1</label>
<title>Experimental setup and evaluation metrics</title>
<p>To ensure a comprehensive and transparent assessment of the proposed method, the dataset details, grading criteria, and evaluation metrics are consolidated below before presenting the experimental results.</p>
<sec id="s3_1_1">
<label>3.1.1</label>
<title>Dataset overview</title>
<p>The dataset used in this study consists of 12,210 images derived from UAV-captured footage of wild rice fields. These images were pooled from data collected in 2023 and 2024 and randomly split into training (9,768 images), validation (1,221 images), and testing (1,221 images) sets in an 8:1:1 ratio.</p>
<p>Grading Criteria: The core objective is to screen wild rice germplasm for bacterial blight resistance. The resistance level is determined based on the disease incidence rate, calculated as the ratio of infected leaves to the total number of inoculated leaves (Eq. 3). The classification standards are detailed in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>.</p>
</sec>
<sec id="s3_1_2">
<label>3.1.2</label>
<title>Evaluation metrics</title>
<p>The performance of the detection model was evaluated using standard object detection metrics: Precision, Recall, and mean Average Precision (mAP). Additionally, model complexity was assessed using the number of Parameters (Params) and floating-point operations (GFLOPs).</p>
<p>Precision measures the ratio of true positive predictions out of all instances predicted as positive by the model. Recall, on the other hand, quantifies the proportion of true positive samples that the model correctly detects, compared to the total number of actual positive instances. The respective formulas are presented in <xref ref-type="disp-formula" rid="eq4">Equations 4</xref>, <xref ref-type="disp-formula" rid="eq5">5</xref>:</p>
<disp-formula id="eq4"><label>(4)</label>
<mml:math display="block" id="M4"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mrow><mml:mtext>TP</mml:mtext><mml:mo>+</mml:mo><mml:mtext>FP</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq5"><label>(5)</label>
<mml:math display="block" id="M5"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mrow><mml:mtext>TP</mml:mtext><mml:mo>+</mml:mo><mml:mtext>FN</mml:mtext></mml:mrow></mml:mfrac></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<p>where True Positive (TP) refers to correctly predicted positive instances, False Positive (FP) refers to negative instances incorrectly predicted as positive, and False Negative (FN) refers to positive instances misclassified as negative.</p>
<p>Average Precision (AP) represents the area under the Precision-Recall curve, summarizing the model&#x2019;s ability to balance precision and recall at various decision thresholds. Mean Average Precision (mAP) is the average of the AP values across all object categories, providing an overall evaluation of the model&#x2019;s detection accuracy. These are calculated as shown in <xref ref-type="disp-formula" rid="eq6">Equations 6</xref>, <xref ref-type="disp-formula" rid="eq7">7</xref>:</p>
<disp-formula id="eq6"><label>(6)</label>
<mml:math display="block" id="M6"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>A</mml:mi><mml:mi>P</mml:mi><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:mrow><mml:msubsup><mml:mo>&#x222b;</mml:mo><mml:mi>0</mml:mi><mml:mn>1</mml:mn></mml:msubsup><mml:mrow><mml:mi>p</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>d</mml:mi><mml:mi>r</mml:mi></mml:mrow></mml:mrow></mml:mstyle></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq7"><label>(7)</label>
<mml:math display="block" id="M7"><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mi>m</mml:mi><mml:mi>A</mml:mi><mml:mi>P</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:msubsup><mml:msub><mml:mrow><mml:mi>A</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mi>N</mml:mi></mml:mfrac></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math>
</disp-formula>
<p>where N represents the total number of object classes and AP<sub>i</sub> denotes the average precision of the i-th class. In this study, N=1.</p>
<p>Params refers to the total number of trainable parameters in the model, indicating its complexity. GFLOPs, or Giga Floating-point Operations, measures the computational cost required to perform one forward pass through the network. A model with fewer Params and lower GFLOPs is more efficient.</p>
</sec>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Evaluation of the BB detection model&#x2019;s performance</title>
<sec id="s3_2_1">
<label>3.2.1</label>
<title>Ablation experiment</title>
<p>To validate the effectiveness of each proposed module in the improved YOLOv11-OBB model for BB detection in UAV-captured rice field images, we conducted ablation experiments. The baseline is the original YOLOv11-OBB (n variant), and we progressively integrated the proposed modules: C3k2FC, SPPF_LSKA, SlimNeck, and LiteHead. The results are summarized in <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Ablation study results.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">BaseLine</th>
<th valign="middle" align="center">C3k2FC</th>
<th valign="middle" align="center">SPPF_LSKA</th>
<th valign="middle" align="center">SlimNeck</th>
<th valign="middle" align="center">LiteHead</th>
<th valign="middle" align="center">mAP</th>
<th valign="middle" align="center">FLOPS/G</th>
<th valign="middle" align="center">Params/M</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">88.4%</td>
<td valign="middle" align="center">6.7</td>
<td valign="middle" align="center">2.6</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">90.1%</td>
<td valign="middle" align="center">6.1</td>
<td valign="middle" align="center">2.3</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">92.6%</td>
<td valign="middle" align="center">6.4</td>
<td valign="middle" align="center">2.6</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">93.1%</td>
<td valign="middle" align="center">6.2</td>
<td valign="middle" align="center">2.6</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">93.9%</td>
<td valign="middle" align="center">6.9</td>
<td valign="middle" align="center">2.9</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">94.4%</td>
<td valign="middle" align="center">6.5</td>
<td valign="middle" align="center">2.9</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">93.6%</td>
<td valign="middle" align="center">6.7</td>
<td valign="middle" align="center">3.1</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">91.7%</td>
<td valign="middle" align="center">6.3</td>
<td valign="middle" align="center">2.6</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">91.9%</td>
<td valign="middle" align="center">6.0</td>
<td valign="middle" align="center">2.4</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">92.8%</td>
<td valign="middle" align="center">6.4</td>
<td valign="middle" align="center">2.4</td>
</tr>
<tr>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">&#x2713;</td>
<td valign="middle" align="center">94.1%</td>
<td valign="middle" align="center">5.9</td>
<td valign="middle" align="center">2.5</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The integration of C3k2FC demonstrates clear improvements in efficiency and accuracy. Starting from the baseline mAP of 88.4%, FLOPs of 6.7G, and parameters of 2.6M, adding C3k2FC alone raises mAP to 90.1%, while reducing FLOPs to 6.1G and parameters to 2.3M. This enhancement arises from C3k2FC&#x2019;s partial channel convolutions and gating mechanisms, which alleviate redundancy in parallel branches and bolster local feature capture, proving especially useful for differentiating BB lesions under leaf occlusions and lighting variations. When combined with other modules, such as C3k2FC + SPPF_LSKA (mAP 92.6%, FLOPs 6.4G, parameters 2.6M), C3k2FC + SPPF_LSKA + SlimNeck (mAP 93.1%, FLOPs 6.2G, parameters 2.6M), and the full model C3k2FC + SPPF_LSKA + SlimNeck + LiteHead (mAP 94.1%, FLOPs 5.9G, parameters 2.5M), the module contributes to consistent gains in mAP while maintaining or reducing computational costs, underscoring its role in foundational redundancy mitigation.</p>
<p>The SPPF_LSKA module contributes to refined multi-scale handling and noise suppression. When added to the baseline with SlimNeck (baseline + SPPF_LSKA + SlimNeck: mAP 93.9%, FLOPs 6.9G, parameters 2.9M) or with SlimNeck + LiteHead (baseline + SPPF_LSKA + SlimNeck + LiteHead: mAP 94.4%, FLOPs 6.5G, parameters 2.9M), it elevates mAP through LSKA attention&#x2019;s refinement of pooling operations, effectively amplifying fine-grained details like striped BB patterns in high-altitude UAV images while managing a modest increase in FLOPs. In combinations involving C3k2FC, such as C3k2FC + SPPF_LSKA (mAP 92.6%, FLOPs 6.4G, parameters 2.6M) and C3k2FC + SPPF_LSKA + SlimNeck (mAP 93.1%, FLOPs 6.2G, parameters 2.6M), SPPF_LSKA further boosts performance by enhancing adaptability to varying scales and background noise, leading to better suppression of irrelevant features in challenging rice field environments.</p>
<p>SlimNeck&#x2019;s incorporation focuses on optimizing the Neck for computational efficiency. Standalone addition to the baseline yields mAP of 91.7%, FLOPs 6.3G, and parameters 2.6M, with GSConv&#x2019;s channel mixing and VoVGSCSP&#x2019;s reuse mechanisms addressing overhead in resource-constrained UAV systems. Combinations like baseline + SlimNeck + LiteHead (mAP 91.9%, FLOPs 6.0G, parameters 2.4M) and baseline + SPPF_LSKA + SlimNeck (mAP 93.9%, FLOPs 6.9G, parameters 2.9M) show reduced FLOPs or improved multi-resolution fusion, ensuring global context extraction without excessive computational demands.</p>
<p>Finally, LiteHead refines detection for rotational and small targets. When added alone to the baseline, it achieves mAP of 92.8%, FLOPs 6.4G, and parameters 2.4M, with LiteHead&#x2019;s separated BN and dynamic anchors enhancing stability and adaptability to rotated BB lesions amid resolution fluctuations. In various combinations, such as baseline + SlimNeck + LiteHead (mAP 91.9%, FLOPs 6.0G, parameters 2.4M), baseline + SPPF_LSKA + SlimNeck + LiteHead (mAP 94.4%, FLOPs 6.5G, parameters 2.9M), and the complete model (mAP 94.1%, FLOPs 5.9G, parameters 2.5M), LiteHead consistently reduces false detections and supports robust performance.</p>
<p>Overall, the ablation demonstrates that each module contributes incrementally to performance, with synergistic effects from combinations aligning with design goals for lightweight, robust BB detection.</p>
</sec>
<sec id="s3_2_2">
<label>3.2.2</label>
<title>Comparative experiments of different models</title>
<p>To evaluate our improved model for BB detection in UAV-captured rice field images, we compared it against state-of-the-art oriented bounding box (OBB) detection models, including Oriented RCNN, Rotated-Faster-RCNN, S&#xb2;ANet, RetinaNet-OBB, ReDet, RoI Transformer, YOLOv8-OBB, YOLOv11-OBB, and YOLOv12-OBB. All models were tested on the same dataset under identical hardware conditions. Performance metrics include mean Average Precision (mAP), floating-point operations (FLOPs in G), and parameters (in M). Results are summarized in <xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Comparative results of different OBB detection models.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Models</th>
<th valign="middle" align="center">mAP/%</th>
<th valign="middle" align="center">FLOPS/G</th>
<th valign="middle" align="center">Params/M</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Oriented RCNN</td>
<td valign="middle" align="center">88.6</td>
<td valign="middle" align="center">121.6</td>
<td valign="middle" align="center">41.1</td>
</tr>
<tr>
<td valign="middle" align="center">Rotated-Faster-RCNN</td>
<td valign="middle" align="center">88.3</td>
<td valign="middle" align="center">91.0</td>
<td valign="middle" align="center">41.1</td>
</tr>
<tr>
<td valign="middle" align="center">S&#xb2;ANet</td>
<td valign="middle" align="center">87.9</td>
<td valign="middle" align="center">77.0</td>
<td valign="middle" align="center">38.6</td>
</tr>
<tr>
<td valign="middle" align="center">RetinaNet-OBB</td>
<td valign="middle" align="center">88.0</td>
<td valign="middle" align="center">83.3</td>
<td valign="middle" align="center">36.3</td>
</tr>
<tr>
<td valign="middle" align="center">ReDet</td>
<td valign="middle" align="center">88.2</td>
<td valign="middle" align="center">48.3</td>
<td valign="middle" align="center">31.6</td>
</tr>
<tr>
<td valign="middle" align="center">RoI-Transformer</td>
<td valign="middle" align="center">88.5</td>
<td valign="middle" align="center">105.0</td>
<td valign="middle" align="center">55.1</td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv8-OBB</td>
<td valign="middle" align="center">85.5</td>
<td valign="middle" align="center">7.1</td>
<td valign="middle" align="center">2.8</td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv11-OBB</td>
<td valign="middle" align="center">88.4</td>
<td valign="middle" align="center">6.7</td>
<td valign="middle" align="center">2.6</td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv12-OBB</td>
<td valign="middle" align="center">87.0</td>
<td valign="middle" align="center">6.2</td>
<td valign="middle" align="center">2.5</td>
</tr>
<tr>
<td valign="middle" align="center">Our</td>
<td valign="middle" align="center">94.1</td>
<td valign="middle" align="center">5.9</td>
<td valign="middle" align="center">2.5</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Oriented RCNN achieves an mAP of 88.6% with 121.6G FLOPs and 41.1M parameters, leveraging a two-stage detection framework but incurring high computational costs due to its complex region proposal network. Rotated-Faster-RCNN, with an mAP of 88.3%, 91.0G FLOPs, and 41.1M parameters, offers comparable accuracy with reduced computational demand. S&#xb2;ANet, an anchor-free model, records an mAP of 87.9%, 77.0G FLOPs, and 38.6M parameters, balancing efficiency and accuracy but struggling with small objects in dense rice fields. RetinaNet-OBB, a one-stage model using focal loss, achieves an mAP of 88.0% with 83.3G FLOPs and 36.3M parameters, limited by feature extraction for rotated objects. ReDet, with an mAP of 88.2%, 48.3G FLOPs, and 31.6M parameters, optimizes rotation-invariant features but shows slightly lower accuracy in complex scenarios. RoI Transformer, with an mAP of 88.5%, 105.0G FLOPs, and 55.1M parameters, excels in handling rotated objects via a transformer-based approach but is resource-intensive. Among YOLO-based models, YOLOv8-OBB, with an mAP of 85.5%, 7.1G FLOPs, and 2.8M parameters, provides a lightweight baseline but lags in accuracy due to less advanced feature fusion. YOLOv11-OBB improves to an mAP of 88.4%, 6.7G FLOPs, and 2.6M parameters with better gradient flow and multi-scale handling. YOLOv12-OBB, with an mAP of 87.0%, 6.2G FLOPs, and 2.5M parameters, gains efficiency but falls short in accuracy for fine-grained detection. Our model achieves the highest mAP of 94.1% with the lowest FLOPs (5.9G) and parameters (2.5M), outperforming all comparators. This superiority stems from optimizations like C3k2FC for redundancy reduction, SPPF_LSKA for noise suppression, SlimNeck for efficient fusion, and LiteHead for rotational adaptation, ensuring robust performance in challenging rice field scenarios with minimal resource demands.</p>
</sec>
<sec id="s3_2_3" sec-type="results">
<label>3.2.3</label>
<title>Results of pruning experiments</title>
<p>To evaluate the impact of the Layer-adaptive Magnitude-based Pruning (LAMP) method on the improved model, we conducted pruning experiments at varying rates (1.5, 2.0, 2.5, and 3.0), where the pruning rate represents the adaptive magnitude threshold factor applied globally to weights, with higher values indicating more aggressive pruning. The improved model (unpruned) serves as the reference, and all evaluations were performed under the same dataset and environment conditions. Metrics include mean Average Precision (mAP), FLOPs (G), and Parameters (M). The results are summarized in <xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Pruning experiment results at varying pruning rates.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Pruning Rate</th>
<th valign="middle" align="center">mAP</th>
<th valign="middle" align="center">FLOPS/G</th>
<th valign="middle" align="center">Params/M</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Base</td>
<td valign="middle" align="center">94.1%</td>
<td valign="middle" align="center">5.9</td>
<td valign="middle" align="center">2.5</td>
</tr>
<tr>
<td valign="middle" align="center">1.5</td>
<td valign="middle" align="center">93.2%</td>
<td valign="middle" align="center">4.2</td>
<td valign="middle" align="center">1.8</td>
</tr>
<tr>
<td valign="middle" align="center">2.0</td>
<td valign="middle" align="center">93.1%</td>
<td valign="middle" align="center">3.5</td>
<td valign="middle" align="center">1.4</td>
</tr>
<tr>
<td valign="middle" align="center">2.5</td>
<td valign="middle" align="center">80.5%</td>
<td valign="middle" align="center">2.8</td>
<td valign="middle" align="center">1.1</td>
</tr>
<tr>
<td valign="middle" align="center">3.0</td>
<td valign="middle" align="center">59.7%</td>
<td valign="middle" align="center">2.3</td>
<td valign="middle" align="center">0.9</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The baseline achieves mAP of 94.1% with FLOPs of 5.9G and parameters of 2.5M. At a pruning rate of 1.5, mAP slightly drops to 93.2%, but FLOPs reduce to 4.2G and parameters to 1.8M, demonstrating minimal accuracy loss with substantial efficiency gains due to LAMP&#x2019;s layer-adaptive scoring, which preserves critical connections in early layers for fine-grained BB lesion capture. Increasing to 2.0 yields mAP of 93.1%, FLOPs 3.5G, and parameters 1.4M, maintaining robust performance amid lighting variations and occlusions by dynamically adjusting sparsity to protect feature extraction pathways.</p>
<p>However, at 2.5, mAP declines more noticeably to 80.5%, with FLOPs 2.8G and parameters 1.1M, indicating a threshold where aggressive pruning begins to erode semantic representation. At 3.0, mAP plummets to 59.7%, FLOPs 2.3G, and parameters 0.9M, highlighting severe degradation from overpruning, which amplifies false negatives in noisy rice field environments. These findings underscore LAMP&#x2019;s effectiveness at a pruning rate of 2.0 for balancing compression and accuracy, ideal for resource-constrained UAV deployments, while higher rates risk compromising BB detection reliability. The final model uses LAMP pruning at rate 2.0 for optimal balance.</p>
</sec>
<sec id="s3_2_4">
<label>3.2.4</label>
<title>Detection performance in different scenarios</title>
<p>To further evaluate the performance of the proposed BB detection model in different scenarios, we selected 50 images from the pre-augmentation test set for each of the following conditions: detection under complex backgrounds, dense disease conditions, and strong midday lighting. The results, along with comparisons to recent YOLO-based OBB detection models (YOLOv8-OBB, YOLOv11-OBB, and YOLOv12-OBB), are summarized in <xref ref-type="table" rid="T5"><bold>Table&#xa0;5</bold></xref>. <xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7</bold></xref> visualizes the performance comparisons across the various models and conditions. The red circle marks locations of false detections, while the red square highlights missed detections.</p>
<table-wrap id="T5" position="float">
<label>Table&#xa0;5</label>
<caption>
<p>Comparison of recent YOLO-Based OBB detection models across different scenarios.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Models</th>
<th valign="middle" align="center">Complex backgrounds</th>
<th valign="middle" align="center">Dense disease</th>
<th valign="middle" align="center">Strong midday lighting</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">YOLOv8-OBB</td>
<td valign="middle" align="center">79.7%</td>
<td valign="middle" align="center">81.5%</td>
<td valign="middle" align="center">87.5%</td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv11-OBB</td>
<td valign="middle" align="center">80.6%</td>
<td valign="middle" align="center">83.7%</td>
<td valign="middle" align="center">88.4%</td>
</tr>
<tr>
<td valign="middle" align="center">YOLOv12-OBB</td>
<td valign="middle" align="center">82.1%</td>
<td valign="middle" align="center">87.2%</td>
<td valign="middle" align="center">86.3%</td>
</tr>
<tr>
<td valign="middle" align="center">Our</td>
<td valign="middle" align="center">93.7%</td>
<td valign="middle" align="center">94.5%</td>
<td valign="middle" align="center">93.8%</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Performance comparison of recent YOLO-based OBB detection models across different scenarios. <bold>(A)</bold> Detection under complex backgrounds. <bold>(B)</bold> Detection under dense disease conditions. <bold>(C)</bold> Detection under strong midday lighting.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1765317-g007.tif">
<alt-text content-type="machine-generated">Grid of fifteen photographs shows rice plants in a paddy field, divided into three labeled rows (A, B, C). Each image features blue annotations marking “blight” with varying confidence scores, indicating locations of detected disease on the leaves.</alt-text>
</graphic></fig>
<p>Experimental results demonstrate that the proposed model outperforms the other YOLO-based models in all scenarios. As shown in <xref ref-type="table" rid="T5"><bold>Table&#xa0;5</bold></xref>, our model achieves the highest detection performance across complex backgrounds (93.7%), dense disease conditions (94.5%), and strong midday lighting (93.8%), outperforming YOLOv8-OBB, YOLOv11-OBB, and YOLOv12-OBB. These results highlight the robustness of our model in handling diverse and challenging environmental conditions. We randomly selected detection results from a variety of environmental conditions, as shown in <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8</bold></xref>.</p>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>Detection results in different scenarios. <bold>(A)</bold> Detection under complex backgrounds. <bold>(B)</bold> Detection under dense disease conditions. <bold>(C)</bold> Detection under strong midday lighting.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1765317-g008.tif">
<alt-text content-type="machine-generated">Figure comparing original and processed images of rice plants in water, arranged in three rows labeled A, B, and C, with five columns for different detection methods. Blight-affected leaves are marked with blue or red boxes and labeled “blight” in the YOLOv8-OBB, YOLOv11-OBB, YOLOv12-OBB, and XooNet columns, demonstrating varying detection accuracy and bounding shapes versus the original unmarked images.</alt-text>
</graphic></fig>
<p>As shown in <xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8</bold></xref>, the proposed model effectively addresses these challenges, demonstrating robust performance in complex backgrounds, dense disease scenarios, and varying lighting conditions, underscoring its potential for reliable and accurate disease detection.</p>
</sec>
<sec id="s3_2_5">
<label>3.2.5</label>
<title>Detection performance on edge computing devices</title>
<p>To evaluate the performance of the model deployed on edge computing devices, we tested a 60-second video captured by a UAV of wild rice infected with BB. The video had a resolution of 1920x1080 pixels. The improved model achieved a detection speed of 7.5 fps in the unaccelerated mode, and 21.3 fps with TensorRT acceleration, resulting in an improvement of 13.8 fps. This represents a 2.84-fold increase in performance. In contrast, the YOLOv11-OBB model showed an improvement of 9.3 fps, with a 2.69-fold increase in performance after applying TensorRT acceleration. Prior to acceleration, the detection speed of the improved model was constrained by the limited computational power of the Jetson Nano device. However, after applying TensorRT acceleration, the detection speed of the improved model reached 21.3 fps, achieving a significant increase in performance. These results demonstrate that the improved model, with TensorRT acceleration, significantly enhances detection speed, making it well-suited to meet the performance requirements for deployment on edge computing devices.</p>
</sec>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Assessment of BB screening performance</title>
<sec id="s3_3_1">
<label>3.3.1</label>
<title>Analysis of BB screening accuracy</title>
<p>The experimental results show that the method accurately assessed 117 plants, with 3 plants misclassified, resulting in an overall accuracy of 97.5%. This performance is sufficient for disease resistance breeding applications. Further analysis of disease counting performance revealed that the method correctly counted the disease in 112 plants, with 8 plants showing varying degrees of miscounts, yielding an overall accuracy of 93.3%. To better understand the source of these errors, the miscounted samples were analyzed.</p>
<p>From <xref ref-type="table" rid="T6"><bold>Table&#xa0;6</bold></xref>, it is evident that five samples (IDs 025, 034, 045, 064, 089) suffered missed detections due to leaf disturbance. The cause of this was the vibrations and airflow from the UAV&#x2019;s propellers, which caused the leaves to move vigorously. This disturbance resulted in blurred and overlapping leaves in the images, making it difficult for the detection model to accurately detect and locate the disease. Additionally, some wild rice leaves exceeded 50 cm in length, which made them more susceptible to wind, further increasing the chances of missed detection. Three samples (IDs 094, 096, 109) were misclassified due to the presence of dead leaves that were not cleared in time. The visual similarity between dead leaves and BB led to false positives during the disease detection process.</p>
<table-wrap id="T6" position="float">
<label>Table&#xa0;6</label>
<caption>
<p>Disease detection miscount analysis and causes.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Sample ID</th>
<th valign="middle" align="center">Actual disease count</th>
<th valign="middle" align="center">Disease count by this method</th>
<th valign="middle" align="center">Cause of miscount</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">025</td>
<td valign="middle" align="center">9</td>
<td valign="middle" align="center">8</td>
<td valign="middle" align="center">Missed detection due to leaf disturbance</td>
</tr>
<tr>
<td valign="middle" align="center">034</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">Missed detection due to leaf disturbance</td>
</tr>
<tr>
<td valign="middle" align="center">045</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">0</td>
<td valign="middle" align="center">Missed detection due to leaf disturbance</td>
</tr>
<tr>
<td valign="middle" align="center">064</td>
<td valign="middle" align="center">6</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">Missed detection due to leaf disturbance</td>
</tr>
<tr>
<td valign="middle" align="center">089</td>
<td valign="middle" align="center">8</td>
<td valign="middle" align="center">7</td>
<td valign="middle" align="center">Missed detection due to leaf disturbance</td>
</tr>
<tr>
<td valign="middle" align="center">094</td>
<td valign="middle" align="center">3</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">Misclassification due to dead leaves not cleared in time</td>
</tr>
<tr>
<td valign="middle" align="center">096</td>
<td valign="middle" align="center">7</td>
<td valign="middle" align="center">6</td>
<td valign="middle" align="center">Misclassification due to dead leaves not cleared in time</td>
</tr>
<tr>
<td valign="middle" align="center">109</td>
<td valign="middle" align="center">7</td>
<td valign="middle" align="center">9</td>
<td valign="middle" align="center">Misclassification due to dead leaves not cleared in time</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3_3_2">
<label>3.3.2</label>
<title>Analysis of practical application test results</title>
<p>The experimental results showed that the method correctly screened 47 wild rice varieties, with 3 varieties misclassified, resulting in an overall accuracy of 94.0%. This performance meets the requirements for disease resistance breeding applications. Further analysis of the disease counting performance revealed that the method accurately counted the disease in 43 plants, with 7 plants showing varying degrees of miscounts, yielding an overall accuracy of 86.0%. To better understand the source of these errors, the miscounted samples were analyzed.</p>
<p>From <xref ref-type="table" rid="T7"><bold>Table&#xa0;7</bold></xref>, it is clear that four samples (IDs 005, 009, 017, 041) were missed due to occlusion. This was primarily caused by the failure to remove healthy leaves, which grew too quickly and obscured the diseased targets. As the UAV captured images from above, these occluded targets could not be effectively detected, making it difficult for the detection model to identify them. Additionally, two samples (IDs 020, 024) were missed due to leaf disturbance. One sample (ID 043) was misclassified due to the presence of dead leaves that were not cleared in the field.</p>
<table-wrap id="T7" position="float">
<label>Table&#xa0;7</label>
<caption>
<p>Miscount analysis and causes in field application test.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Sample ID</th>
<th valign="middle" align="center">Actual disease count</th>
<th valign="middle" align="center">Disease count by this method</th>
<th valign="middle" align="center">Cause of miscount</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">005</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">Occlusion</td>
</tr>
<tr>
<td valign="middle" align="center">009</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">Occlusion</td>
</tr>
<tr>
<td valign="middle" align="center">017</td>
<td valign="middle" align="center">6</td>
<td valign="middle" align="center">3</td>
<td valign="middle" align="center">Occlusion</td>
</tr>
<tr>
<td valign="middle" align="center">020</td>
<td valign="middle" align="center">6</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">Missed detection due to leaf disturbance</td>
</tr>
<tr>
<td valign="middle" align="center">024</td>
<td valign="middle" align="center">8</td>
<td valign="middle" align="center">7</td>
<td valign="middle" align="center">Missed detection due to leaf disturbance</td>
</tr>
<tr>
<td valign="middle" align="center">041</td>
<td valign="middle" align="center">8</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">Occlusion</td>
</tr>
<tr>
<td valign="middle" align="center">043</td>
<td valign="middle" align="center">3</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">Misclassification due to dead leaves not cleared</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Summary of experimental results</title>
<p>In summary, the comprehensive evaluation results from <xref ref-type="table" rid="T2"><bold>Tables&#xa0;2</bold></xref> through 5 demonstrate that XooNet achieves a superior balance between detection accuracy and computational efficiency. Compared to the baseline YOLOv11-OBB, the final optimized model improved the mAP by 4.7% (increasing from 88.4% to 93.1%) while significantly reducing model complexity. Furthermore, field deployment tests confirmed that with TensorRT acceleration on the Nvidia Jetson Nano, the model achieves an inference speed of 21.3 FPS. These findings validate that XooNet meets the strict requirements for real-time, high-precision bacterial blight screening in wild rice fields.</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>Although significant progress has been made in the detection of rice BB, existing methods often fail to adequately consider the critical issue of equipment costs. Some techniques, while offering high detection accuracy, rely on expensive equipment and require specialized operators, limiting their practical application. Additionally, some methods are computationally complex, which restricts their use in routine breeding practices. In contrast, the XooNet method presented in this study employs low-cost UAVs and lightweight deep learning models, maintaining high detection precision while reducing equipment and operational costs, making it more suitable for practical breeding applications.</p>
<p>One key advantage of this method is that it offers a cost-effective solution. Unlike other methods that rely on expensive hyperspectral equipment or specialized hardware, XooNet utilizes commercially available UAVs, making it more affordable and accessible for breeding laboratories with limited budgets. For example, hyperspectral imaging systems that cost over $80,000 are often used in some studies for this task, but such systems are not feasible for routine field use due to their high cost and technical requirements. In contrast, XooNet provides an equally effective solution at a fraction of the cost, enabling broader adoption in real-world breeding scenarios.</p>
<p>Another key advantage is its robust performance in complex field environments. One of the standout features of XooNet is its lightweight OBB detection algorithm, which has been specifically designed for UAV-based BB disease screening. This algorithm ensures stable performance in the dynamic and often challenging conditions found in real-world rice fields. It can handle detection under complex backgrounds, such as weeds, dense disease conditions, and strong midday lighting. This field performance ensures that XooNet remains reliable in diverse environmental conditions, making it ideal for BB-resistance breeding.</p>
<p>Despite the promising results, certain considerations should be observed during the application of this method. First, strict adherence to inoculation protocols and field management requirements is essential. According to the Rice Disease Resistance Evaluation Technical Standards, it is necessary to manage the disease plots carefully to avoid external factors influencing the experimental results. Although the accuracy of the proposed method is high, errors were observed primarily due to improper inoculation procedures (e.g., using leaves that were too long or too short) or poor field management (e.g., failure to clear uninfected leaves or dead leaves promptly). Therefore, improving field management practices is crucial to enhance reliability. Second, the quality of the UAVs used for image capture plays a significant role. The DJI Mini 2 UAV used in this study supports only 2x zoom, necessitating a lower flying altitude. However, subsequent tests with the DJI Mini 4 Pro UAV showed that increasing the zoom and flight altitude significantly reduced leaf disturbance, thereby improving screening accuracy.</p>
<p>While XooNet has demonstrated encouraging results in accuracy and efficiency, it is important to acknowledge a limitation regarding the data distribution. The dataset utilized in this study was constructed by pooling and randomly splitting images collected over two growing seasons (2023 and 2024). Strict independent cross-year validation (i.e., training on one year and testing on the other) was not performed in this study. Consequently, while the model demonstrated high accuracy within the tested data distribution, its generalizability to completely unseen years or varying ecological regions with significantly different environmental characteristics requires further verification.</p>
<p>To address these limitations and enhance practical applicability and robustness in field conditions, future research will focus on several key directions. First, we plan to enhance model performance through data diversity and reinforcement learning. We aim to expand the diversity of the data covering various dimensions such as different wild rice varieties, fungal strains, experimental environments, and growth stages. Additionally, introducing deep reinforcement learning methods will help address errors caused by dynamic factors such as leaf vibrations and airflow disturbances. Second, future work could integrate the current method with genotypic data to advance genomic selection (GS) techniques in breeding. Combining phenotypic data with DNA markers can help accurately select high-quality germplasm with disease-resistant genes. Finally, we aim to integrate environmental factors, such as climate data and soil conditions, with disease resistance phenotypic data. This approach will help develop predictive models to assess how wild rice varieties perform under different ecological conditions, optimizing breeding strategies and adapting to climate change.</p>
<p>Despite certain limitations, the XooNet method offers valuable technical guidance for automated, precise disease screening in disease-resistant breeding applications. The method has already contributed to several wild rice BB resistance breeding projects, providing an efficient screening tool. As the technology continues to improve and gain wider adoption, XooNet will further demonstrate its reliability and effectiveness, providing strong technical support for rice disease resistance breeding.</p>
</sec>
<sec id="s5" sec-type="conclusions">
<label>5</label>
<title>Conclusions</title>
<p>This study introduces XooNet, a novel UAV-based method for automated BB resistance screening in wild rice, which classifies wild rice into several levels based on BB resistance. To enable this method, a high-precision and lightweight OBB detection algorithm for BB in wild rice is developed. Experimental results show that the screening method achieved an accuracy of 97.5%. Through the implementation of the lightweight OBB algorithm and LAMP pruning, the final detection model achieved an accuracy of 93.1% with a parameter size of 1.4M and a computational complexity of 3.5 GFLOPs. The XooNet method offers a cost-effective and efficient solution for large-scale BB resistance screening, overcoming the limitations of traditional methods and existing UAV-based approaches. It provides strong support for the integration of wild rice resistance genes into breeding programs, enabling rapid identification of resistant varieties. Future research will aim to integrate phenotypic and genotypic data, along with environmental factors, to enhance the accuracy and applicability of this method.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p></sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>PP: Writing &#x2013; original draft, Methodology, Writing &#x2013; review &amp; editing, Conceptualization. WG: Data curation, Writing &#x2013; review &amp; editing, Software. MiL: Formal Analysis, Writing &#x2013; review &amp; editing, Data curation. HL: Data curation, Validation, Writing &#x2013; review &amp; editing. JY: Writing &#x2013; review &amp; editing, Data curation. ZG: Data curation, Writing &#x2013; review &amp; editing. HZ: Writing &#x2013; review &amp; editing, Data curation. GY: Funding acquisition, Writing &#x2013; review &amp; editing, Validation. MaL: Writing &#x2013; review &amp; editing, Funding acquisition, Project administration. LY: Writing &#x2013; review &amp; editing, Project administration, Funding acquisition. XZ: Project administration, Writing &#x2013; review &amp; editing, Funding acquisition. GZ: Funding acquisition, Writing &#x2013; review &amp; editing, Project administration. JZ: Project administration, Writing &#x2013; review &amp; editing, Funding acquisition.</p></sec>
<ack>
<title>Acknowledgments</title>
<p>Appreciations are given to the editors and reviewers of the Frontiers in Plant Science.</p>
</ack>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s10" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Amante-Bordeos</surname> <given-names>A.</given-names></name>
<name><surname>Sitch</surname> <given-names>L. A.</given-names></name>
<name><surname>Nelson</surname> <given-names>R.</given-names></name>
<name><surname>Dalmacio</surname> <given-names>R. D.</given-names></name>
<name><surname>Oliva</surname> <given-names>N. P.</given-names></name>
<name><surname>Aswidinnoor</surname> <given-names>H.</given-names></name>
<etal/>
</person-group>. (<year>1992</year>). 
<article-title>Transfer of bacterial blight and blast resistance from the tetraploid wild rice Oryza minuta to cultivated rice, Oryza sativa</article-title>. <source>Theor. Appl. Genet.</source> <volume>84</volume>, <fpage>345</fpage>&#x2013;<lpage>354</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/BF00229493</pub-id>, PMID: <pub-id pub-id-type="pmid">24203194</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bai</surname> <given-names>X.</given-names></name>
<name><surname>Fang</surname> <given-names>H.</given-names></name>
<name><surname>He</surname> <given-names>Y.</given-names></name>
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
<name><surname>Tao</surname> <given-names>M.</given-names></name>
<name><surname>Wu</surname> <given-names>Q.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). 
<article-title>Dynamic UAV phenotyping for rice disease resistance analysis based on multisource data</article-title>. <source>Plant Phenom.</source> <volume>5</volume>, <elocation-id>19</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.34133/plantphenomics.0019</pub-id>, PMID: <pub-id pub-id-type="pmid">37040287</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>J.</given-names></name>
<name><surname>Kao</surname> <given-names>S.-h.</given-names></name>
<name><surname>He</surname> <given-names>H.</given-names></name>
<name><surname>Zhuo</surname> <given-names>W.</given-names></name>
<name><surname>Wen</surname> <given-names>S.</given-names></name>
<name><surname>Lee</surname> <given-names>C.-H.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). &#x201c;
<article-title>Run, don&#x2019;t walk: chasing higher FLOPS for faster neural networks</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>. (<publisher-loc>Vancouver, Canada</publisher-loc>: 
<publisher-name>IEEE</publisher-name>),  <fpage>12021</fpage>&#x2013;<lpage>12031</lpage>.
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Fukagawa</surname> <given-names>N. K.</given-names></name>
<name><surname>Ziska</surname> <given-names>L. H.</given-names></name>
</person-group> (<year>2019</year>). 
<article-title>Rice: Importance for global nutrition</article-title>. <source>J. Nutr. Sci. Vitaminol.</source> <volume>65</volume>, <fpage>S2</fpage>&#x2013;<lpage>S3</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3177/jnsv.65.S2</pub-id>, PMID: <pub-id pub-id-type="pmid">31619630</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gnanamanickam</surname> <given-names>S.</given-names></name>
<name><surname>Priyadarisini</surname> <given-names>V. B.</given-names></name>
<name><surname>Narayanan</surname> <given-names>N.</given-names></name>
<name><surname>Vasudevan</surname> <given-names>P.</given-names></name>
<name><surname>Kavitha</surname> <given-names>S.</given-names></name>
</person-group> (<year>1999</year>). 
<article-title>An overview of bacterial blight disease of rice and strategies for its management</article-title>. <source>Curr. Sci.</source> <volume>77</volume>, <fpage>1435</fpage>&#x2013;<lpage>1444</lpage>.
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Gupta</surname> <given-names>R. C.</given-names></name>
<name><surname>Gupta</surname> <given-names>P. K.</given-names></name>
</person-group> (<year>2025</year>). &#x201c;
<article-title>Toxicity of fungicides</article-title>,&#x201d; in <source>Veterinary toxicology</source> (<publisher-loc>Amsterdam, Netherlands</publisher-loc>: 
<publisher-name>Elsevier</publisher-name>), <fpage>581</fpage>&#x2013;<lpage>593</lpage>.
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hidayatullah</surname> <given-names>P.</given-names></name>
<name><surname>Syakrani</surname> <given-names>N.</given-names></name>
<name><surname>Sholahuddin</surname> <given-names>M. R.</given-names></name>
<name><surname>Gelar</surname> <given-names>T.</given-names></name>
<name><surname>Tubagus</surname> <given-names>R.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>YOLOv8 to YOLO11: A comprehensive architecture in-depth comparative review</article-title>. <source>arXiv. preprint. arXiv:2501.13400</source>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2501.13400</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Jain</surname> <given-names>R.</given-names></name>
<name><surname>Kochar</surname> <given-names>M.</given-names></name>
<name><surname>Dubey</surname> <given-names>M. K.</given-names></name>
<name><surname>Sharma</surname> <given-names>S. S.</given-names></name>
<name><surname>Yang</surname> <given-names>W.</given-names></name>
<name><surname>Cahill</surname> <given-names>D.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Advancing Diagnostics for Xanthomonas oryzae pv. oryzae: Challenges and Future Directions</article-title>. <source>ACS Agric. Sci. Technol.</source> <volume>5</volume>, <fpage>1529</fpage>&#x2013;<lpage>1548</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1021/acsagscitech.5c00197</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Jegham</surname> <given-names>N.</given-names></name>
<name><surname>Koh</surname> <given-names>C. Y.</given-names></name>
<name><surname>Abdelatti</surname> <given-names>M.</given-names></name>
<name><surname>Hendawi</surname> <given-names>A.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Yolo evolution: A comprehensive benchmark and architectural review of yolov12, yolo11, and their previous versions</article-title>. <source>arXiv. preprint. arXiv:2411.00201</source>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2411.00201</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Khanam</surname> <given-names>R.</given-names></name>
<name><surname>Hussain</surname> <given-names>M.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Yolov11: An overview of the key architectural enhancements</article-title>. <source>arXiv. preprint. arXiv:2410.17725</source>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2410.17725</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Kumar</surname> <given-names>D.</given-names></name>
<name><surname>Arya</surname> <given-names>S. K.</given-names></name>
<name><surname>Srivastava</surname> <given-names>D.</given-names></name>
<name><surname>Shamim</surname> <given-names>M.</given-names></name>
<name><surname>Desai</surname> <given-names>L.</given-names></name>
<name><surname>Tyagi</surname> <given-names>M.</given-names></name>
</person-group> (<year>2023</year>). &#x201c;
<article-title>Impact of major rice bacterial diseases on agriculture and food security</article-title>,&#x201d; in <source>Bacterial diseases of rice and their management</source> (<publisher-loc>Palm Bay, USA</publisher-loc>: 
<publisher-name>Apple Academic Press</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>28</lpage>.
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lau</surname> <given-names>K. W.</given-names></name>
<name><surname>Po</surname> <given-names>L.-M.</given-names></name>
<name><surname>Rehman</surname> <given-names>Y. A. U.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Large separable kernel attention: Rethinking the large kernel attention design in cnn</article-title>. <source>Expert Syst. Appl.</source> <volume>236</volume>, <fpage>121352</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.eswa.2023.121352</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lee</surname> <given-names>J.</given-names></name>
<name><surname>Park</surname> <given-names>S.</given-names></name>
<name><surname>Mo</surname> <given-names>S.</given-names></name>
<name><surname>Ahn</surname> <given-names>S.</given-names></name>
<name><surname>Shin</surname> <given-names>J.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>Layer-adaptive sparsity for the magnitude-based pruning</article-title>. <source>arXiv:2010.07611</source>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2010.07611</pub-id>. arXiv e-prints.
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Fan</surname> <given-names>Y.</given-names></name>
<name><surname>You</surname> <given-names>Y.</given-names></name>
<name><surname>Wang</surname> <given-names>P.</given-names></name>
<name><surname>Ling</surname> <given-names>Y.</given-names></name>
<name><surname>Yin</surname> <given-names>H.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Efficient marker-assisted pyramiding of xa21 and xa23 genes into elite rice restorer lines confers broad-spectrum resistance to bacterial blight</article-title>. <source>Plants</source> <volume>14</volume>, <elocation-id>2107</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/plants14142107</pub-id>, PMID: <pub-id pub-id-type="pmid">40733348</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>H.</given-names></name>
<name><surname>Li</surname> <given-names>J.</given-names></name>
<name><surname>Wei</surname> <given-names>H.</given-names></name>
<name><surname>Liu</surname> <given-names>Z.</given-names></name>
<name><surname>Zhan</surname> <given-names>Z.</given-names></name>
<name><surname>Ren</surname> <given-names>Q.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Slim-neck by GSConv: A better design paradigm of detector architectures for autonomous vehicles</article-title>. <source>arXiv. preprint. arXiv:2206.02424</source>. doi:&#xa0;<pub-id pub-id-type="doi">10.48550/arXiv.2206.02424</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Londo</surname> <given-names>J. P.</given-names></name>
<name><surname>Chiang</surname> <given-names>Y.-C.</given-names></name>
<name><surname>Hung</surname> <given-names>K.-H.</given-names></name>
<name><surname>Chiang</surname> <given-names>T.-Y.</given-names></name>
<name><surname>Schaal</surname> <given-names>B. A.</given-names></name>
</person-group> (<year>2006</year>). 
<article-title>Phylogeography of Asian wild rice, Oryza rufipogon, reveals multiple independent domestications of cultivated rice, Oryza sativa</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>103</volume>, <fpage>9578</fpage>&#x2013;<lpage>9583</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1073/pnas.0603152103</pub-id>, PMID: <pub-id pub-id-type="pmid">16766658</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lu</surname> <given-names>J.</given-names></name>
<name><surname>Qi</surname> <given-names>Q.</given-names></name>
<name><surname>Zheng</surname> <given-names>G.</given-names></name>
<name><surname>Eitel</surname> <given-names>J. U. H.</given-names></name>
<name><surname>Zhang</surname> <given-names>Q.</given-names></name>
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>High-throughput field phenotyping using unmanned aerial vehicles (UAVs) for rapid estimation of photosynthetic traits</article-title>. <source>Plant Phenom.</source> <volume>7</volume>, <elocation-id>45</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.plaphe.2025.100045</pub-id>, PMID: <pub-id pub-id-type="pmid">41415165</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Muthayya</surname> <given-names>S.</given-names></name>
<name><surname>Sugimoto</surname> <given-names>J. D.</given-names></name>
<name><surname>Montgomery</surname> <given-names>S.</given-names></name>
<name><surname>Maberly</surname> <given-names>G. F.</given-names></name>
</person-group> (<year>2014</year>). 
<article-title>An overview of global rice production, supply, trade, and consumption</article-title>. <source>Ann. New York. Acad. Sci.</source> <volume>1324</volume>, <fpage>7</fpage>&#x2013;<lpage>14</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/nyas.12540</pub-id>, PMID: <pub-id pub-id-type="pmid">25224455</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Oliveira-Garcia</surname> <given-names>E.</given-names></name>
<name><surname>Budot</surname> <given-names>B. O.</given-names></name>
<name><surname>Manangkil</surname> <given-names>J.</given-names></name>
<name><surname>Lana</surname> <given-names>F. D.</given-names></name>
<name><surname>Angira</surname> <given-names>B.</given-names></name>
<name><surname>Famoso</surname> <given-names>A.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>An efficient method for screening rice breeding lines against races of Magnaporthe oryzae</article-title>. <source>Plant Dis.</source> <volume>108</volume>, <fpage>1179</fpage>&#x2013;<lpage>1187</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1094/PDIS-05-23-0922-RE</pub-id>, PMID: <pub-id pub-id-type="pmid">37807096</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pan</surname> <given-names>P.</given-names></name>
<name><surname>Guo</surname> <given-names>W.</given-names></name>
<name><surname>Zheng</surname> <given-names>X.</given-names></name>
<name><surname>Hu</surname> <given-names>L.</given-names></name>
<name><surname>Zhou</surname> <given-names>G.</given-names></name>
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
</person-group> (<year>2023</year>a). 
<article-title>Xoo-YOLO: a detection method for wild rice bacterial blight in the field from the perspective of unmanned aerial vehicles</article-title>. <source>Front. Plant Sci.</source> <volume>14</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2023.1256545</pub-id>, PMID: <pub-id pub-id-type="pmid">37936939</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pan</surname> <given-names>P.</given-names></name>
<name><surname>Shao</surname> <given-names>M.</given-names></name>
<name><surname>He</surname> <given-names>P.</given-names></name>
<name><surname>Hu</surname> <given-names>L.</given-names></name>
<name><surname>Zhao</surname> <given-names>S.</given-names></name>
<name><surname>Huang</surname> <given-names>L.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>a). 
<article-title>Lightweight cotton diseases real-time detection model for resource-constrained devices in natural environments</article-title>. <source>Front. Plant Sci.</source> <volume>15</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2024.1383863</pub-id>, PMID: <pub-id pub-id-type="pmid">38903431</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pan</surname> <given-names>P.</given-names></name>
<name><surname>Wenlong</surname> <given-names>G.</given-names></name>
<name><surname>Hengbo</surname> <given-names>L.</given-names></name>
<name><surname>Yifan</surname> <given-names>S.</given-names></name>
<name><surname>Zhihao</surname> <given-names>G.</given-names></name>
<name><surname>Ye</surname> <given-names>J.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Accelerating wild rice disease-resistant germplasm exploration: artificial intelligence (AI)-powered wild rice blast disease level evaluation and disease-resistance identification</article-title>. <source>Rice Sci.</source> <volume>32</volume>, <fpage>727</fpage>&#x2013;<lpage>746</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.rsci.2025.05.005</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pan</surname> <given-names>P.</given-names></name>
<name><surname>Yao</surname> <given-names>Q.</given-names></name>
<name><surname>Shen</surname> <given-names>J.</given-names></name>
<name><surname>Hu</surname> <given-names>L.</given-names></name>
<name><surname>Zhao</surname> <given-names>S.</given-names></name>
<name><surname>Huang</surname> <given-names>L.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>b). 
<article-title>CVW-etr: A high-precision method for estimating the severity level of cotton verticillium wilt disease</article-title>. <source>Plants</source> <volume>13</volume>, <fpage>3050</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/plants13212960</pub-id>, PMID: <pub-id pub-id-type="pmid">39519879</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pan</surname> <given-names>P.</given-names></name>
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
<name><surname>Zheng</surname> <given-names>X.</given-names></name>
<name><surname>Zhou</surname> <given-names>G.</given-names></name>
<name><surname>Hu</surname> <given-names>L.</given-names></name>
<name><surname>Feng</surname> <given-names>Q.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>b). 
<article-title>Research progress of deep learning in intelligent identification of disease resistance of crops and their related species</article-title>. <source>Acta Agricult. Zhejiangensis.</source> <volume>35</volume>, <fpage>1993</fpage>&#x2013;<lpage>2012</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3969/j.issn.1004-1524.20236105</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sabar</surname> <given-names>M.</given-names></name>
<name><surname>Sana-e-Mustafa</surname> <given-names>M. I.</given-names></name>
<name><surname>Khan</surname> <given-names>R. A. R.</given-names></name>
<name><surname>Fatima</surname> <given-names>R.</given-names></name>
<name><surname>Saher</surname> <given-names>H.</given-names></name>
<name><surname>Shahzadi</surname> <given-names>F.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Sheath blight and bacterial blight resistance in rice: mechanisms, progress and future perspectives for sustainable rice production</article-title>. <source>Plant Bull.</source> <volume>3</volume>, <fpage>102</fpage>&#x2013;<lpage>112</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.55627/pbulletin.003.01.0748</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sahu</surname> <given-names>P. K.</given-names></name>
<name><surname>Sao</surname> <given-names>R.</given-names></name>
<name><surname>Choudhary</surname> <given-names>D. K.</given-names></name>
<name><surname>Thada</surname> <given-names>A.</given-names></name>
<name><surname>Kumar</surname> <given-names>V.</given-names></name>
<name><surname>Mondal</surname> <given-names>S.</given-names></name>
<etal/>
</person-group>. (<year>2022</year>). 
<article-title>Advancement in the breeding, biotechnological and genomic tools towards development of durable genetic resistance against the rice blast disease</article-title>. <source>Plants</source> <volume>11</volume>, <fpage>2386</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/plants11182386</pub-id>, PMID: <pub-id pub-id-type="pmid">36145787</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Shaodan</surname> <given-names>L.</given-names></name>
<name><surname>Yue</surname> <given-names>Y.</given-names></name>
<name><surname>Jiayi</surname> <given-names>L.</given-names></name>
<name><surname>Xiaobin</surname> <given-names>L.</given-names></name>
<name><surname>Jie</surname> <given-names>M.</given-names></name>
<name><surname>Haiyong</surname> <given-names>W.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). 
<article-title>Application of UAV-based imaging and deep learning in assessment of rice blast resistance</article-title>. <source>Rice Sci.</source> <volume>30</volume>, <fpage>652</fpage>&#x2013;<lpage>660</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.rsci.2023.06.005</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Shcherbakova</surname> <given-names>L.</given-names></name>
</person-group> (<year>2019</year>). 
<article-title>Fungicide resistance of plant pathogenic fungi and their chemosensitization as a tool to increase anti-disease effects of triazoles and strobilurines</article-title>. <source>Sel&#x2019;skokhozyaistvennaya. Biol.</source> <volume>54</volume>, <fpage>875</fpage>&#x2013;<lpage>891</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.15389/agrobiology.2019.5.875eng</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Shi</surname> <given-names>D.</given-names></name>
</person-group> (<year>2024</year>). &#x201c;
<article-title>Transnext: Robust foveal visual perception for vision transformers</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition</conf-name>. (<publisher-loc>Seattle, USA</publisher-loc>: 
<publisher-name>IEEE</publisher-name>), <fpage>17773</fpage>&#x2013;<lpage>17783</lpage>.
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Song</surname> <given-names>W.-Y.</given-names></name>
<name><surname>Wang</surname> <given-names>G.-L.</given-names></name>
<name><surname>Chen</surname> <given-names>L.-L.</given-names></name>
<name><surname>Kim</surname> <given-names>H.-S.</given-names></name>
<name><surname>Pi</surname> <given-names>L.-Y.</given-names></name>
<name><surname>Holsten</surname> <given-names>T.</given-names></name>
<etal/>
</person-group>. (<year>1995</year>). 
<article-title>A receptor kinase-like protein encoded by the rice disease resistance gene, Xa21</article-title>. <source>Science</source> <volume>270</volume>, <fpage>1804</fpage>&#x2013;<lpage>1806</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1126/science.270.5243.1804</pub-id>, PMID: <pub-id pub-id-type="pmid">8525370</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Tannidi</surname> <given-names>B.</given-names></name>
<name><surname>Anantha</surname> <given-names>M.</given-names></name>
<name><surname>Laha</surname> <given-names>G.</given-names></name>
<name><surname>Sundaram</surname> <given-names>R.</given-names></name>
<name><surname>Senguttuvel</surname> <given-names>P.</given-names></name>
<name><surname>Chandavarapu</surname> <given-names>R.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Identification of novel sources of bacterial leaf blight resistance in wild species of rice</article-title>. <source>Plant Genet. Resour.</source> <volume>23</volume>, <fpage>129</fpage>&#x2013;<lpage>137</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1017/S1479262124000601</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Thompson</surname> <given-names>L. A.</given-names></name>
<name><surname>Darwish</surname> <given-names>W. S.</given-names></name>
</person-group> (<year>2019</year>). 
<article-title>Environmental chemical contaminants in food: review of a global problem</article-title>. <source>J. Toxicol.</source> <volume>2019</volume>, <fpage>2345283</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1155/2019/2345283</pub-id>, PMID: <pub-id pub-id-type="pmid">30693025</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>R.</given-names></name>
<name><surname>Bie</surname> <given-names>X.</given-names></name>
<name><surname>Xiao</surname> <given-names>J.</given-names></name>
<name><surname>Xu</surname> <given-names>S.</given-names></name>
<name><surname>Li</surname> <given-names>P.</given-names></name>
<name><surname>Feng</surname> <given-names>X.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Identification of a novel screening strategy of rice resistance breeding through phytoalexin content</article-title>. <source>Planta</source> <volume>262</volume>, <fpage>25</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00425-025-04739-5</pub-id>, PMID: <pub-id pub-id-type="pmid">40512184</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>C.</given-names></name>
<name><surname>Zhang</surname> <given-names>X.</given-names></name>
<name><surname>Fan</surname> <given-names>Y.</given-names></name>
<name><surname>Gao</surname> <given-names>Y.</given-names></name>
<name><surname>Zhu</surname> <given-names>Q.</given-names></name>
<name><surname>Zheng</surname> <given-names>C.</given-names></name>
<etal/>
</person-group>. (<year>2015</year>). 
<article-title>XA23 is an executor R protein and confers broad-spectrum disease resistance in rice</article-title>. <source>Mol. Plant</source> <volume>8</volume>, <fpage>290</fpage>&#x2013;<lpage>302</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.molp.2014.10.010</pub-id>, PMID: <pub-id pub-id-type="pmid">25385701</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Xu</surname> <given-names>Z.</given-names></name>
<name><surname>Xu</surname> <given-names>X.</given-names></name>
<name><surname>Li</surname> <given-names>Y.</given-names></name>
<name><surname>Liu</surname> <given-names>L.</given-names></name>
<name><surname>Wang</surname> <given-names>Q.</given-names></name>
<name><surname>Wang</surname> <given-names>Y.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Tal6b/AvrXa27A, a hidden TALE targeting the susceptibility gene OsSWEET11a and the resistance gene Xa27 in rice</article-title>. <source>Plant Commun.</source> <volume>5</volume>, <elocation-id>100721</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.xplc.2023.100721</pub-id>, PMID: <pub-id pub-id-type="pmid">37735868</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>D.</given-names></name>
<name><surname>Li</surname> <given-names>Z.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Case study: successful breeding of rice varieties resistant to bacterial blight</article-title>. <source>Mol. Pathog.</source> <volume>16</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.5376/mp.2025.16.0018</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>J.</given-names></name>
<name><surname>Yang</surname> <given-names>Y.</given-names></name>
<name><surname>Feng</surname> <given-names>X.</given-names></name>
<name><surname>Xu</surname> <given-names>H.</given-names></name>
<name><surname>Chen</surname> <given-names>J.</given-names></name>
<name><surname>He</surname> <given-names>Y.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>Identification of bacterial blight resistant rice seeds using terahertz imaging and hyperspectral imaging combined with convolutional neural network</article-title>. <source>Front. Plant Sci.</source> <volume>11</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpls.2020.00821</pub-id>, PMID: <pub-id pub-id-type="pmid">32670316</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zheng</surname> <given-names>X.</given-names></name>
<name><surname>Peng</surname> <given-names>Y.</given-names></name>
<name><surname>Qiao</surname> <given-names>J.</given-names></name>
<name><surname>Henry</surname> <given-names>R.</given-names></name>
<name><surname>Qian</surname> <given-names>Q.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Wild rice: unlocking the future of rice breeding</article-title>. <source>Plant Biotechnol. J.</source> <volume>22</volume>, <fpage>3218</fpage>&#x2013;<lpage>3226</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/pbi.14443</pub-id>, PMID: <pub-id pub-id-type="pmid">39150344</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhu</surname> <given-names>X.</given-names></name>
<name><surname>Liu</surname> <given-names>X.</given-names></name>
<name><surname>Wu</surname> <given-names>Q.</given-names></name>
<name><surname>Liu</surname> <given-names>M.</given-names></name>
<name><surname>Hu</surname> <given-names>X.</given-names></name>
<name><surname>Deng</surname> <given-names>H.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Utilizing UAV-based high-throughput phenotyping and machine learning to evaluate drought resistance in wheat germplasm</article-title>. <source>Comput. Electron. Agric.</source> <volume>237</volume>, <elocation-id>110602</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2025.110602</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ziyi</surname> <given-names>Y.</given-names></name>
<name><surname>Zhijian</surname> <given-names>X.</given-names></name>
<name><surname>Qingwen</surname> <given-names>Y.</given-names></name>
<name><surname>Weihua</surname> <given-names>Q.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Conservation and utilization of genetic resources of wild rice in China</article-title>. <source>Rice Sci.</source> <volume>29</volume>, <fpage>216</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.rsci.2021.08.003</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3068311">Yu Nishizawa</ext-link>, Kagoshima University, Japan</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2854701">Jianliang Wang</ext-link>, Yangzhou University, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3174407">George Adamides</ext-link>, Agricultural Research Institute, Cyprus</p></fn>
</fn-group>
</back>
</article>