<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2026.1736123</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>ApaltAI: a web-based diagnostic system with a sequential voting architecture for detecting anthracnose and scab in avocado fruit</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Moreano</surname><given-names>Mikjael</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2021;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3346070/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Sosa</surname><given-names>Angel</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2021;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3262715/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Mauricio</surname><given-names>David</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2021;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3389970/publications/new"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Rivera</surname><given-names>Luis</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2021;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2633430/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Santisteban</surname><given-names>Jos&#xe9;</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2021;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3187503/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Faculty of Engineering, Universidad Peruana de Ciencias Aplicadas (UPC)</institution>, <city>Lima</city>,&#xa0;<country country="PE">Peru</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Computer Science, Universidad Nacional Mayor de San Marcos</institution>, <city>Lima</city>,&#xa0;<country country="PE">Peru</country></aff>
<aff id="aff3"><label>3</label><institution>Mathematical Sciences Laboratory, Universidade Estadual do Norte Fluminense</institution>, <city>Rio de Janeiro</city>,&#xa0;<country country="BR">Brazil</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Jos&#xe9; Santisteban, <email xlink:href="mailto:pcsilsan@upc.edu.pe">pcsilsan@upc.edu.pe</email></corresp>
<fn fn-type="equal" id="fn003">
<p>&#x2020;These authors have contributed equally to this work</p></fn>
<fn fn-type="other" id="fn004">
<p>&#x2021;ORCID: Luis Rivera, <uri xlink:href="https://orcid.org/0000-0002-5029-2561">orcid.org/0000-0002-5029-2561</uri>; Mikjael Moreano, <uri xlink:href="https://orcid.org/0009-0005-3778-2012">orcid.org/0009-0005-3778-2012</uri>; Angel Sosa, <uri xlink:href="https://orcid.org/0009-0003-4527-4590">orcid.org/0009-0003-4527-4590</uri>; David Mauricio, <uri xlink:href="https://orcid.org/0000-0001-9262-626X">orcid.org/0000-0001-9262-626X</uri>; Jos&#xe9; Santisteban, <uri xlink:href="https://orcid.org/0000-0003-4526-642X">orcid.org/0000-0003-4526-642X</uri></p></fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-24">
<day>24</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1736123</elocation-id>
<history>
<date date-type="received">
<day>31</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>03</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>20</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Moreano, Sosa, Mauricio, Rivera and Santisteban.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Moreano, Sosa, Mauricio, Rivera and Santisteban</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-24">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Avocado (<italic>Persea americana</italic> Mill.), with a global production estimated at 10.4 million tons in 2023, suffers annual losses of 20-30% due to diseases such as anthracnose (<italic>Colletotrichum gloeosporioides</italic>) and scab (<italic>Sphaceloma perseae</italic>), resulting in substantial economic impacts for major producing countries (Mexico, Peru, and Colombia). This study introduces an advanced system that integrates a binary sequential voting architecture (VotingBS) with a fully functional web application, for the automated identification of two high-incidence diseases: anthracnose and scab, both of which critically affect fruit quality and yield. The proposed VotingBS architecture implements a hierarchical two-stage classification strategy. In the first stage, a five-model deep learning ensemble differentiates between healthy and diseased fruits. In the second stage, another ensemble determines which of the two diseases is present. For this purpose, a collection of 674 labeled fruit images was used for training and validation. Experimental results demonstrate outstanding model performance, achieving key metrics such as 98.92% precision, 98.89% recall, and 99.03% accuracy, significantly outperforming traditional approaches. Moreover, the solution was deployed through a web app featuring dedicated modules for crop management, phytosanitary analysis, and disease diagnosis. This architecture enhances the system&#x2019;s practical utility and facilitates its adoption by farmers, field technicians, and agricultural monitoring agencies. Overall, this work demonstrates how combining hybrid deep learning models with accessible digital platforms can revolutionize plant disease diagnostics, fostering a more efficient, automated, and resilient precision agriculture.</p>
</abstract>
<kwd-group>
<kwd>avocado</kwd>
<kwd>convolutional neural networks</kwd>
<kwd>deep learning</kwd>
<kwd>disease detection</kwd>
<kwd>image processing</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. Research funding is provided by the Direcci&#xf3;n de Investigaci&#xf3;n de la Universidad Peruana de Ciencias Aplicadas (UPC), EXPOST-2026-1.</funding-statement>
</funding-group>
<counts>
<fig-count count="8"/>
<table-count count="6"/>
<equation-count count="6"/>
<ref-count count="40"/>
<page-count count="15"/>
<word-count count="7097"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Sustainable and Intelligent Phytoprotection</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Avocado (<italic>Persea americana</italic> Mill.) is a widely consumed fruit, particularly across the Americas, and is highly valued for its bioactive properties and health benefits. Its pulp is rich in monounsaturated and polyunsaturated fatty acids, phytosterols, and fat-soluble vitamins, compounds that have been shown to positively influence metabolic health and contribute to the prevention of chronic diseases (<xref ref-type="bibr" rid="B3">Ahmed et&#xa0;al., 2025</xref>). In 2023, avocado ranked as the second most exported tropical fruit worldwide, with a volume of 2.8 million tons, surpassed only by pineapple at 3.2 million tons. Mexico and Peru remain the leading exporters (<xref ref-type="bibr" rid="B16">FAO, 2024</xref>).</p>
<p>Furthermore, avocado is highly susceptible to infection by various pathogenic fungi, both in the field and during postharvest stages, leading to substantial losses in fruit yield and quality (<xref ref-type="bibr" rid="B33">Silva et&#xa0;al., 2025</xref>). One of the most prevalent diseases affecting this crop is anthracnose (<italic>Colletotrichum gloeosporioides</italic>), caused by fungi of the <italic>Colletotrichum</italic> genus, which can infect fruit tissues, leading to rot (<xref ref-type="bibr" rid="B14">Col&#xed;n-Chavez et&#xa0;al., 2024</xref>). Another significant disease is scab (<italic>Sphaceloma perseae</italic>), which affects both fruit and leaves in warm and humid climates, diminishing crop quality and yield (<xref ref-type="bibr" rid="B12">Chellappan, 2024</xref>).</p>
<p>Conventionally, the diagnosis of these pathologies has relied on visual inspection by agronomists, a process that is subjective, time-consuming, and difficult to scale, particularly for smallholder farmers who often lack immediate access to specialist expertise. This diagnostic bottleneck delays timely intervention, exacerbating yield and quality losses (<xref ref-type="bibr" rid="B15">Demilie, 2024</xref>). While precision agriculture and Deep Learning (DL) offer promising alternatives, their translation into practical, accessible tools for specific crops like avocado remains limited. There is a pronounced gap between the development of accurate DL models in controlled research settings and their deployment as usable, reliable diagnostic aids in real-world agricultural scenarios. This work addresses this gap by developing ApaltAI, an integrated system that combines a novel, high-accuracy decision architecture with a functional web application designed specifically for end-users in the avocado production chain.</p>
<p>Artificial intelligence is increasingly shaping agriculture, with applications that range from optimizing irrigation through machine learning (<xref ref-type="bibr" rid="B38">Villagomez et&#xa0;al., 2024</xref>) to the automated identification of plant diseases. In this field, convolutional neural networks (CNNs) have demonstrated high efficacy in image analysis, proving useful not only in agriculture but also in domains such as medicine. In clinical practice, for instance, CNNs have achieved performance levels comparable to human specialists in detecting ocular pathologies (<xref ref-type="bibr" rid="B25">Moreno-Lozano et&#xa0;al., 2024</xref>) and brain abnormalities (<xref ref-type="bibr" rid="B29">Rodr&#xed;guez et&#xa0;al., 2024</xref>). Their key advantage lies in the ability to automatically extract and learn visual patterns, making them particularly well suited to agricultural problems where diseased crops often exhibit wide morphological variability.</p>
<p>The strong performance of CNNs has encouraged extensive research on applying deep learning (DL) to crop disease detection. <xref ref-type="bibr" rid="B35">Sultan et&#xa0;al. (2025)</xref> developed LeafDNet, a model based on Xception architecture, trained with 5,491 images of crops such as rose, mango and tomato. Their system achieved 99% precision and 98% accuracy. <xref ref-type="bibr" rid="B18">Huang et&#xa0;al. (2023)</xref> developed a hybrid model for disease detection in tomato plants; the proposed model, FC-SNDPN, reached a precision of 97.59%. <xref ref-type="bibr" rid="B26">Moussafir et&#xa0;al. (2022)</xref> designed a model to identify diseases in tomatoes using a hybrid architecture and a dataset of 14,526 images. After evaluating seven architectures, the two best-performing models were combined, resulting in 98.1% precision. <xref ref-type="bibr" rid="B9">Butt et&#xa0;al. (2025)</xref> developed a system for detecting diseases in citrus fruits; the hybrid model combining DenseNet201 and a C-SVM (Support Vector Machine) classifier yielded the highest accuracy on their fruit dataset, achieving 99.2%. Finally, <xref ref-type="bibr" rid="B7">Banjar et&#xa0;al. (2025)</xref> introduced the E-AppleNet model for disease detection in apple crops, using 3,168 images from the PlantVillage dataset. Utilizing the EfficientNetV2 architecture, their system achieved a 99% accuracy rate.</p>
<p>While DL has shown promise for avocado disease detection (e.g., <xref ref-type="bibr" rid="B10">Campos-Ferreira and Gonz&#xe1;lez-Camacho, 2021</xref>), existing studies often focus solely on model accuracy, leaving a critical void: the integration of robust detection models into accessible, end-to-end platforms ready for field use. Furthermore, many approaches employ single, complex classifiers that must simultaneously distinguish between healthy tissue and multiple diseases, a task prone to error propagation. To overcome these limitations, this study introduces ApaltAI, a comprehensive web-based diagnostic system. The core of ApaltAI is the VotingBS (Binary Sequential Voting) architecture, a novel decision model designed to enhance reliability by decomposing the diagnosis into a hierarchical, two-stage process. This design is inherently more robust and is operationalized through a purpose-built web application, making the advanced diagnostic capability directly accessible to farmers and technicians. Therefore, the central theme of this article is the development and validation of an integrated, accessible system (ApaltAI) for avocado disease detection, whose performance and practicality are driven by its innovative VotingBS decision engine.</p>
<p>The combination of CNNs with sophisticated techniques has given rise to hybrid systems, which now represent a promising approach for classifying agricultural images and detecting plant diseases. Recent studies on disease detection in potato and apple cultivation have shown that hybrid models outperform approaches relying solely on CNNs, achieving significant improvements in both accuracy and efficiency (<xref ref-type="bibr" rid="B36">Tiwari et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B8">Bansal et&#xa0;al., 2021</xref>).</p>
<p>To bridge the identified gap between accurate model development and field-deployable solutions, this work pursues two interconnected objectives: (1) to design and validate the VotingBS (Binary Sequential Voting) architecture, a novel hybrid decision framework that enhances diagnostic reliability by decomposing the classification into a hierarchical, two-stage process; and (2) to engineer and deploy ApaltAI, a fully functional web-based diagnostic system built around this architecture, making the technology accessible to end-users. Consequently, the primary contributions of this work are threefold: (a) the VotingBS architecture, a robust decision system that strategically combines multiple deep learning models with a sequential, weighted voting logic to mitigate error propagation; (b) the ApaltAI integrated system, a deployable software platform featuring a modular three-tier design and specialized modules that translate the VotingBS model into a practical diagnostic service; and (c) a comprehensive experimental benchmark, demonstrating that the integrated system not only outperforms state-of-the-art singular and hybrid models but also establishes a new performance benchmark (precision, recall, accuracy &gt;98.9%) for avocado fruit disease detection.</p>
<p>This article is divided into six sections. Section two reviews the background and related works. Section three describes the materials and methods, including the design of the VotingBS architecture, the web application and the validation process. Section four reports the experimental results, followed by section five, which discusses these findings in the context of related work and outlines the system&#x2019;s contributions. Finally, section six summarizes the main conclusions and suggests directions for future research.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Background and related works</title>
<p>The application of CNNs for plant disease identification involves a carefully structured sequence of stages, each of which plays a critical role in achieving reliable diagnostic performance. In general, the studies analyzed on this topic follow the process sequence shown in <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref>. Analyzing this common workflow across studies is crucial for identifying both established best practices and persisting limitations, thereby framing the specific research gap addressed by our proposed system.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>General process for crop disease detection.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-g001.tif">
<alt-text content-type="machine-generated">Avocado fruits growing on a tree above a flowchart illustrating the process of developing a diagnostic model, with stages: data collection, data labeling, data preprocessing, model training, model evaluation, model validation, final, and diagnosis.</alt-text>
</graphic></fig>
<p>The process flow followed consists of:</p>
<list list-type="bullet">
<list-item>
<p><italic>Data collection</italic>: The process begins with acquiring images of affected crops. These images are either captured directly in the field using cameras and mobile devices or obtained from open-access repositories such as Kaggle and PlantVillage. For example, <xref ref-type="bibr" rid="B7">Banjar et&#xa0;al. (2025)</xref> utilized 3,168 images from PlantVillage, categorized into four distinct disease classes.</p></list-item>
<list-item>
<p><italic>Data labeling</italic>: Each image is accurately annotated to identify the specific disease it presents. This detailed annotation enables the model to learn and differentiate distinctive patterns for each disease. For instance, <xref ref-type="bibr" rid="B31">Saleem et&#xa0;al. (2022)</xref> performed detailed labeling of images from crops such as apple, avocado, grape, kiwi, and pear, creating the NZDLPlantDisease-v1 dataset, which comprises 20 different classes.</p></list-item>
<list-item>
<p><italic>Data preprocessing</italic>: Before initiating training, all images are subjected to a preprocessing pipeline that includes scaling, normalization, contrast adjustment, and/or noise removal, as highlighted in several studies (<xref ref-type="bibr" rid="B4">Alshammari et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B20">Kaya and G&#xfc;rsoy, 2023</xref>). This step is crucial for improving image quality and ensuring the model is trained under optimal conditions, facilitating accurate identification of disease-related patterns. For example, <xref ref-type="bibr" rid="B32">Sholihati et&#xa0;al. (2020)</xref> applied data augmentation to enrich their dataset, resulting in a more robust system.</p></list-item>
<list-item>
<p><italic>Model training</italic>: Involves training the DL model with labeled, preprocessed images, enabling it to learn and differentiate the characteristic visual features of each disease.</p></list-item>
<list-item>
<p><italic>Model evaluation and validation</italic>: A dataset of unseen images serves to evaluate the trained model. This evaluation quantifies the accuracy and provides the basis for refining its architecture and hyperparameters to optimize performance.</p></list-item>
</list>
<p>The various related works have, in one way or another, followed the presented workflow. For training and validation purposes, these authors utilized datasets such as PlantVillage, which aggregates images of potatoes, tomatoes, apples, and strawberries, among other produce, across several classes; and own datasets containing various classes of images of potatoes, apples, olives, bananas, guavas, and mangoes, among others. A few studies also employed the New Plant Disease Dataset and the Potato Leaf Disease Dataset. In the quality analysis of potatoes, the VGG19+LR model was formulated by <xref ref-type="bibr" rid="B36">Tiwari et&#xa0;al. (2020)</xref> using the PlantVillage dataset, and the VGG16 model was formulated by <xref ref-type="bibr" rid="B32">Sholihati et&#xa0;al. (2020)</xref> using their own dataset. The PlantVillage dataset was also used in the quality analysis of tomatoes with CNN-based models by <xref ref-type="bibr" rid="B19">Karthik et&#xa0;al. (2020)</xref>; <xref ref-type="bibr" rid="B2">Agarwal et&#xa0;al. (2020)</xref>, and in the DenseNet121 model proposed by <xref ref-type="bibr" rid="B1">Abbas et&#xa0;al. (2021)</xref>. For apple care, own datasets were used in Hybrid models (DenseNet121, EfficientNetB7, EfficientNet) by <xref ref-type="bibr" rid="B8">Bansal et&#xa0;al. (2021)</xref>, in the Xception + F-RCNN model by <xref ref-type="bibr" rid="B21">Khan et&#xa0;al. (2022)</xref>, the MLP-CNN by <xref ref-type="bibr" rid="B37">Turkoglu et&#xa0;al. (2022)</xref>, and the standard CNN by <xref ref-type="bibr" rid="B39">Vishnoi et&#xa0;al. (2023)</xref>. Additionally, the PlantVillage dataset was used in the CNN model by <xref ref-type="bibr" rid="B22">Mahato et&#xa0;al. (2022)</xref>, the DenseNet+1D-CNN model by <xref ref-type="bibr" rid="B30">Sai and Neeraja (2022)</xref>, and the CNN + Unet model by <xref ref-type="bibr" rid="B27">Polly and Devi (2024)</xref>. 
Other specific datasets were also used in apple analysis, such as the New Plant Disease Dataset in the AIE-ALDC model by <xref ref-type="bibr" rid="B5">Al-Wesabi et&#xa0;al. (2022)</xref>, as well as a proprietary dataset by <xref ref-type="bibr" rid="B6">Banarase and Shirbahadurkar (2024)</xref> in MobileNetV2. <xref ref-type="bibr" rid="B17">Hari and Singh (2023)</xref> used a CNN-based model with their own dataset for the analysis of three types of fruits (Banana, Guava, Mango). We only observed one study concerning avocado quality classification: the MSCA-PSCO MobileNetV2 model developed by <xref ref-type="bibr" rid="B24">Mishra et&#xa0;al. (2022)</xref> using their own dataset. <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref> summarizes these 19 studies on DL-based disease detection in fruits using images and their performance results.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>DL studies for crop disease detection.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Study</th>
<th valign="middle" align="left">Dataset</th>
<th valign="middle" align="left">Crop type</th>
<th valign="middle" align="left">Model</th>
<th valign="middle" align="left">Results</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B36">Tiwari et&#xa0;al. (2020)</xref></td>
<td valign="middle" align="left">PlantVillage: 2,152 (3 classes)</td>
<td valign="middle" align="left">Potato</td>
<td valign="middle" align="left">VGG19+LR</td>
<td valign="middle" align="left">Acc=97.8%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B19">Karthik et&#xa0;al. (2020)</xref></td>
<td valign="middle" align="left">PlantVillage: 120,000 (4 classes)</td>
<td valign="middle" align="left">Tomato</td>
<td valign="middle" align="left">CNN</td>
<td valign="middle" align="left">Acc=98%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B2">Agarwal et&#xa0;al. (2020)</xref></td>
<td valign="middle" align="left">PlantVillage: 17,500 (10 classes)</td>
<td valign="middle" align="left">Tomato</td>
<td valign="middle" align="left">CNN</td>
<td valign="middle" align="left">Acc=91.2%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B32">Sholihati et&#xa0;al. (2020)</xref></td>
<td valign="middle" align="left">Own dataset: 5,100 (5 classes)</td>
<td valign="middle" align="left">Potato</td>
<td valign="middle" align="left">VGG16</td>
<td valign="middle" align="left">Acc=91.31%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B1">Abbas et&#xa0;al. (2021)</xref></td>
<td valign="middle" align="left">PlantVillage: 16,012 (10 classes)</td>
<td valign="middle" align="left">Tomato</td>
<td valign="middle" align="left">DenseNet121</td>
<td valign="middle" align="left">Acc=97.11%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B8">Bansal et&#xa0;al. (2021)</xref></td>
<td valign="middle" align="left">Own dataset: 3,642 (4 classes)</td>
<td valign="middle" align="left">Apple</td>
<td valign="middle" align="left">Hybrid (DenseNet121, EfficientNetB7, EfficientNet NoisyStudent)</td>
<td valign="middle" align="left">Acc=96.25%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B24">Mishra et&#xa0;al. (2022)</xref></td>
<td valign="middle" align="left">Own dataset: 19,460 (2 classes)</td>
<td valign="middle" align="left">Avocado</td>
<td valign="middle" align="left">MSCA-PSCO MobileNetV2</td>
<td valign="middle" align="left">Acc=98.42%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B4">Alshammari et&#xa0;al. (2022)</xref></td>
<td valign="middle" align="left">Own dataset: 3,400 (3 classes)</td>
<td valign="middle" align="left">Olive</td>
<td valign="middle" align="left">ViT+VGG16</td>
<td valign="middle" align="left">Acc=97%<break/>Pre=98%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B5">Al-Wesabi et&#xa0;al. (2022)</xref></td>
<td valign="middle" align="left">New Plant Disease Dataset: 9,714 (4 classes)</td>
<td valign="middle" align="left">Apple</td>
<td valign="middle" align="left">AIE-ALDC</td>
<td valign="middle" align="left">Acc=99.20%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B21">Khan et&#xa0;al. (2022)</xref></td>
<td valign="middle" align="left">Own dataset: 5,201 (10 classes)</td>
<td valign="middle" align="left">Apple</td>
<td valign="middle" align="left">Xception + F-RCNN</td>
<td valign="middle" align="left">Acc=81.09%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B22">Mahato et&#xa0;al. (2022)</xref></td>
<td valign="middle" align="left">PlantVillage: 32,950 (4 classes)</td>
<td valign="middle" align="left">Apple</td>
<td valign="middle" align="left">CNN</td>
<td valign="middle" align="left">Pre=99.31%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B30">Sai and Neeraja (2022)</xref></td>
<td valign="middle" align="left">PlantVillage: 8,875 (4 classes)</td>
<td valign="middle" align="left">Apple; Grape; Potato; Strawberry</td>
<td valign="middle" align="left">DenseNet<break/>+1D-CNN</td>
<td valign="middle" align="left">Acc=97%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B37">Turkoglu et&#xa0;al. (2022)</xref></td>
<td valign="middle" align="left">Own dataset: 1,192 (4 classes)</td>
<td valign="middle" align="left">Apple</td>
<td valign="middle" align="left">MLP-CNNs</td>
<td valign="middle" align="left">Acc=99.2%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B17">Hari and Singh (2023)</xref></td>
<td valign="middle" align="left">Own dataset: 1,791 (8 classes)</td>
<td valign="middle" align="left">Banana; Guava; Mango</td>
<td valign="middle" align="left">CNN</td>
<td valign="middle" align="left">Acc=99.14%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B39">Vishnoi et&#xa0;al. (2023)</xref></td>
<td valign="middle" align="left">Own dataset: 3,171 (4 classes)</td>
<td valign="middle" align="left">Apple</td>
<td valign="middle" align="left">CNN</td>
<td valign="middle" align="left">Acc=98%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B23">Mir et&#xa0;al. (2024)</xref></td>
<td valign="middle" align="left">Own dataset: 4,190 (8 classes)</td>
<td valign="middle" align="left">Potato</td>
<td valign="middle" align="left">CNN + RF</td>
<td valign="middle" align="left">Acc=93.66%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B27">Polly and Devi (2024)</xref></td>
<td valign="middle" align="left">PlantVillage: 8,631 (4 classes)</td>
<td valign="middle" align="left">Tomato; Corn; Apple</td>
<td valign="middle" align="left">CNN + UNet</td>
<td valign="middle" align="left">Acc=98.01%<break/>Pre=99.5%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B6">Banarase and Shirbahadurkar (2024)</xref></td>
<td valign="middle" align="left">Own dataset: 3,175 (4 classes)</td>
<td valign="middle" align="left">Apple</td>
<td valign="middle" align="left">MobileNetV2</td>
<td valign="middle" align="left">Acc=99.36%</td>
</tr>
<tr>
<td valign="middle" align="left"><xref ref-type="bibr" rid="B34">Sinamenye et&#xa0;al. (2025)</xref></td>
<td valign="middle" align="left">Potato Leaf Disease Dataset: 3,076 (7 classes)</td>
<td valign="middle" align="left">Potato</td>
<td valign="middle" align="left">EfficientNetV2B3 + ViT</td>
<td valign="middle" align="left">Acc=85.06%<break/>Pre=82.86%</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>F-RCNN, Faster Region Convolutional Neural Network; Pre, Precision; DCNN, Deep Convolutional Neural Network; Acc, Accuracy; ViT, Vision Transformer.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>The analysis in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref> consolidates the remarkable progress of DL, particularly CNNs, in crop disease detection, with many models achieving accuracy rates above 95% across various crops. This establishes a strong technological precedent. However, three critical gaps relevant to our work can be observed: (1) a predominant focus on leaf diseases over fruit-specific pathologies; (2) a scarcity of studies dedicated to avocado, particularly targeting fruit diseases like anthracnose and scab; and (3) a strong emphasis on model accuracy in isolation, with fewer examples of complete, deployable systems tailored for end-user adoption. These gaps highlight the opportunity and necessity for the present study. Consequently, while leveraging the established efficacy of CNNs, our work introduces a novel sequential decision architecture (VotingBS) specifically designed to enhance robustness for fruit disease diagnosis and embeds it within a fully functional web application (ApaltAI). The adoption of deep learning-based diagnostic systems not only enhances disease identification accuracy but is also designed to lead to more sustainable crop management through accessible, timely diagnostics.</p>
</sec>
<sec id="s3" sec-type="materials|methods">
<label>3</label>
<title>Materials and methods</title>
<sec id="s3_1">
<label>3.1</label>
<title>Proposed detection architecture</title>
<p>An IT-based architecture is proposed for detecting scab and anthracnose in avocado fruits using image analysis. It employs deep learning-based image processing and follows five components: image acquisition, preprocessing, diagnosis module, the DL model and the diagnostic output.</p>
<p><xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref> shows the workflow, starting with the farmer capturing fruit images, which are then preprocessed to enhance clarity and definition. The optimized data are analyzed by a diagnosis module powered by a pre-trained ensemble architecture that combines several DL models for classification. Based on this analysis, the system evaluates each image and determines the fruit&#x2019;s condition, classifying it as healthy, affected by scab or affected by anthracnose. Finally, the diagnostic result is delivered to the farmer, enabling appropriate treatment decisions. The components of the system are described below (<xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>).</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Proposed disease detection process.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-g002.tif">
<alt-text content-type="machine-generated">Flowchart illustrates a farmer capturing fruit images with a camera, inputting them for preprocessing, followed by analysis by a machine-learning model, leading to disease diagnosis service and diagnostic output displayed on a clipboard.</alt-text>
</graphic></fig>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Components of the detection process.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Component</th>
<th valign="middle" align="center">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Image Acquisition</td>
<td valign="middle" align="left">Digital cameras, drones and mobile devices now play a central role in agricultural monitoring, providing a practical means of capturing images in the field (<xref ref-type="bibr" rid="B13">Chen et&#xa0;al., 2021</xref>). Accurate diagnosis depends on the availability of high-quality images that clearly display critical indicators such as leaf spots, discoloration areas and irregular texture patterns (<xref ref-type="bibr" rid="B28">Rani et&#xa0;al., 2023</xref>).</td>
</tr>
<tr>
<td valign="middle" align="left">Preprocessing</td>
<td valign="middle" align="left">To facilitate analysis, a preprocessing pipeline is applied to highlight the most relevant features. The process begins with image resizing to match the input requirements of the pre-trained model architecture. This is followed by pixel value normalization, which standardizes data distribution and enhances training stability. Data augmentation is also incorporated through adjustments in lighting and contrast, helping reduce overfitting and improving the model&#x2019;s adaptability to diverse visual conditions.</td>
</tr>
<tr>
<td valign="middle" align="left">Model</td>
<td valign="middle" align="left">Once preprocessed, the images are fed into a validated DL model. In this study, an ensemble architecture is used, integrating predictions from models such as DenseNet121, ResNet50, InceptionV3, VGG16, and EfficientNetB2. Model parameters are optimized during training using the Stochastic Gradient Descent (SGD) algorithm, which iteratively computes weight updates using random data subsets to minimize the loss function and promote effective convergence.</td>
</tr>
<tr>
<td valign="middle" align="left">Disease diagnosis service</td>
<td valign="middle" align="left">This service receives an input image and processes it using a DL model (either singular or hybrid) to generate a diagnosis of the disease.</td>
</tr>
<tr>
<td valign="middle" align="left">Diagnostic output</td>
<td valign="middle" align="left">Presents the diagnosis, including the detected disease, the model&#x2019;s confidence level (expressed as a percentage or probability) and a history of previous diagnoses. Additionally, it provides information about the identified disease along with agronomic management recommendations and treatment options, all delivered through an interface.</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The construction of the hybrid DL model VotingBS is carried out in two stages. First, each DL model is trained separately. Transfer learning is employed for this purpose, leveraging pre-trained weights to initialize the networks and fine-tune them for the specific task. In this study, five DL architectures are considered: DenseNet121, ResNet50, InceptionV3, VGG16, and EfficientNetB2. Second, a binary sequential voting architecture, called VotingBS, is constructed to analyze the input avocado image and generate a diagnosis of &#x2018;Healthy&#x2019; or an outcome indicating affliction by scab or anthracnose. This architecture is described below.</p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>VotingBS</title>
<p>This process leads to the construction of the VotingBS (Binary Sequential Voting) architecture. VotingBS is a hybrid decision system that orchestrates two sets of five binary DL models <inline-formula>
<mml:math display="inline" id="im1"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>D</mml:mi><mml:mi>i</mml:mi><mml:mn>1</mml:mn></mml:msubsup></mml:mrow></mml:math></inline-formula> and <inline-formula>
<mml:math display="inline" id="im2"><mml:mrow><mml:msubsup><mml:mi>D</mml:mi><mml:mi>i</mml:mi><mml:mn>2</mml:mn></mml:msubsup><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mn>5</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> through a structured, two-phase voting scheme. The scheme operates as follows: in the first phase, each model classifies the image as either healthy or diseased, and a unanimous or majority decision of &#x201c;healthy&#x201d; concludes the process with that result. Otherwise, the system proceeds to the second phase. In this phase &#x2014;where the avocado is considered unhealthy&#x2014; the models <inline-formula>
<mml:math display="inline" id="im3"><mml:mrow><mml:msubsup><mml:mi>D</mml:mi><mml:mi>i</mml:mi><mml:mn>2</mml:mn></mml:msubsup><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mn>5</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:math></inline-formula> classify the image as either anthracnose or scab. Their outputs are again submitted to a voting process, which selects the majority decision as the final classification. This binary and sequential structure constitutes the VotingBS architecture and is illustrated in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>. Therefore, VotingBS is not merely a <italic>post-hoc</italic> voting mechanism; it is an integral hybrid system where the specialized DL models and the sequential decision logic are co-designed. This justifies its direct comparison against singular DL models (which lack this decision structure) and other ensemble methods, as all represent distinct approaches to the classification task.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>The VotingBS decision architecture.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-g003.tif">
<alt-text content-type="machine-generated">Flowchart shows avocado image preprocessing leading to a two-phase diagnostic system: phase one for disease detection, followed by voting consensus to assess health. If healthy, result is healthy; if not, phase two identifies pathogen, concluding with voting consensus for final result such as scab or anthracnose.</alt-text>
</graphic></fig>
<p><xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref> illustrates the sequential two-phase voting process: a first voting stage to distinguish healthy from diseased fruit, and upon a diseased outcome, a second voting stage to discriminate between anthracnose and scab. In both voting phases, each DL model (<inline-formula>
<mml:math display="inline" id="im4"><mml:mrow><mml:msub><mml:mi>D</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mn>5</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> produces a classification result along with a confidence value, given by its individual precision (<inline-formula>
<mml:math display="inline" id="im5"><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mn>5</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>. These confidence values are normalized to obtain class-specific normalized weights (<xref ref-type="disp-formula" rid="eq1"><bold>Equation 1</bold></xref>).</p>
<disp-formula id="eq1"><label>(1)</label>
<mml:math display="block" id="M1"><mml:mrow><mml:msub><mml:mi>w</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:msubsup><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo></mml:mstyle><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mn>5</mml:mn></mml:msubsup><mml:msub><mml:mi>P</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>Subsequently, the normalized weights are aggregated according to the predicted class, denoted A (anthracnose) and S (scab). The final score for each class is then calculated as shown in <xref ref-type="disp-formula" rid="eq2">Equations 2</xref> and <xref ref-type="disp-formula" rid="eq3">3</xref>.</p>
<disp-formula id="eq2"><label>(2)</label>
<mml:math display="block" id="M2"><mml:mrow><mml:mi>S</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mi>A</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mn>5</mml:mn></mml:msubsup><mml:mrow><mml:msub><mml:mi>w</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#xb7;</mml:mo><mml:mi>I</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi>y</mml:mi><mml:mo>^</mml:mo></mml:mover><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>A</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mstyle><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq3"><label>(3)</label>
<mml:math display="block" id="M3"><mml:mrow><mml:mi>S</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mi>S</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:msubsup><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mn>5</mml:mn></mml:msubsup><mml:mrow><mml:msub><mml:mi>w</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>&#xb7;</mml:mo><mml:mi>I</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi>y</mml:mi><mml:mo>^</mml:mo></mml:mover><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>S</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mstyle></mml:mrow></mml:math>
</disp-formula>
<p>Where <inline-formula>
<mml:math display="inline" id="im6"><mml:mrow><mml:mi>I</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi>y</mml:mi><mml:mo>^</mml:mo></mml:mover><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>k</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> is an indicator function that equals 1 if <inline-formula>
<mml:math display="inline" id="im7"><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi>y</mml:mi><mml:mo>^</mml:mo></mml:mover><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>k</mml:mi></mml:mrow></mml:math></inline-formula> (<inline-formula>
<mml:math display="inline" id="im8"><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mi>A</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mi>S</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> and 0 otherwise.</p>
<p>Finally, the class receiving the highest confidence score is assigned as the diagnostic result. This voting-based approach enables the integration of multiple model outputs and improves the overall classification accuracy by reducing the impact of erroneous predictions from any single model.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Web application</title>
<p>ApaltAI is a web-based application powered by CNNs, developed to identify pathologies in avocado fruits. The platform implements a classification scheme optimized to process images and generate diagnostic results. Its development addresses the need to provide farmers, particularly small-scale producers, with an accessible tool to identify plant pathologies, enabling the timely adoption of preventive or corrective measures with minimal latency.</p>
<sec id="s3_3_1">
<label>3.3.1</label>
<title>Logical architecture</title>
<p>The architecture of ApaltAI follows a modular three-layer design (<xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>), ensuring scalability, security, and efficiency in diagnostic processing:</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Web application architecture.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-g004.tif">
<alt-text content-type="machine-generated">System architecture diagram showing a user interacting with a frontend built using Visual Studio Code and Angular eighteen, which communicates via HTTP client to a Spring Boot Java backend. The backend connects to a disease diagnosis service using Python and a hybrid model, and a MySQL database storing crop, user, diagnosis, and file data. Images are stored on Google Cloud.</alt-text>
</graphic></fig>
<list list-type="bullet">
<list-item>
<p><italic>Frontend layer</italic>: A responsive web interface designed for non-technical users (e.g., farmers), optimized for both mobile and desktop devices. It allows intuitive image uploads and the visualization of diagnostic results.</p></list-item>
<list-item>
<p><italic>Backend layer</italic>: Implemented using Spring Boot (Java), this layer handles: business logic and workflow management; authentication via JWT (JSON Web Tokens); secure communication with other layers through RESTful APIs; and integration with storage services.</p></list-item>
<list-item>
<p><italic>Diagnosis layer</italic>: A specialized service developed with FastAPI (Python) that encapsulates the CNN-based classification model (TensorFlow/Keras). Key features include: (a) image preprocessing (normalization, data augmentation); (b) real-time inference using the trained model; and (c) generation of diagnostic outputs.</p></list-item>
</list>
</sec>
<sec id="s3_3_2">
<label>3.3.2</label>
<title>Technologies used</title>
<p>The development of ApaltAI integrates four main components that collectively enable the full functionality of the application:</p>
<list list-type="bullet">
<list-item>
<p>Frontend: Developed using Angular 18 and the Bootstrap 5.3.2 styling framework, this component serves as the primary interaction point for farmers. It is designed to facilitate user interaction by allowing the upload of fruit images for analysis in JPEG and PNG formats.</p></list-item>
<list-item>
<p>Backend: Built with Spring Boot 3.3.4, the backend forms the core of the application, managing business logic, user management, and session handling. Security is a priority, implemented via JWT to ensure secure communication between client and server and to restrict access to critical functions to authenticated users only. Additionally, RESTful APIs are used for managing crops, diagnostics, and related information.</p></list-item>
<list-item>
<p>Diagnosis service: This specialized service is implemented using FastAPI 0.115.11 in Python and hosts the disease detection model developed with TensorFlow 2.19.0, supported by auxiliary libraries such as Scikit-learn 1.6.1 and Keras. The diagnosis service receives images from the backend, processes and analyzes each image to detect signs of disease and returns the results for user presentation.</p></list-item>
<list-item>
<p>Data storage: The system uses MySQL for structured data storage, such as user records and diagnostic results, leveraging its scalability, high performance, and automated administration. For handling unstructured data (images), Google Cloud Storage is employed &#x2014;a highly scalable solution that ensures fast and efficient access, even with growing data volumes. This dual-architecture approach optimizes both metadata processing and storage of critical visual resources for the model.</p></list-item>
</list>
</sec>
<sec id="s3_3_3">
<label>3.3.3</label>
<title>Application modules</title>
<p>The proposed web application integrates three main modules designed to facilitate user-system interaction for disease detection in avocado crops: (1) Crop Module, (2) Analysis Module, and (3) Diagnosis Module. Their key functionalities are described below:</p>
<list list-type="bullet">
<list-item>
<p><italic>Crop module</italic>: This module allows users to register new crops and maintain ongoing monitoring through personalized notes. Each registered crop is displayed in a table, from which it can be consulted, edited, or deleted as needed. Over time, users can add annotations to record phenological events, environmental conditions, or other relevant occurrences. This annotation capability supports more organized crop management and helps maintain a useful historical record for future decision-making.</p></list-item>
<list-item>
<p><italic>Analysis module</italic>: Designed to process images in an automated and efficient manner. The process begins with the validation of the image uploaded by the user, checking for aspects such as format, minimum required resolution, and file size. This step ensures compliance with analytical requirements (see <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>). Once validated, the image proceeds through preprocessing to guarantee optimal input quality. The VotingBS architecture then analyzes the processed images to detect and classify their condition. The resulting data obtained can be saved in a relational database and subsequently forwarded to the diagnosis module for visualization and further analysis.</p></list-item>
</list>
<p><xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref> illustrates the user interface for image upload and validation, a key component of the ApaltAI workflow that demonstrates the integration of the VotingBS architecture into a user-friendly process.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Interface of the analysis module.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-g005.tif">
<alt-text content-type="machine-generated">Screenshot of the ApaltAI web application showing the “Analyze Crop” page where a file named “antracnosis-11.jpg” is selected, a green Analyze button is highlighted, and an avocado image is displayed with the text “La imagen es válida.” Red arrows label the “Select file” and “Analyze” buttons.</alt-text>
</graphic></fig>
<list list-type="bullet">
<list-item>
<p><italic>Diagnosis module</italic> is designed to deliver actionable and trustworthy diagnostic information to support agricultural decision-making (see <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>). For each analysis, the interface presents the primary diagnosis (e.g., &#x2018;Healthy&#x2019;, &#x2018;Anthracnose&#x2019;, &#x2018;Scab&#x2019;) alongside a model confidence score (derived from VotingBS scheme), providing users with a transparent measure of the system&#x2019;s certainty. To enhance interpretability, each result is accompanied by detailed technical information on the identified disease &#x2014;including characteristic symptoms, causal agents, and conditions favoring its development&#x2014; along with science-based agronomic management recommendations. Furthermore, the module maintains a complete chronological history of all diagnoses for a given crop, enabling users to track disease progression and treatment efficacy over time. This combination of a quantifiable confidence metric, explanatory agronomic context, and historical tracking is explicitly designed to bridge the gap between algorithmic output and informed field decision, thereby fostering user trust and interpretability.</p></list-item>
</list>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Interface of the diagnosis module.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-g006.tif">
<alt-text content-type="machine-generated">Screenshot of a plant disease diagnosis application showing an avocado with dark spots, labeled sections for result, analyzed image, date of analysis as July 3, 2025, and technical disease information in Spanish.</alt-text>
</graphic></fig>
</sec>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Validation strategy</title>
<p>A detailed framework was followed during the validation to ensure reliable findings. This process included four main stages: (1) dataset description, (2) definition of evaluation metrics, (3) execution of controlled experiments across different models, and (4) comparative analysis of the results. Each stage was carefully documented and adapted to address specific challenges in avocado disease detection, thereby ensuring both technical and agricultural relevance.</p>
<sec id="s3_4_1">
<label>3.4.1</label>
<title>Dataset</title>
<p>This research utilized a dataset of avocado fruit images compiled from multiple sources: 351 images from the public dataset available on Kaggle (camposfe1/clasificacion-de-enfermedades-con-deep-learning), 63 images from the &#x201c;Hass&#x201d; Avocado Ripening Photographic Dataset (<xref ref-type="bibr" rid="B40">Xavier et&#xa0;al., 2024</xref>), supplemented by 260 additional images to achieve equitable distribution among the three classification groups: healthy, affected by scab, and affected by anthracnose. These additional images were sourced from various online platforms and verified by agronomy specialists. <xref ref-type="fig" rid="f7"><bold>Figure&#xa0;7</bold></xref> shows some examples of these images.</p>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Avocado fruit images by class: <bold>(A)</bold> Scab, <bold>(B)</bold> Anthracnose, <bold>(C)</bold> Healthy.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-g007.tif">
<alt-text content-type="machine-generated">Six avocados are arranged in two rows of three; some have visible blemishes or brown spots, while others appear smooth and unblemished, illustrating differences in fruit surface quality.</alt-text>
</graphic></fig>
<p>The dataset was subjected to the following preprocessing pipeline. RGB input images were resized to the required input dimensions for each architecture: 224&#xd7;224 pixels for ResNet50, VGG16, and DenseNet121; 299&#xd7;299 for InceptionV3; and 260&#xd7;260 for EfficientNetB2. Subsequently, pixel values were normalized using the specific preprocessing methods provided by each model. Data augmentation was implemented during training using only photometric transformations that conserve crucial diagnostic features, including brightness adjustments of &#xb1;0.2 and contrast adjustments of &#xb1;0.5. This conservative strategy was chosen deliberately. The key diagnostic features for anthracnose and scab&#x2014;such as lesion color, texture, and precise morphological boundaries&#x2014;are sensitive to geometric distortions (e.g., aggressive cropping or rotation) which could alter their relative scale or orientation, potentially confusing the model. The selected thresholds for brightness (&#xb1; 0.2) and contrast (&#xb1; 0.5) were empirically set to simulate realistic variations in natural lighting and camera capture conditions without causing unrealistic over- or under-exposure that would distort color-based diagnostic cues. While more extensive augmentation strategies (including geometric transformations) are valuable for enhancing robustness to viewpoint changes, they were reserved for future work with larger, more diverse field datasets where such variability is inherent. The preprocessing steps maintained the original dataset size, with the total number of processed images kept constant at 674. Photometric variations were generated in real-time for each image during the different training epochs, without creating additional physical copies. <xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref> summarizes the dataset characteristics before and after preprocessing.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Characteristics of the original and preprocessed dataset.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Characteristic</th>
<th valign="middle" align="center">Original</th>
<th valign="middle" align="center">Preprocessed</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Dimension</td>
<td valign="middle" align="center">Variable</td>
<td valign="middle" align="center">224 &#xd7; 224<break/>260 &#xd7; 260<break/>299 &#xd7; 299</td>
</tr>
<tr>
<td valign="middle" align="left">Pixel range</td>
<td valign="middle" align="center">0 - 255</td>
<td valign="middle" align="center">0 - 1</td>
</tr>
<tr>
<td valign="middle" align="left">Total images</td>
<td valign="middle" align="center">674</td>
<td valign="middle" align="center">674</td>
</tr>
<tr>
<td valign="middle" align="left">Healthy images</td>
<td valign="middle" align="center">282</td>
<td valign="middle" align="center">282</td>
</tr>
<tr>
<td valign="middle" align="left">Scab images</td>
<td valign="middle" align="center">196</td>
<td valign="middle" align="center">196</td>
</tr>
<tr>
<td valign="middle" align="left">Anthracnose images</td>
<td valign="middle" align="center">196</td>
<td valign="middle" align="center">196</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The preprocessed dataset was divided into two subsets with the following distribution: 85% for training and 15% for validation. A hold-out validation strategy was employed instead of k-fold cross-validation due to the substantial computational cost associated with training and fine-tuning five distinct deep learning architectures multiple times. This approach provides a clear and computationally efficient partition for unbiased performance evaluation and model selection, which is a standard practice for the comparative analysis of deep learning architectures in similar studies. The assembled dataset of 674 images provides a foundational basis for the comparative development and validation of the proposed VotingBS architecture against established model benchmarks. While larger datasets exist for other crops, this collection is of comparable size to foundational works in specialized agricultural vision tasks (e.g., <xref ref-type="bibr" rid="B37">Turkoglu et&#xa0;al., 2022</xref>: 1,192 images; <xref ref-type="bibr" rid="B17">Hari and Singh, 2023</xref>: 1,791 images) and is sufficient for the primary objective of this study: to demonstrate the efficacy and comparative advantage of a novel decision architecture under controlled experimental conditions. The limitations of this dataset regarding generalization to uncontrolled field environments are explicitly addressed in the Discussion (Section 5).</p>
</sec>
<sec id="s3_4_2">
<label>3.4.2</label>
<title>Evaluation metrics</title>
<p>To evaluate the model&#x2019;s performance, standard image classification metrics were employed, including precision, recall, and accuracy, as used in various agricultural studies (<xref ref-type="bibr" rid="B5">Al-Wesabi et&#xa0;al., 2022</xref>; <xref ref-type="bibr" rid="B27">Polly and Devi, 2024</xref>; <xref ref-type="bibr" rid="B26">Moussafir et&#xa0;al., 2022</xref>). These metrics are defined and formulated as follows:</p>
<disp-formula id="eq4"><label>(4)</label>
<mml:math display="block" id="M4"><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>=</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>+</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq5"><label>(5)</label>
<mml:math display="block" id="M5"><mml:mrow><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi><mml:mtext>&#xa0;</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>+</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq6"><label>(6)</label>
<mml:math display="block" id="M6"><mml:mrow><mml:mi>A</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>+</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>+</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>+</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>+</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>Where, precision (<xref ref-type="disp-formula" rid="eq4"><bold>Equation 4</bold></xref>) measures how well the model correctly classifies an avocado into each category (scab, anthracnose, or healthy), minimizing confusion between classes; recall metric (<xref ref-type="disp-formula" rid="eq5"><bold>Equation 5</bold></xref>) quantifies how well the model can find all positive cases for every class of avocado, ensuring that no instances of scab or anthracnose are missed; accuracy (<xref ref-type="disp-formula" rid="eq6"><bold>Equation 6</bold></xref>) reflects the overall percentage of avocado images correctly classified (scab, anthracnose, or healthy) by the model.</p>
</sec>
<sec id="s3_4_3">
<label>3.4.3</label>
<title>Experiments</title>
<p>Five CNN models &#x2014;ResNet50, InceptionV3, EfficientNetB2, VGG16, and DenseNet121&#x2014; were evaluated, with hyperparameters optimized through literature review and empirical testing (<xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>). To mitigate the risk of overfitting given the dataset size, two strategies were employed: (1) Transfer learning using ImageNet pre-trained weights, which provides models with robust generic feature extractors from the start, and (2) Photometric data augmentation (brightness and contrast adjustments) during training, which introduces variability and improves model invariance to lighting conditions. These strategies were chosen to enhance generalization within the constraints of the available data, allowing for a robust comparative evaluation of the proposed architectures.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Hyperparameters of the singular DL models.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">ResNet50</th>
<th valign="middle" align="center">InceptionV3</th>
<th valign="middle" align="center">EfficientNetB2</th>
<th valign="middle" align="center">VGG16</th>
<th valign="middle" align="center">DenseNet121</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Batch size = 32<break/>weights = Imagenet<break/>Input shape = 224 x 224<break/>include_top = False<break/>Dense activation = relu<break/>Learning rate = 0.001<break/>Optimizer = SGD<break/>Epochs = 80</td>
<td valign="middle" align="left">Batch size = 32<break/>weights = Imagenet<break/>Input shape = 299 x 299<break/>include_top = False<break/>Dense activation = relu<break/>Learning rate = 0.001<break/>Optimizer = SGD<break/>Epochs = 80</td>
<td valign="middle" align="left">Batch size = 32<break/>weights = Imagenet<break/>Input shape = 260 x 260<break/>include_top = False<break/>Dense activation = relu<break/>Learning rate = 0.001<break/>Optimizer = SGD<break/>Epochs = 80</td>
<td valign="middle" align="left">Batch size = 32<break/>weights = Imagenet<break/>Input shape = 224 x 224<break/>include_top = False<break/>Dense activation = relu<break/>Learning rate = 0.001<break/>Optimizer = SGD<break/>Epochs = 80</td>
<td valign="middle" align="left">Batch size = 32<break/>weights = Imagenet<break/>Input shape = 224 x 224<break/>include_top = False<break/>Dense activation = relu<break/>Learning rate = 0.001<break/>Optimizer = SGD<break/>Epochs = 80</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The five CNN models were implemented in a development environment equipped with an AMD Ryzen 7 5700X eight-core CPU, 16 GB RAM, and 1TB SSD storage, using Python. Experiments were executed under this hardware configuration, and the hyperparameters of each architecture were manually tuned based on performance across the validation subset. Three scenarios were considered in the model&#x2019;s evaluation:</p>
<list list-type="bullet">
<list-item>
<p><italic>Singular</italic>: Classification using the five individual multiclass models.</p></list-item>
<list-item>
<p><italic>Hybrid</italic>: Classification using the hybrid multiclass models VGG16+RF and DenseNet121+RF, which demonstrated superior performance compared to other hybrid combinations of two individual models.</p></list-item>
<list-item>
<p><italic>Voting</italic>: Classification using a voting scheme (involving all five singular models) and the VotingBS model.</p></list-item>
</list>
<p>Following the structure of the VotingBS model, each of the five models was trained in two sequential phases. A binary classification task was performed in the first phase, where images were categorized as either healthy or unhealthy. In the second phase, the same architecture was reused to classify between the two main diseases under study: scab and anthracnose.</p>
</sec>
</sec>
</sec>
<sec id="s4" sec-type="results">
<label>4</label>
<title>Results</title>
<p><xref ref-type="fig" rid="f8"><bold>Figure&#xa0;8</bold></xref> presents the confusion matrices of the nine DL models implemented for the three avocado disease categories. Among them, five are singular models, three are hybrid models &#x2014;one of which is a multiclass voting model&#x2014; and the last is the proposed binary sequential voting architecture.</p>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>Confusion matrices of the evaluated classification architectures: <bold>(A)</bold> ResNet50, <bold>(B)</bold> VGG16, <bold>(C)</bold> InceptionV3, <bold>(D)</bold> EfficientNetB2, <bold>(E)</bold> DenseNet121, <bold>(F)</bold> DenseNet121 + RF, <bold>(G)</bold> VGG16 + RF, <bold>(H)</bold> Voting, <bold>(I)</bold> VotingBS.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-g008.tif">
<alt-text content-type="machine-generated">Nine confusion matrix graphics, each comparing predicted versus actual classifications for Anthracnose, Scab, and Healthy categories. Color intensity visually indicates count, with most values strongly clustered along the diagonal, reflecting high classification accuracy.</alt-text>
</graphic></fig>
<p>The accuracy and loss curves across training epochs for the five singular models, during both training and validation, are provided in <xref ref-type="table" rid="T5"><bold>Table&#xa0;5</bold></xref>. The training loss function stabilizes around epoch 50 for all models, except for ResNet50, which stabilizes earlier around epoch 10. However, during validation, EfficientNetB2 shows better loss stabilization. Additionally, it is noted that validation accuracy remains lower than training accuracy across all models, with VGG16 demonstrating the most consistent convergence.</p>
<table-wrap id="T5" position="float">
<label>Table&#xa0;5</label>
<caption>
<p>Accuracy and loss per epoch for singular DL models.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">DL Model</th>
<th valign="middle" align="center">Accuracy</th>
<th valign="middle" align="center">Loss</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">ResNet50</td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i001.tif"><alt-text content-type="machine-generated">Line graph displaying train accuracy and validation accuracy across 80 epochs. Train accuracy quickly reaches above 0.99, while validation accuracy plateaus near 0.96, exhibiting more fluctuation throughout the epochs.</alt-text></inline-graphic></td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i002.tif"><alt-text content-type="machine-generated">Line chart showing training loss and validation loss over 80 epochs. Training loss, in blue, decreases rapidly and stabilizes near zero. Validation loss, in orange, declines initially but fluctuates around 0.2, suggesting potential overfitting.</alt-text></inline-graphic></td>
</tr>
<tr>
<td valign="middle" align="center">VGG16</td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i003.tif"><alt-text content-type="machine-generated">Line graph showing train accuracy and validation accuracy over 80 epochs. Both curves rise sharply at first, then plateau near 1.0 accuracy, with train accuracy staying consistently higher than validation accuracy.</alt-text></inline-graphic></td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i004.tif"><alt-text content-type="machine-generated">Line chart showing training loss and validation loss versus epoch. Both losses start high, rapidly decrease, and plateau near zero by epoch eighty, indicating a well-trained model with minimal overfitting.</alt-text></inline-graphic></td>
</tr>
<tr>
<td valign="middle" align="center">InceptionV3</td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i005.tif"><alt-text content-type="machine-generated">Line chart showing train accuracy and validation accuracy versus epoch for a machine learning model. Train accuracy increases quickly, plateauing near one point zero, while validation accuracy stabilizes around zero point nine after an initial rise.</alt-text></inline-graphic></td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i006.tif"><alt-text content-type="machine-generated">Line chart showing loss versus epoch for train and validation sets across eighty epochs. Train loss decreases steadily, nearing zero, while validation loss plateaus above zero, indicating potential overfitting.</alt-text></inline-graphic></td>
</tr>
<tr>
<td valign="middle" align="center">EfficientNetB2</td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i007.tif"><alt-text content-type="machine-generated">Line graph comparing train accuracy and validation accuracy over 80 epochs. Train accuracy rises rapidly, staying above validation accuracy, both stabilizing after 10 epochs. Accuracy ranges from 0.5 to 1.0.</alt-text></inline-graphic></td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i008.tif"><alt-text content-type="machine-generated">Line graph showing train loss and validation loss over 80 epochs for a machine learning model. Both losses decrease steeply at first, then flatten, with train loss consistently lower than validation loss.</alt-text></inline-graphic></td>
</tr>
<tr>
<td valign="middle" align="center">DenseNet121</td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i009.tif"><alt-text content-type="machine-generated">Line graph illustrating training and validation accuracy over 80 epochs. Training accuracy increases steadily and plateaus near 1.0, while validation accuracy plateaus around 0.92 with some fluctuations. Legend distinguishes both lines.</alt-text></inline-graphic></td>
<td valign="top" align="left"><inline-graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-17-1736123-i010.tif"><alt-text content-type="machine-generated">Line chart showing train loss and validation loss over eighty epochs. Both curves decrease rapidly at first, then flatten, with validation loss remaining consistently higher than train loss, indicating potential overfitting.</alt-text></inline-graphic></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The performance metrics of the DL models under the three evaluation scenarios are shown in <xref ref-type="table" rid="T6"><bold>Table&#xa0;6</bold></xref>. In the singular scenario, VGG16 achieved the best results, with 97.18% precision, 97.09% recall, and 97.09% accuracy. In the hybrid model scenario, the VGG16 + RF combination yielded superior performance, achieving 97.81% precision, 98.11% recall, and 98.06% accuracy. The findings also demonstrate that the proposed VotingBS consistently outperformed both the individual pre-trained architectures and the hybrid models across all metrics, reaching 98.92% precision, 98.89% recall, and 99.03% accuracy. To provide a foundational baseline and contextualize the advancement of our proposed models, <xref ref-type="table" rid="T6"><bold>Table&#xa0;6</bold></xref> also includes the performance of standard machine learning methods (CNN, RF, SVM, MLP) as reported by <xref ref-type="bibr" rid="B10">Campos-Ferreira and Gonz&#xe1;lez-Camacho, 2021</xref>, <xref ref-type="bibr" rid="B11">2023</xref> on a subset of the same dataset used in this study.</p>
<table-wrap id="T6" position="float">
<label>Table&#xa0;6</label>
<caption>
<p>Comparative performance of models across different architectural scenarios.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Scenarios</th>
<th valign="middle" align="center">Method</th>
<th valign="middle" align="center">Precisi&#xf3;n</th>
<th valign="middle" align="center">Recall</th>
<th valign="middle" align="center">Accuracy</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" rowspan="9" align="center">Singular</td>
<td valign="middle" align="center">CNN (<xref ref-type="bibr" rid="B10">Campos-Ferreira and Gonz&#xe1;lez-Camacho, 2021</xref>)</td>
<td valign="middle" align="center">79.33%</td>
<td valign="middle" align="center">85.00%</td>
<td valign="middle" align="center">87.00%</td>
</tr>
<tr>
<td valign="middle" align="center">RF (<xref ref-type="bibr" rid="B11">Campos-Ferreira et&#xa0;al., 2023</xref>)*</td>
<td valign="middle" align="center">98.00%</td>
<td valign="middle" align="center">97.67%</td>
<td valign="middle" align="center">98.00%</td>
</tr>
<tr>
<td valign="middle" align="center">SVM (<xref ref-type="bibr" rid="B11">Campos-Ferreira et&#xa0;al., 2023</xref>)*</td>
<td valign="middle" align="center">97.67%</td>
<td valign="middle" align="center">97.00%</td>
<td valign="middle" align="center">97.00%</td>
</tr>
<tr>
<td valign="middle" align="center">MLP (<xref ref-type="bibr" rid="B11">Campos-Ferreira et&#xa0;al., 2023</xref>)*</td>
<td valign="middle" align="center">98.03%</td>
<td valign="middle" align="center">98.00%</td>
<td valign="middle" align="center">98.00%</td>
</tr>
<tr>
<td valign="middle" align="center">ResNet50</td>
<td valign="middle" align="center">95.53 %</td>
<td valign="middle" align="center">95.15 %</td>
<td valign="middle" align="center">95.15 %</td>
</tr>
<tr>
<td valign="middle" align="center">VGG16</td>
<td valign="middle" align="center">97.18 %</td>
<td valign="middle" align="center">97.09 %</td>
<td valign="middle" align="center">97.09 %</td>
</tr>
<tr>
<td valign="middle" align="center">InceptionV3</td>
<td valign="middle" align="center">92.66 %</td>
<td valign="middle" align="center">92.23 %</td>
<td valign="middle" align="center">92.23 %</td>
</tr>
<tr>
<td valign="middle" align="center">EfficientNetB2</td>
<td valign="middle" align="center">96.33 %</td>
<td valign="middle" align="center">96.12 %</td>
<td valign="middle" align="center">96.12 %</td>
</tr>
<tr>
<td valign="middle" align="center">DenseNet121</td>
<td valign="middle" align="center">90.91 %</td>
<td valign="middle" align="center">90.29 %</td>
<td valign="middle" align="center">90.29 %</td>
</tr>
<tr>
<td valign="middle" rowspan="2" align="center">Hybrid</td>
<td valign="middle" align="center">VGG16 + RF</td>
<td valign="middle" align="center">97.81 %</td>
<td valign="middle" align="center">98.11 %</td>
<td valign="middle" align="center">98.06 %</td>
</tr>
<tr>
<td valign="middle" align="center">DenseNet121 + RF</td>
<td valign="middle" align="center">94.25 %</td>
<td valign="middle" align="center">94.17 %</td>
<td valign="middle" align="center">94.17 %</td>
</tr>
<tr>
<td valign="middle" rowspan="2" align="center">Voting</td>
<td valign="middle" align="center">Voting</td>
<td valign="middle" align="center">97.18 %</td>
<td valign="middle" align="center">97.09 %</td>
<td valign="middle" align="center">97.09 %</td>
</tr>
<tr>
<td valign="middle" align="center">VotingBS</td>
<td valign="middle" align="center">98.92 %</td>
<td valign="middle" align="center">98.89 %</td>
<td valign="middle" align="center">99.03 %</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>*Uses a subset of the dataset employed in this study.</p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s5" sec-type="discussion">
<label>5</label>
<title>Discussion</title>
<p>This research proposes a smart detection system for avocado fruit diseases using image analysis. The system is based on hybrid DL models and consists of three main modules (crops, analysis, and diagnosis) and is designed to facilitate intuitive disease identification and crop monitoring. The cultivation module enables users to register notes and create new crop entries, while the image preprocessing and disease identification of the fruit are handled by the analysis module. The diagnosis module provides access to the current and historical health status of each fruit, including diagnosis date, identified disease, causal agents, and recommended treatments.</p>
<p>This study introduces the VotingBS architecture, an innovative two-phase sequential voting scheme designed to optimize disease diagnosis in avocado crops. In the first phase, five DL models (ResNet50, VGG16, InceptionV3, EfficientNetB2, and DenseNet121) perform a binary classification (healthy vs. diseased fruit), while in the second phase, they specifically discriminate between anthracnose and scab. Experiments were performed using a collection of 674 images (571 for training and 103 for validation). The results demonstrated the superiority of this method: while the best singular multiclass model (VGG16) achieved 97.18% precision and 97.09% recall and accuracy, and its hybrid version with Random Forest (VGG16+RF) improved these results by +0.6%, the VotingBS architecture significantly outperformed all alternatives, reaching 98.92% precision, 98.89% recall, and 99.03% accuracy, thereby surpassing the best results reported in the literature.</p>
<p>The superiority of the VotingBS approach lies in its sequential architecture, which decomposes the diagnostic task into two clearly defined stages, thereby reducing cumulative errors typically observed in conventional models. This strategy not only proved effective in the presented case study but also establishes a promising paradigm for its application in other crops affected by multiple pathologies. The results suggest that breaking down complex tasks into simpler subproblems &#x2014;combined with weighted voting schemes&#x2014; can offer significant advantages in diagnostic accuracy over traditional approaches.</p>
<sec id="s5_1">
<label>5.1</label>
<title>Limitations and future work</title>
<p>The performance of the VotingBS architecture, while superior in our experiments, must be interpreted within the constraints of the dataset used. The primary limitation is the dataset&#x2019;s size (n=674) and composition, which originates from mixed sources and does not include cases of disease co-infection or very early symptoms. Consequently, the reported high accuracy reflects optimal performance on a curated dataset and serves as proof-of-concept. Therefore, the primary direction for future work is the validation of the model&#x2019;s generalization capability on a larger, prospectively collected field image corpus that captures the full heterogeneity of real orchards, including diverse lighting, occlusions, and complex disease presentations. In parallel, the next critical phase for the ApaltAI system is its operational validation, encompassing formal performance evaluation under high-load and poor-connectivity conditions, as well as extensive User Acceptance Testing (UAT) with avocado farmers to ensure its practical usability and adaptability in the field. These steps are essential to transition the integrated system from a robust prototype to a reliable agricultural tool.</p>
</sec>
</sec>
<sec id="s6" sec-type="conclusions">
<label>6</label>
<title>Conclusions</title>
<p>This research developed an innovative system for disease detection in avocado crops, combining a hybrid binary sequential ensemble architecture (VotingBS) with a supportive web application. The core innovation lies in its two-stage decision architecture: initially classifying fruits as healthy or unhealthy through the voting of five deep learning models and subsequently identifying the specific disease (anthracnose or scab) through a second weighted voting process among five other specialized models. This hierarchical approach demonstrated outstanding performance &#x2014;98.92% precision, 98.89% recall, and 99.03% accuracy&#x2014; significantly surpassing both singular and hybrid models documented in previous studies.</p>
<p>Although the results highlight the system&#x2019;s potential, its current scope is constrained by the limitations of the dataset used. Nevertheless, this work lays the groundwork for key future developments: (1) integration with precision agriculture systems to enable parcel-level monitoring, and (2) scaling VotingBS to include additional and co-occurring diseases. These advancements would position the proposed application as an intelligent, comprehensive, and scalable solution for the sustainable phytosanitary management of avocado crops.</p>
</sec>
</body>
<back>
<sec id="s7" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material. Further inquiries can be directed to the corresponding author.</p></sec>
<sec id="s8" sec-type="author-contributions">
<title>Author contributions</title>
<p>MM: Writing &#x2013; original draft, Investigation, Software, Data curation, Validation, Conceptualization, Project administration, Methodology, Writing &#x2013; review &amp; editing. AS: Investigation, Writing &#x2013; review &amp; editing, Conceptualization, Writing &#x2013; original draft, Software, Validation, Project administration, Methodology, Data curation. DM: Project administration, Supervision, Methodology, Writing &#x2013; review &amp; editing, Validation, Formal analysis, Investigation, Visualization, Conceptualization. LR: Supervision, Visualization, Writing &#x2013; review &amp; editing, Investigation. JS: Visualization, Validation, Supervision, Writing &#x2013; review &amp; editing.</p></sec>
<ack>
<title>Acknowledgments</title>
<p>The authors are grateful to the Direcci&#xf3;n de Investigaci&#xf3;n de la Universidad Peruana de Ciencias Aplicadas (UPC) for the support provided for this study.</p>
</ack>
<sec id="s10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s11" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s12" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Abbas</surname> <given-names>A.</given-names></name>
<name><surname>Jain</surname> <given-names>S.</given-names></name>
<name><surname>Gour</surname> <given-names>M.</given-names></name>
<name><surname>Vankudothu</surname> <given-names>S.</given-names></name>
</person-group> (<year>2021</year>). 
<article-title>Tomato plant disease detection using transfer learning with C-GAN synthetic images</article-title>. <source>Comput. Electron. Agric.</source> <volume>187</volume>, <elocation-id>106279</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2021.106279</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Agarwal</surname> <given-names>M.</given-names></name>
<name><surname>Singh</surname> <given-names>A.</given-names></name>
<name><surname>Arjaria</surname> <given-names>S.</given-names></name>
<name><surname>Sinha</surname> <given-names>A.</given-names></name>
<name><surname>Gupta</surname> <given-names>S.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>ToLeD: tomato leaf disease detection using convolution neural network</article-title>. <source>Proc. Comput. Sci.</source> <volume>167</volume>, <fpage>293</fpage>&#x2013;<lpage>301</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.procs.2020.03.225</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ahmed</surname> <given-names>N.</given-names></name>
<name><surname>Smith</surname> <given-names>R. W.</given-names></name>
<name><surname>Chen</surname> <given-names>P. X.</given-names></name>
<name><surname>Rogers</surname> <given-names>M. A.</given-names></name>
<name><surname>Spagnuolo</surname> <given-names>P. A.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Bioaccessibility of avocado polyhydroxylated fatty alcohols</article-title>. <source>Food Chem.</source> <volume>463</volume>, <elocation-id>140811</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.foodchem.2024.140811</pub-id>, PMID: <pub-id pub-id-type="pmid">39255710</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Alshammari</surname> <given-names>H.</given-names></name>
<name><surname>Gasmi</surname> <given-names>K.</given-names></name>
<name><surname>Ltaifa</surname> <given-names>I. B.</given-names></name>
<name><surname>Krichen</surname> <given-names>M.</given-names></name>
<name><surname>Ammar</surname> <given-names>L. B.</given-names></name>
<name><surname>Mahmood</surname> <given-names>M. A.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Olive disease classification based on vision transformer and CNN models</article-title>. <source>Comput. Intell. Neurosci.</source> <volume>2022</volume>, <elocation-id>3998193</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1155/2022/3998193</pub-id>, PMID: <pub-id pub-id-type="pmid">35958771</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Al-Wesabi</surname> <given-names>F. N.</given-names></name>
<name><surname>Albraikan</surname> <given-names>A. A.</given-names></name>
<name><surname>Hilal</surname> <given-names>A. M.</given-names></name>
<name><surname>Eltahir</surname> <given-names>M. M.</given-names></name>
<name><surname>Hamza</surname> <given-names>M. A.</given-names></name>
<name><surname>Zamani</surname> <given-names>A. S.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Artificial intelligence enabled apple leaf disease classification for precision agriculture</article-title>. <source>Computers Materials Continua</source> <volume>70</volume>, <fpage>6223</fpage>&#x2013;<lpage>6238</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.32604/cmc.2022.021299</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Banarase</surname> <given-names>S. J.</given-names></name>
<name><surname>Shirbahadurkar</surname> <given-names>S.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Orchard Guard: Deep Learning powered apple leaf disease detection with MobileNetV2 model</article-title>. <source>J. Integrated Sci. Technol.</source> <volume>12</volume>, <elocation-id>799</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.62110/sciencein.jist.2024.v12.799</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Banjar</surname> <given-names>A.</given-names></name>
<name><surname>Javed</surname> <given-names>A.</given-names></name>
<name><surname>Nawaz</surname> <given-names>M.</given-names></name>
<name><surname>Dawood</surname> <given-names>H.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>E-appleNet: an enhanced deep learning approach for apple fruit leaf disease classification</article-title>. <source>Appl. Fruit Sci.</source> <volume>67</volume>, <fpage>18</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10341-024-01239-w</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bansal</surname> <given-names>P.</given-names></name>
<name><surname>Kumar</surname> <given-names>R.</given-names></name>
<name><surname>Kumar</surname> <given-names>S.</given-names></name>
</person-group> (<year>2021</year>). 
<article-title>Disease detection in apple leaves using deep convolutional neural network</article-title>. <source>Agric. (Switzerland)</source> <volume>11</volume>, <elocation-id>617</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agriculture11070617</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Butt</surname> <given-names>N.</given-names></name>
<name><surname>Iqbal</surname> <given-names>M. M.</given-names></name>
<name><surname>Ramzan</surname> <given-names>S.</given-names></name>
<name><surname>Raza</surname> <given-names>A.</given-names></name>
<name><surname>Abualigah</surname> <given-names>L.</given-names></name>
<name><surname>Fitriyani</surname> <given-names>N. L.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>Citrus diseases detection using innovative deep learning approach and Hybrid Meta-Heuristic</article-title>. <source>PloS One</source> <volume>20</volume>, <fpage>e0316081</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1371/journal.pone.0316081</pub-id>, PMID: <pub-id pub-id-type="pmid">39841644</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Campos-Ferreira</surname> <given-names>U. E.</given-names></name>
<name><surname>Gonz&#xe1;lez-Camacho</surname> <given-names>J. M.</given-names></name>
</person-group> (<year>2021</year>). 
<article-title>Convolutional neural network classifier for identifying diseases of avocado fruit (Persea americana Mill.) from digital images</article-title>. <source>Agrociencia</source> <volume>55</volume>, <fpage>695</fpage>&#x2013;<lpage>709</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.47163/agrociencia.v55i8.2662</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Campos-Ferreira</surname> <given-names>U. E.</given-names></name>
<name><surname>Gonz&#xe1;lez-Camacho</surname> <given-names>J. M.</given-names></name>
<name><surname>Carrillo-Salazar</surname> <given-names>A.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Automatic identification of avocado fruit diseases based on machine learning and chromatic descriptors</article-title>. <source>Rev. Chapingo Serie Horticultura</source> <volume>29</volume>, <fpage>115</fpage>&#x2013;<lpage>130</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.5154/r.rchsh.2023.04.002</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chellappan</surname> <given-names>B. V.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Comparative secretome analysis unveils species-specific virulence factors in Elsinoe perseae, the causative agent of the scab disease of avocado (Persea americana)</article-title>. <source>AIMS Microbiol.</source> <volume>10</volume>, <fpage>894</fpage>&#x2013;<lpage>916</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3934/microbiol.2024039</pub-id>, PMID: <pub-id pub-id-type="pmid">39628720</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>C.-J.</given-names></name>
<name><surname>Huang</surname> <given-names>Y.-Y.</given-names></name>
<name><surname>Li</surname> <given-names>Y.-S.</given-names></name>
<name><surname>Chen</surname> <given-names>Y.-C.</given-names></name>
<name><surname>Chang</surname> <given-names>C.-Y.</given-names></name>
<name><surname>Huang</surname> <given-names>Y.-M.</given-names></name>
</person-group> (<year>2021</year>). 
<article-title>Identification of fruit tree pests with deep learning on embedded drone to achieve accurate pesticide spraying</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>21986</fpage>&#x2013;<lpage>21997</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ACCESS.2021.3056082</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Col&#xed;n-Ch&#xe1;vez</surname> <given-names>C.</given-names></name>
<name><surname>Virgen-Ortiz</surname> <given-names>J. J.</given-names></name>
<name><surname>Mart&#xed;nez-T&#xe9;llez</surname> <given-names>M. A.</given-names></name>
<name><surname>Avelino-Ram&#xed;rez</surname> <given-names>C.</given-names></name>
<name><surname>Gallegos-Santoyo</surname> <given-names>N. L.</given-names></name>
<name><surname>Miranda-Ackerman</surname> <given-names>M. A.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Control of anthracnose (Colletotrichum gloeosporioides) growth in &#x201c;Hass&#x201d; avocado fruit using sachets filled with oregano oil-starch-capsules</article-title>. <source>Future Foods</source> <volume>10</volume>, <elocation-id>100394</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.fufo.2024.100394</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Demilie</surname> <given-names>W. B.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Plant disease detection and classification techniques: a comparative study of the performances</article-title>. <source>J. Big Data</source> <volume>11</volume>, <elocation-id>5</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s40537-023-00863-9</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="web">
<person-group person-group-type="author"><collab>FAO</collab>
</person-group> (<year>2024</year>). 
<article-title>Major tropical fruits. Market Review 2023</article-title>. Available online at: <uri xlink:href="https://openknowledge.fao.org/server/api/core/bitstreams/1458b76c-b520-4add-9123-4e4481d43c06/content">https://openknowledge.fao.org/server/api/core/bitstreams/1458b76c-b520-4add-9123-4e4481d43c06/content</uri> (Accessed <date-in-citation content-type="access-date">March 23, 2025</date-in-citation>).
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hari</surname> <given-names>P.</given-names></name>
<name><surname>Singh</surname> <given-names>M. P.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>A lightweight convolutional neural network for disease detection of fruit leaves</article-title>. <source>Neural Computing Appl.</source> <volume>35</volume>, <fpage>14855</fpage>&#x2013;<lpage>14866</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00521-023-08496-y</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Huang</surname> <given-names>X.</given-names></name>
<name><surname>Chen</surname> <given-names>A.</given-names></name>
<name><surname>Zhou</surname> <given-names>G.</given-names></name>
<name><surname>Zhang</surname> <given-names>X.</given-names></name>
<name><surname>Wang</surname> <given-names>J.</given-names></name>
<name><surname>Peng</surname> <given-names>N.</given-names></name>
<etal/>
</person-group>. (<year>2023</year>). 
<article-title>Tomato leaf disease detection system based on FC-SNDPN</article-title>. <source>Multimedia Tools Appl.</source> <volume>82</volume>, <fpage>2121</fpage>&#x2013;<lpage>2144</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11042-021-11790-3</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Karthik</surname> <given-names>R.</given-names></name>
<name><surname>Hariharan</surname> <given-names>M.</given-names></name>
<name><surname>Anand</surname> <given-names>S.</given-names></name>
<name><surname>Mathikshara</surname> <given-names>P.</given-names></name>
<name><surname>Johnson</surname> <given-names>A.</given-names></name>
<name><surname>Menaka</surname> <given-names>R.</given-names></name>
</person-group> (<year>2020</year>). 
<article-title>Attention embedded residual CNN for disease detection in tomato leaves</article-title>. <source>Appl. Soft Computing J.</source> <volume>86</volume>, <elocation-id>105933</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.asoc.2019.105933</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kaya</surname> <given-names>Y.</given-names></name>
<name><surname>G&#xfc;rsoy</surname> <given-names>E.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>A novel multi-head CNN design to identify plant diseases using the fusion of RGB images</article-title>. <source>Ecol. Inf.</source> <volume>75</volume>, <elocation-id>101998</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ecoinf.2023.101998</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Khan</surname> <given-names>A. I.</given-names></name>
<name><surname>Quadri</surname> <given-names>S. M. K.</given-names></name>
<name><surname>Banday</surname> <given-names>S.</given-names></name>
<name><surname>Latief</surname> <given-names>J.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Deep diagnosis: A real-time apple leaf disease detection system based on deep learning</article-title>. <source>Comput. Electron. Agric.</source> <volume>198</volume>, <elocation-id>107093</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2022.107093</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mahato</surname> <given-names>D. K.</given-names></name>
<name><surname>Pundir</surname> <given-names>A.</given-names></name>
<name><surname>Saxena</surname> <given-names>G. J.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>An improved deep convolutional neural network for image-based apple plant leaf disease detection and identification</article-title>. <source>J. Institution Engineers (India): Ser. A</source> <volume>103</volume>, <fpage>975</fpage>&#x2013;<lpage>987</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s40030-022-00668-8</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Mir</surname> <given-names>T. A.</given-names></name>
<name><surname>Gupta</surname> <given-names>S.</given-names></name>
<name><surname>Chauhan</surname> <given-names>R.</given-names></name>
<name><surname>Singh</surname> <given-names>M.</given-names></name>
<name><surname>Banerjee</surname> <given-names>D.</given-names></name>
<name><surname>Kumar</surname> <given-names>B. V.</given-names></name>
</person-group> (<year>2024</year>). &#x201c;
<article-title>Enhanced Multiclassification of Avocado Leaf Diseases: CNN and Random Forest Integration</article-title>,&#x201d; in <source>Proceedings of the 2024 3rd International Conference for Innovation in Technology (INOCON)</source> (
<publisher-name>IEEE</publisher-name>, <publisher-loc>Bangalore, India</publisher-loc>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/INOCON60754.2024.10512211</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mishra</surname> <given-names>S.</given-names></name>
<name><surname>Ayane</surname> <given-names>T. H.</given-names></name>
<name><surname>Ellappan</surname> <given-names>V.</given-names></name>
<name><surname>Rathee</surname> <given-names>D. S.</given-names></name>
<name><surname>Kalla</surname> <given-names>H.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Avocado fruit disease detection and classification using modified SCA&#x2013;PSO algorithm-based MobileNetV2 convolutional neural network</article-title>. <source>Iran J. Comput. Sci.</source> <volume>5</volume>, <fpage>345</fpage>&#x2013;<lpage>358</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s42044-022-00116-7</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Moreno-Lozano</surname> <given-names>M. I.</given-names></name>
<name><surname>Ticlavilca-Inche</surname> <given-names>E. J.</given-names></name>
<name><surname>Casta&#xf1;eda</surname> <given-names>P.</given-names></name>
<name><surname>Wong-Durand</surname> <given-names>S.</given-names></name>
<name><surname>Mauricio</surname> <given-names>D.</given-names></name>
<name><surname>O&#xf1;ate-Andino</surname> <given-names>A.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>A performance evaluation of convolutional neural network architectures for pterygium detection in anterior segment eye images</article-title>. <source>Diagnostics</source> <volume>14</volume>, <elocation-id>2026</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics14182026</pub-id>, PMID: <pub-id pub-id-type="pmid">39335704</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Moussafir</surname> <given-names>M.</given-names></name>
<name><surname>Chaibi</surname> <given-names>H.</given-names></name>
<name><surname>Saadane</surname> <given-names>R.</given-names></name>
<name><surname>Chehri</surname> <given-names>A.</given-names></name>
<name><surname>Rharras</surname> <given-names>A. E.</given-names></name>
<name><surname>Jeon</surname> <given-names>G.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Design of efficient techniques for tomato leaf disease detection using genetic algorithm-based and deep neural networks</article-title>. <source>Plant Soil</source> <volume>479</volume>, <fpage>251</fpage>&#x2013;<lpage>266</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11104-022-05513-2</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Polly</surname> <given-names>R.</given-names></name>
<name><surname>Devi</surname> <given-names>E. A.</given-names></name>
</person-group> (<year>2024</year>). 
<article-title>Semantic segmentation for plant leaf disease classification and damage detection: A deep learning approach</article-title>. <source>Smart Agric. Technol.</source> <volume>9</volume>, <elocation-id>100526</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.atech.2024.100526</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Rani</surname> <given-names>R.</given-names></name>
<name><surname>Sahoo</surname> <given-names>J.</given-names></name>
<name><surname>Bellamkonda</surname> <given-names>S.</given-names></name>
<name><surname>Kumar</surname> <given-names>S.</given-names></name>
<name><surname>Pippal</surname> <given-names>S. K.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Role of artificial intelligence in agriculture: an analysis and advancements with focus on plant diseases</article-title>. <source>IEEE Access</source> <volume>11</volume>, <fpage>137999</fpage>&#x2013;<lpage>138019</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ACCESS.2023.3339375</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Rodr&#xed;guez</surname> <given-names>M. J.</given-names></name>
<name><surname>Zuloaga-Rotta</surname> <given-names>L.</given-names></name>
<name><surname>Borja-Rosales</surname> <given-names>R.</given-names></name>
<name><surname>Rodr&#xed;guez</surname> <given-names>J. R.</given-names></name>
<name><surname>Vilca-Aguilar</surname> <given-names>M.</given-names></name>
<name><surname>Salas-Ojeda</surname> <given-names>M.</given-names></name>
<etal/>
</person-group>. (<year>2024</year>). 
<article-title>Explainable machine learning models for brain diseases: insights from a systematic review</article-title>. <source>Neurol. Int.</source> <volume>16</volume>, <fpage>1285</fpage>&#x2013;<lpage>1307</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/neurolint16060098</pub-id>, PMID: <pub-id pub-id-type="pmid">39585057</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sai</surname> <given-names>B.</given-names></name>
<name><surname>Neeraja</surname> <given-names>S.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Plant leaf disease classification and damage detection system using deep learning models</article-title>. <source>Multimedia Tools Appl.</source> <volume>81</volume>, <fpage>24021</fpage>&#x2013;<lpage>24040</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11042-022-12147-0</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Saleem</surname> <given-names>M. H.</given-names></name>
<name><surname>Potgieter</surname> <given-names>J.</given-names></name>
<name><surname>Arif</surname> <given-names>K. M.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>A performance-optimized deep learning-based plant disease detection approach for horticultural crops of New Zealand</article-title>. <source>IEEE Access</source> <volume>10</volume>, <fpage>89798</fpage>&#x2013;<lpage>89822</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ACCESS.2022.3201104</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Sholihati</surname> <given-names>R. A.</given-names></name>
<name><surname>Sulistijono</surname> <given-names>I. A.</given-names></name>
<name><surname>Risnumawan</surname> <given-names>A.</given-names></name>
<name><surname>Kusumawati</surname> <given-names>E.</given-names></name>
</person-group> (<year>2020</year>). &#x201c;
<article-title>Potato leaf disease classification using deep learning approach</article-title>,&#x201d; in <conf-name>Proceedings of the 2020 International Electronics Symposium (IES)</conf-name>, <conf-loc>Surabaya, Indonesia</conf-loc>. <fpage>392</fpage>&#x2013;<lpage>397</lpage> (
<publisher-name>IEEE</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.1109/IES50839.2020.9231784</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Silva</surname> <given-names>T. F.</given-names></name>
<name><surname>Pimentel</surname> <given-names>J. L.</given-names></name>
<name><surname>V&#xe9;lez-Olmedo</surname> <given-names>J. B.</given-names></name>
<name><surname>Anderson</surname> <given-names>W.</given-names></name>
<name><surname>Bassay</surname> <given-names>L. E.</given-names></name>
<name><surname>Pinho</surname> <given-names>D. B.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Four new fungal pathogens causing avocado dieback in Brazil</article-title>. <source>Crop Prot.</source> <volume>192</volume>, <elocation-id>107168</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cropro.2025.107168</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sinamenye</surname> <given-names>J. H.</given-names></name>
<name><surname>Chatterjee</surname> <given-names>A.</given-names></name>
<name><surname>Shrestha</surname> <given-names>R.</given-names></name>
</person-group> (<year>2025</year>). 
<article-title>Potato plant disease detection: leveraging hybrid deep learning models</article-title>. <source>BMC Plant Biol.</source> <volume>25</volume>, <fpage>647</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12870-025-06679-4</pub-id>, PMID: <pub-id pub-id-type="pmid">40380088</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sultan</surname> <given-names>T.</given-names></name>
<name><surname>Chowdhury</surname> <given-names>M. S.</given-names></name>
<name><surname>Jahan</surname> <given-names>N.</given-names></name>
<name><surname>Mridha</surname> <given-names>M. F.</given-names></name>
<name><surname>Alfarhood</surname> <given-names>S.</given-names></name>
<name><surname>Safran</surname> <given-names>M.</given-names></name>
<etal/>
</person-group>. (<year>2025</year>). 
<article-title>LeafDNet: transforming leaf disease diagnosis through deep transfer learning</article-title>. <source>Plant Direct</source> <volume>9</volume>, <fpage>e70047</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/pld3.70047</pub-id>, PMID: <pub-id pub-id-type="pmid">39943923</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Tiwari</surname> <given-names>D.</given-names></name>
<name><surname>Ashish</surname> <given-names>M.</given-names></name>
<name><surname>Gangwar</surname> <given-names>N.</given-names></name>
<name><surname>Sharma</surname> <given-names>A.</given-names></name>
<name><surname>Patel</surname> <given-names>S.</given-names></name>
<name><surname>Bhardwaj</surname> <given-names>S.</given-names></name>
</person-group> (<year>2020</year>). &#x201c;
<article-title>Potato leaf diseases detection using deep learning</article-title>,&#x201d; in <conf-name>Proceedings of the 2020 International Conference on Intelligent Computing and Control Systems (ICICCS)</conf-name>, <conf-loc>Madurai, India</conf-loc>. <fpage>461</fpage>&#x2013;<lpage>466</lpage> (
<publisher-name>IEEE</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ICICCS48265.2020.9121067</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Turkoglu</surname> <given-names>M.</given-names></name>
<name><surname>Hanbay</surname> <given-names>D.</given-names></name>
<name><surname>Sengur</surname> <given-names>A.</given-names></name>
</person-group> (<year>2022</year>). 
<article-title>Multi-model LSTM-based convolutional neural networks for detection of apple diseases and pests</article-title>. <source>J. Ambient Intell. Humanized Computing</source> <volume>13</volume>, <fpage>3335</fpage>&#x2013;<lpage>3345</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s12652-019-01591-w</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Villagomez</surname> <given-names>R. B.</given-names></name>
<name><surname>Abele</surname> <given-names>D. V.</given-names></name>
<name><surname>Mauricio</surname> <given-names>D.</given-names></name>
</person-group> (<year>2024</year>). &#x201c;
<article-title>Potato crop irrigation system in Peru based on IoT and machine learning</article-title>,&#x201d; in <conf-name>Proceedings of the 2024 12th IEEE Andescon (ANDESCON)</conf-name>, <conf-loc>Cusco, Peru</conf-loc>. <fpage>1</fpage>&#x2013;<lpage>6</lpage> (
<publisher-name>IEEE</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ANDESCON61840.2024.10755749</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Vishnoi</surname> <given-names>V. K.</given-names></name>
<name><surname>Kumar</surname> <given-names>K.</given-names></name>
<name><surname>Kumar</surname> <given-names>B.</given-names></name>
<name><surname>Mohan</surname> <given-names>S.</given-names></name>
<name><surname>Khan</surname> <given-names>A. A.</given-names></name>
</person-group> (<year>2023</year>). 
<article-title>Detection of apple plant diseases using leaf images through convolutional neural network</article-title>. <source>IEEE Access</source> <volume>11</volume>, <fpage>6594</fpage>&#x2013;<lpage>6609</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ACCESS.2022.3232917</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Xavier</surname> <given-names>P.</given-names></name>
<name><surname>Rodrigues</surname> <given-names>P.</given-names></name>
<name><surname>Silva</surname> <given-names>C. L. M.</given-names></name>
</person-group> (<year>2024</year>). <source><italic>&#x2018;Hass&#x2019; avocado ripening photographic dataset.</italic> Version 1</source> (<publisher-loc>Amsterdam, Netherlands</publisher-loc>: 
<publisher-name>Mendeley Data</publisher-name>). doi:&#xa0;<pub-id pub-id-type="doi">10.17632/3xd9n945v8.1</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1473881">Chaolong Zhang</ext-link>, Jinling Institute of Technology, China</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/211542">Ho-jong Ju</ext-link>, Jeonbuk National University, Republic of Korea</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2040903">Vignesh Tamilarasan</ext-link>, Sri Krishna College of Engineering &amp; Technology, India</p></fn>
</fn-group>
</back>
</article>