<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Plant Sci.</journal-id>
<journal-title>Frontiers in Plant Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Plant Sci.</abbrev-journal-title>
<issn pub-type="epub">1664-462X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpls.2022.1077568</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Plant Science</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>A lightweight convolutional neural network for recognition of severity stages of maydis leaf blight disease of maize</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Haque</surname>
<given-names>Md. Ashraful</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1354869"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Marwaha</surname>
<given-names>Sudeep</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1354864"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Arora</surname>
<given-names>Alka</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1575367"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Deb</surname>
<given-names>Chandan Kumar</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1283635"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Misra</surname>
<given-names>Tanuj</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Nigam</surname>
<given-names>Sapna</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2066046"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hooda</surname>
<given-names>Karambir Singh</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Division of Computer Applications, Indian Council of Agricultural Research (ICAR)-Indian Agricultural Statistics Research Institute</institution>, <addr-line>New Delhi</addr-line>, <country>India</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Computer Science, Rani Lakshmi Bai Central Agricultural University</institution>, <addr-line>Jhansi</addr-line>, <country>India</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Division of Germplasm Evaluation, Indian Council of Agricultural Research (ICAR)-National Bureau of Plant Genetic Resources</institution>, <addr-line>New Delhi</addr-line>, <country>India</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Yunchao Tang, Zhongkai University of Agriculture and Engineering, China</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Jana Shafi, Prince Sattam Bin Abdulaziz University, Saudi Arabia; Ajoy Kumar Roy, Indian Council of Agricultural Research (ICAR), India</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Sudeep Marwaha, <email xlink:href="mailto:sudeep@icar.gov.in">sudeep@icar.gov.in</email>; Md. Ashraful Haque, <email xlink:href="mailto:ashraful.haque@icar.gov.in">ashraful.haque@icar.gov.in</email>
</p>
</fn>
<fn fn-type="other" id="fn002">
<p>This article was submitted to Sustainable and Intelligent Phytoprotection, a section of the journal Frontiers in Plant Science</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>19</day>
<month>12</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>13</volume>
<elocation-id>1077568</elocation-id>
<history>
<date date-type="received">
<day>23</day>
<month>10</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>01</day>
<month>12</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2022 Haque, Marwaha, Arora, Deb, Misra, Nigam and Hooda</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Haque, Marwaha, Arora, Deb, Misra, Nigam and Hooda</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Maydis leaf blight (MLB) of maize (<italic>Zea Mays L.</italic>), a serious fungal disease, is capable of causing up to 70% damage to the crop under severe conditions. Severity of diseases is considered as one of the important factors for proper crop management and overall crop yield. Therefore, it is quite essential to identify the disease at the earliest possible stage to overcome the yield loss. In this study, we created an image database of maize crop, MDSD (Maydis leaf blight Disease Severity Dataset), containing 1,760 digital images of MLB disease, collected from different agricultural fields and categorized into four groups viz. healthy, low, medium and high severity stages. Next, we proposed a lightweight convolutional neural network (CNN) to identify the severity stages of MLB disease. The proposed network is a simple CNN framework augmented with two modified <italic>Inception</italic> modules, making it a lightweight and efficient multi-scale feature extractor. The proposed network reported approx. 99.13% classification accuracy with the f1-score of 98.97% on the test images of MDSD. Furthermore, the class-wise accuracy levels were 100% for healthy samples, 98% for low severity samples and 99% for the medium and high severity samples. In addition to that, our network significantly outperforms the popular pretrained models, viz. VGG16, VGG19, InceptionV3, ResNet50, Xception, MobileNetV2, DenseNet121 and NASNetMobile for the MDSD image database. The experimental findings revealed that our proposed lightweight network is excellent in identifying the images of severity stages of MLB disease despite complicated background conditions.</p>
</abstract>
<kwd-group>
<kwd>maydis leaf blight disease</kwd>
<kwd>maize crop</kwd>
<kwd>disease severity stages</kwd>
<kwd>MDSD image database</kwd>
<kwd>convolutional neural network</kwd>
<kwd>inception module</kwd>
</kwd-group>
<counts>
<fig-count count="10"/>
<table-count count="5"/>
<equation-count count="10"/>
<ref-count count="44"/>
<page-count count="14"/>
<word-count count="5807"/>
</counts>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
    <p>In India, maize (<italic>Zea Mays</italic> L.) is the third most important cereal grain crop. The maize crop is being grown in Kharif and rabi seasons across the country (<xref ref-type="bibr" rid="B20">Kaur et&#xa0;al., 2020</xref>). It is considered as the &#x2018;Queen of Cereals&#x2019; due to its multiple use cases, such as staple food for human beings, feed-fodder for livestock animals, raw materials for several processed foods, industrial products, a rich source of starch and so on. As per the reports, around 31.65 mt of maize was produced across the country during 2020-2021 (<xref ref-type="bibr" rid="B16">ICAR-IIMR, 2021</xref>). Every year, around 13.2% of the total crop yield is damaged due to the attack of several disease-causing pathogens (<xref ref-type="bibr" rid="B1">Aggarwal et&#xa0;al., 2021</xref>). Among several diseases, Maydis leaf blight or MLB (aka Southern corn leaf Blight) is a serious fungal disease across maize-growing regions of India. Generally, the country&#x2019;s warm and humid climatic condition is extremely favorable for the disease development (<xref ref-type="bibr" rid="B29">Malik et&#xa0;al., 2018</xref>). The MLB disease is caused by <italic>Bipolaris maydis (Nisik. &amp; Miyake)</italic> Shoemaker 1959 fungus. In the early stages, its symptoms appear as small and oval to diamond-shaped, necrotic to brown-colored lesions on the leaf surfaces. These lesions get elongated as the disease progresses (<xref ref-type="bibr" rid="B1">Aggarwal et&#xa0;al., 2021</xref>). It is reported that this disease alone is capable of causing damage approx. 70% of the total crop yield in severe conditions (<xref ref-type="bibr" rid="B15">Hooda et&#xa0;al., 2018</xref>). The severity of diseases is an important parameter that measures the intensity level of disease symptoms in the affected portion of the crop and is crucial for disease management too (<xref ref-type="bibr" rid="B15">Hooda et&#xa0;al., 2018</xref>). 
Therefore, our first and foremost aim must be to identify and control the disease at the earliest possible stage of severity to minimize the risk of potential yield loss of maize crop. However, the conventional approach for identifying the severity stages involves visual observations and laboratory analysis. These approaches require highly trained and experienced personnel, which often makes them practically infeasible. Hence, there is a great need for a precise, quick, cost-effective and automated approach to identify the disease severity stages under field conditions.</p>
<p>In recent years, several computer vision techniques have been applied to several challenging agricultural problems (<xref ref-type="bibr" rid="B19">Kamilaris and Prenafeta-Bold&#xfa;, 2018</xref>). In this connection, the convolutional neural networks (aka CNNs) are considered as the benchmark for different image-based problem identification in the agriculture domain. The CNN approaches have eased the image recognition process by automatically extracting the features from the images as compared to the hand-engineered feature extractions in the traditional machine learning approaches (<xref ref-type="bibr" rid="B21">LeCun et&#xa0;al., 2015</xref>). In case of diagnosis of diseases as well as their severity stages, CNNs have shown significantly better results than the traditional image processing and machine learning techniques. In this context, a very limited number of works have been reported to diagnose disease severity stages in maize crop using in-field images. Therefore, we proposed a novel lightweight CNN network for identifying the severity stages of MLB disease in maize crop. This network would be a practical and viable solution for the farm community of the country. The main contributions of this study are provided below:</p>
<list list-type="bullet">
<list-item>
<p>Created an image database known as MDSD (Maydis leaf blight Disease Severity Dataset) containing digital images of maize leaves infected with MLB disease covering all severity stages. The images of MDSD were collected in non-destructive manner with natural field backgrounds from different agricultural fields.</p>
</list-item>
<list-item>
<p>Proposed a lightweight and efficient convolutional neural network (CNN) model augmented with modified inception modules. The proposed network is trained and validated on the images of the MDSD database for automatic identification of severity stages of MLB disease.</p>
</list-item>
<list-item>
<p>To evaluate the effectiveness of the proposed network, we conducted a comparative analysis of the prediction performance between the proposed model and a few popular state-of-the-art pretrained networks.</p>
</list-item>
</list>
<p>This article is organized into six sections. Section 1 (present section) highlights the importance of maize crop, the devastating effect of MLB diseases, constraints of the conventional approaches of disease recognition and management, importance of computer vision-based technologies <italic>etc.</italic>; Section 2 explores and briefly discusses the related works relevant to the present study; Section 3 explains the materials and methodologies used to carry out the current study; Section 4 reports and discusses the experimental results and findings of the study; Section 5 presents the ablation studies; and Section 6 concludes the whole study highlighting the impact and crucial findings and outlines the future perspective of this study.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Related work</title>
<p>In this section, we will briefly discuss the methodologies proposed by research works from across the globe for recognizing diseases as well their severity stages. In recent years, deep learning-based techniques are gaining momentum for identifying diseases of several crops. Several authors like <xref ref-type="bibr" rid="B30">Mohanty et&#xa0;al. (2016)</xref>; <xref ref-type="bibr" rid="B38">Sladojevic et&#xa0;al. (2016)</xref>; <xref ref-type="bibr" rid="B10">Ferentinos (2018)</xref>; <xref ref-type="bibr" rid="B3">Barbedo (2019)</xref> and <xref ref-type="bibr" rid="B2">Atila et&#xa0;al. (2021)</xref> focused on identifying the diseases of crops at once by applying variety of deep learning models such as state-of-the-art networks, transfer learning models, custom defined models, hybrid CNN models and many more. These works targeted identifying diseases of multiple crops by a single deep learning model. Whereas most of the reported works aimed at crop-specific disease identification problems such as for Rice crop (<xref ref-type="bibr" rid="B26">Lu et&#xa0;al., 2017</xref>; <xref ref-type="bibr" rid="B6">Chen et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B36">Rahman et&#xa0;al., 2020</xref>), Wheat crop (<xref ref-type="bibr" rid="B27">Lu et&#xa0;al., 2017</xref>; <xref ref-type="bibr" rid="B33">Picon et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B31">Nigam et&#xa0;al., 2021</xref>), Tomato crop (<xref ref-type="bibr" rid="B11">Fuentes et&#xa0;al., 2018</xref>; <xref ref-type="bibr" rid="B44">Zhang et&#xa0;al., 2018</xref>), Maize crop (<xref ref-type="bibr" rid="B9">DeChant et&#xa0;al., 2017</xref>; <xref ref-type="bibr" rid="B35">Priyadharshini et&#xa0;al., 2019</xref>; <xref ref-type="bibr" rid="B28">Lv et&#xa0;al., 2020</xref>; <xref ref-type="bibr" rid="B12">Haque et&#xa0;al., 2021</xref>; <xref ref-type="bibr" rid="B14">Haque et&#xa0;al., 2022a</xref>; <xref ref-type="bibr" rid="B13">Haque et&#xa0;al., 2022b</xref>), etc. 
The experimental findings of these research works reported significant results by employing several types of CNN-based networks to identify the diseases using color images. Some of these works used lab-based images of crop diseases, such as the Plant Village dataset, for their model development, while others have used in-field images.</p>
<p>Nowadays, the identification of severity stages of diseases has also attracted the attention of researchers. Significant works have been carried out to identify the disease severity stages using digital images. <xref ref-type="bibr" rid="B43">Wang et&#xa0;al. (2017)</xref> applied transfer learning of popular deep CNN models to diagnose disease severity in apple plants and obtained more than 94% classification accuracy on the test dataset. They used publicly available images and assessed them into 4 categories of severity stages for their experiment. <xref ref-type="bibr" rid="B23">Liang et&#xa0;al. (2019)</xref> proposed a robust approach for disease diagnosis and disease severity estimation of several crops using deep learning models. <xref ref-type="bibr" rid="B41">Verma et&#xa0;al. (2020)</xref> worked on tomato late blight disease and <xref ref-type="bibr" rid="B34">Prabhakar et&#xa0;al. (2020)</xref> worked on estimating the severity stages of tomato early blight disease using Deep CNN models. Recently, <xref ref-type="bibr" rid="B37">Sibiya and Sumbwanyambe (2021)</xref> used Deep CNN models to classify images of common rust disease of maize crop into four classes of severity levels. They applied fuzzy logic-based techniques to automatically categorize the diseased images into severity categories. <xref ref-type="bibr" rid="B32">Nigam et&#xa0;al. (2021)</xref> classified the stem rust disease of wheat crop into four severity categories using deep convolutional neural networks. <xref ref-type="bibr" rid="B7">Chen et&#xa0;al. (2021)</xref> worked on estimating the severity of the rice bacterial leaf streak disease using a segmentation-based approach. <xref ref-type="bibr" rid="B42">Wang et&#xa0;al. (2021)</xref> proposed an image-segmentation-based approach by integrating a deep CNN model to recognize severity stages of downy mildew, powdery mildew and cucumber viral diseases of cucumber crops. 
<xref ref-type="bibr" rid="B18">Ji and Wu (2022)</xref> proposed fuzzy logic integrated deep learning model for detecting the severity levels of grape black measles disease. <xref ref-type="bibr" rid="B25">Liu et&#xa0;al. (2022)</xref> developed a two-stage CNN model for diagnosing the severity of Alternaria leaf blotch disease of the Apple plant. A summary of the previous works is provided in <xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>A brief summary of related works.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left">Authors</th>
<th valign="top" align="center">Work done</th>
<th valign="top" align="center">Dataset</th>
<th valign="top" align="center">Approach</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B43">Wang et&#xa0;al. (2017)</xref>
</td>
<td valign="top" align="left">Diagnosis of disease severity in Apple plant</td>
<td valign="top" align="left">Plant Village dataset</td>
<td valign="top" align="left">Transfer learning approach</td>
</tr>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B23">Liang et&#xa0;al. (2019)</xref>
</td>
<td valign="top" align="left">Diagnosis of diseases and their severity levels of several crops</td>
<td valign="top" align="left">Own dataset</td>
<td valign="top" align="left">Custom CNN based on ResNet50 and ShuffleNet models</td>
</tr>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B41">Verma et&#xa0;al. (2020)</xref>
</td>
<td valign="top" align="left">Identification of severity levels of tomato late blight disease</td>
<td valign="top" align="left">Plant Village</td>
<td valign="top" align="left">Transfer learning approach</td>
</tr>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B34">Prabhakar et&#xa0;al. (2020)</xref>
</td>
<td valign="top" align="left">Detection of severity levels of tomato early blight disease</td>
<td valign="top" align="left">Plant Village</td>
<td valign="top" align="left">Pre-trained ResNet101 models</td>
</tr>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B37">Sibiya and Sumbwanyambe (2021)</xref>
</td>
<td valign="top" align="left">Classification of common rust disease of maize into four severity levels</td>
<td valign="top" align="left">PlantVillage dataset</td>
<td valign="top" align="left">OTSU threshold-segmentation method</td>
</tr>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B31">Nigam et&#xa0;al. (2021)</xref>
</td>
<td valign="top" align="left">Estimation of severity of stem rust disease of wheat</td>
<td valign="top" align="left">Own dataset</td>
<td valign="top" align="left">Custom CNN network</td>
</tr>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B7">Chen et&#xa0;al. (2021)</xref>
</td>
<td valign="top" align="left">Estimation of severity of the Rice bacterial leaf streak disease</td>
<td valign="top" align="left">Own dataset</td>
<td valign="top" align="left">Segmentation-based CNN approach</td>
</tr>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B42">Wang et&#xa0;al. (2021)</xref>
</td>
<td valign="top" align="left">Recognition of severity stages of downy mildew, powdery mildew and cucumber viral diseases of cucumber</td>
<td valign="top" align="left">Own dataset</td>
<td valign="top" align="left">Image-segmentation-based CNN model</td>
</tr>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B18">Ji and Wu (2022)</xref>
</td>
<td valign="top" align="left">Detection of severity levels of black measles disease of grape</td>
<td valign="top" align="left">Own Dataset</td>
<td valign="top" align="left">Fuzzy logic integrated Deep learning approach</td>
</tr>
<tr>
<td valign="top" align="left">
<xref ref-type="bibr" rid="B25">Liu et&#xa0;al. (2022)</xref>
</td>
<td valign="top" align="left">Diagnosis of severity levels of Alternaria leaf blotch disease of Apple plant</td>
<td valign="top" align="left">Own dataset</td>
<td valign="top" align="left">Custom CNN model</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3" sec-type="materials|methods">
<label>3</label>
<title>Materials and methods</title>
<sec id="s3_1">
<label>3.1</label>
<title>Flow of the proposed approach</title>
<p>The workflow of the proposed disease severity identification approach is depicted graphically in <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>. First, digital images of MLB disease of maize crop were captured from the fields and MDSD image database was created. Next, images were labelled into respective severity categories based on domain experts&#x2019; observations and saved into respective folders in the storage disk. Then, images were pre-processed and augmented to increase the training dataset; After that, the whole image dataset was split into two categories viz. training and testing sets and the proposed CNN model was trained and validated. Finally, based on the performance evaluation, the MLB disease-severity identification model was finalized and its architecture was saved on the disk. Detailed illustrations of these phases are discussed in the following sections.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Overall framework of the proposed approach for recognition of severity stages of MLB disease.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g001.tif"/>
</fig>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Image acquisition</title>
<p>In this study, we created an image database known as MDSD containing digital images of maize leaves affected with MLB disease. The images were collected in a non-destructive manner from several agricultural plots located at Bidhan Chandra Krishi Visvavidyalaya, Kalyani (22.9920&#xb0; N, 88.4495&#xb0; E) and ICAR-Indian Agricultural Research Institute, New Delhi (28.6331&#xb0; N, 77.1525&#xb0; E) during 2018-2020. Digital cameras (Nikon D3500 W/AF) and smartphones (Redmi Y2 and Asus Max Pro M1) were used for capturing the images under normal daylight conditions. We collected the images of MLB disease by focusing the camera lens on the symptomatic portions of leaves starting from the disease incidence stage to the highest severity stage with complex field backgrounds.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Disease severity stages</title>
<p>The images of MLB disease were thoroughly verified and categorized into four groups based on their symptomatic characteristics viz. healthy (no symptoms), low severity, medium severity and high severity stages as provided in <xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>. The categorization into the severity groups was done under the strict supervision of subject matter specialists (domain experts) of maize pathology at ICAR-IIMR, Ludhiana, India. Sample images of each category of MLB disease are shown in <xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Categorization and summary of images of MDSD database.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Category</th>
<th valign="middle" align="center">Characteristics</th>
<th valign="middle" align="center">Original</th>
<th valign="middle" align="center">Synthetic</th>
<th valign="middle" align="center">Total</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">
<bold>Healthy</bold>
</td>
<td valign="middle" align="left">No disease symptoms</td>
<td valign="middle" align="center">511</td>
<td valign="middle" align="center">3066</td>
<td valign="middle" align="center">3577</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>Low severity</bold>
</td>
<td valign="middle" align="left">Disease symptoms cover &lt;25% of the total leaf area</td>
<td valign="middle" align="center">389</td>
<td valign="middle" align="center">3112</td>
<td valign="middle" align="center">3501</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>Medium severity</bold>
</td>
<td valign="middle" align="left">Disease symptoms cover 25-50% of the total leaf area</td>
<td valign="middle" align="center">621</td>
<td valign="middle" align="center">3105</td>
<td valign="middle" align="center">3726</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>High severity</bold>
</td>
<td valign="middle" align="left">Disease symptoms cover &gt; 50% of the total leaf area</td>
<td valign="middle" align="center">239</td>
<td valign="middle" align="center">3346</td>
<td valign="middle" align="center">3585</td>
</tr>
<tr>
<td valign="middle" align="left"/>
<td valign="middle" align="left">Total</td>
<td valign="middle" align="center">1,760</td>
<td valign="middle" align="center">
<bold>12,629</bold>
</td>
<td valign="middle" align="center">
<bold>14,389</bold>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Sample images of MLB disease of maize crop grouped into four categories <bold>(A)</bold> Healthy <bold>(B)</bold> Low Severity <bold>(C)</bold> Medium Severity and <bold>(D)</bold> High Severity.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g002.tif"/>
</fig>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Image pre-processing</title>
<p>Prior to training process, slight pre-processing of the raw images was required for better modelling. At first, unwanted images like duplicate, noisy, out-of-focus, blurred images were discarded from the raw images. After that, images were resized to 256 &#xd7; 256 pixel size by keeping hardware system constraints in mind and for better interpretation by the proposed model.</p>
</sec>
<sec id="s3_5">
<label>3.5</label>
<title>Image augmentation</title>
<p>In order to increase the number of images for model training, synthetic images were generated and augmented with the original dataset. Here, we used two techniques to generate the synthetic images: geometric transformation and brightness adjustment. The overall summary of images in the MDSD database is provided in <xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>.</p>
<sec id="s3_5_1">
<label>3.5.1</label>
<title>Geometric transformation</title>
<p>Geometric transformation means transforming the orientation of the images. In this study, we applied several geometric transformations randomly to generate artificial images which involved rotating (90&#xb0;, 180&#xb0; and 270&#xb0;), flipping (top-down and left-right), skewing, and zooming. The geometric transformations were applied using the &#x2018;Augmentor&#x2019; library (<xref ref-type="bibr" rid="B4">Bloice et&#xa0;al., 2019</xref>) which provides translation invariance transformation of the images.</p>
</sec>
<sec id="s3_5_2">
<label>3.5.2</label>
<title>Brightness adjustment</title>
<p>As the images were captured using different devices and at different periods of time, the images weren&#x2019;t homogeneous in terms of illumination. The light intensity on the diseased images greatly impacts when we apply computer vision techniques. Hence, we applied a <italic>gamma function</italic> in our images to generate synthetic images with different brightness levels. The <italic>gamma function</italic> is an image processing technique that applies the non-linear adjustment to individual pixel values to encode and decode the luminance of an image. The gamma function can be defined mathematically by the following formula (eq. 1).</p>
<disp-formula>
<mml:math display="block" id="M1">
<mml:mrow>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mi>a</mml:mi>
<mml:msubsup>
<mml:mi>i</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo stretchy="false">/</mml:mo>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where, <italic>i<sub>in</sub>
</italic> is the input images with pixel values scaled from [0, 255] to [0, 1], <italic>&#x3b3;</italic> is the gamma value, <italic>i<sub>out</sub>
</italic> is the output image scaled back to [0, 255] and <italic>a</italic> is a constant value (mainly equal to 1). Gamma values ( <italic>&#x3b3;</italic> ) &lt; 1 will shift the image towards the darker end of the spectrum, while gamma values ( <italic>&#x3b3;</italic> ) &gt; 1 will make the image brighter, and <italic>&#x3b3;</italic> = 1 will not affect the input image.</p>
</sec>
</sec>
<sec id="s3_6">
<label>3.6</label>
<title>Proposed lightweight CNN model</title>
<p>In this study, we proposed a lightweight convolutional neural network (CNN) to identify the severity stages of MLB disease of maize crop. In this network, we have incorporated the modified <italic>Inception</italic> modules into a simple CNN framework, enhancing the network&#x2019;s finer and multi-scale feature extraction capability. The proposed model is composed of several computational modules which are discussed in following subsections:</p>
<sec id="s3_6_1">
<label>3.6.1</label>
<title>CRB layer (<italic>crb</italic>)</title>
<p>The CRB is the most important layer in the proposed lightweight model which encompasses three popular operations viz. Convolution operation, ReLU and Batch Normalization operation as shown in <xref ref-type="fig" rid="f3">
<bold>Figure&#xa0;3</bold>
</xref>. The main function of this CRB layer was to generate pattern detectors from the images in the form of feature maps.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Framework of the CRB module of the proposed model.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g003.tif"/>
</fig>
<sec id="s3_6_1_1">
<label>3.6.1.1</label>
<title>Convolution operation (conv)</title>
<p>The convolution operation involves the extraction of inherent features (aka feature maps) from the input images by using a set of kernels/filters (<xref ref-type="bibr" rid="B22">LeCun et&#xa0;al., 1998</xref>). The kernel/filters are of smaller size than the input images such as 3 &#xd7; 3 or 1 &#xd7; 1. Mathematically, the convolution operation is expressed by eq. 2:</p>
<disp-formula>
<mml:math display="block" id="M2">
<mml:mrow>
<mml:msubsup>
<mml:mi>z</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mi>k</mml:mi>
<mml:mi>m</mml:mi>
</mml:munderover>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mrow>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mi>l</mml:mi>
</mml:msubsup>
<mml:mo>.</mml:mo>
<mml:msubsup>
<mml:mi>w</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
<mml:mo>+</mml:mo>
<mml:msubsup>
<mml:mi>b</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where,</p>
<p>
<inline-formula>
<mml:math display="inline" id="im1">
<mml:mrow>
<mml:msubsup>
<mml:mi>z</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> denotes the output feature map of <italic>k</italic>-th input at <italic>l</italic>-th layer of the model</p>
<p>
<inline-formula>
<mml:math display="inline" id="im2">
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> denotes the <italic>k</italic>-th input feature map at <italic>l</italic>-th layer of the model</p>
<p>
<inline-formula>
<mml:math display="inline" id="im3">
<mml:mrow>
<mml:msubsup>
<mml:mi>w</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math display="inline" id="im4">
<mml:mrow>
<mml:msubsup>
<mml:mi>b</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>l</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> denotes the weights and bias at the <italic>l</italic>-th layer of the model</p>
</sec>
<sec id="s3_6_1_2">
<label>3.6.1.2</label>
<title>ReLU operation (ReLU)</title>
<p>ReLU (Rectified Linear Unit) is the widely used activation function for the CNN models that enhances the non-linear attributes within the input feature maps (<xref ref-type="bibr" rid="B12">Haque et&#xa0;al., 2021</xref>). The ReLU function requires less computation and hence speeds up the overall training process. Its convergence speed is higher than that of other functions, and it induces sparsity in feature maps. It is expressed by the following equation (eq 3):</p>
<disp-formula>
<mml:math display="block" id="M3">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>L</mml:mi>
<mml:mi>U</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:mrow>
<mml:mo>{</mml:mo>
<mml:mrow>
<mml:mtable>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mtext>if</mml:mtext>
<mml:mo>&#xa0;</mml:mo>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&gt;</mml:mo>
<mml:mn>0</mml:mn>
<mml:mo>&#xa0;</mml:mo>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mtext>if</mml:mtext>
<mml:mo>&#xa0;</mml:mo>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>&#x2264;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where, <italic>z<sub>k</sub>
</italic> denotes the output feature map of <italic>k</italic>-th input feature map</p>
</sec>
<sec id="s3_6_1_3">
<label>3.6.1.3</label>
<title>Batch normalization operation (BN)</title>
<p>The batch normalization process transforms a batch of images (say <italic>m</italic>) to have a mean zero and standard deviation of one. It speeds up the training process and handles the internal covariances of the input feature maps (<xref ref-type="bibr" rid="B17">Ioffe, 2017</xref>). The batch normalization is expressed as the following equations (eq. 4 and eq. 5):</p>
<disp-formula>
<mml:math display="block" id="M4">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mi>&#x3b3;</mml:mi>
<mml:msub>
<mml:mover accent="true">
<mml:mi>z</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:mi>&#x3b2;</mml:mi>
<mml:mo>&#xa0;</mml:mo>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula>
<mml:math display="block" id="M5">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>z</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>E</mml:mi>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:msqrt>
<mml:mrow>
<mml:mi>v</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>r</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mi>&#x3f5;</mml:mi>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where,</p>
<p>
<italic>y</italic>
<sub>
<italic>i</italic>
</sub> denotes the output feature map</p>
<p>
<inline-formula>
<mml:math display="inline" id="im6">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>z</mml:mi>
<mml:mo>^</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the normalized input feature map</p>
<p>
<italic>E</italic>(<italic>z</italic>
<sub>
<italic>i</italic>
</sub>) denotes the mean of the input feature map <italic>z</italic>
<sub>
<italic>i</italic>
</sub>
<italic>var</italic>(<italic>z</italic>
<sub>
<italic>i</italic>
</sub>) denotes the variance of the input feature map <italic>z</italic>
<sub>
<italic>i</italic>
</sub> <italic>&#x3b3;</italic>&#xa0;<italic>and</italic>&#xa0;<italic>&#x3b2;</italic> are the scaling and offset factors of the network that are trainable</p>
</sec>
</sec>
<sec id="s3_6_2">
<label>3.6.2</label>
<title>Maxpool module (<italic>pool</italic>)</title>
<p>The maxpooling operation extracts the maximum element from the respective regions of feature map covered by the pooling kernels (<xref ref-type="bibr" rid="B8">Chollet, 2021</xref>). The maxpool layer outputs the most promising features from the input images without adding any extra trainable parameters to the network. In this proposed model, we applied maxpool with a kernel size of 3 x 3 and strides of 1 and 2.</p>
</sec>
<sec id="s3_6_3">
<label>3.6.3</label>
<title>Modified inception module (<italic>incep</italic>)</title>
<p>Generally, the &#x2018;inception&#x2019; module of Inception networks obtains the integration of sparse structure by approximating the available dense component of the network (<xref ref-type="bibr" rid="B39">Szegedy et&#xa0;al., 2015</xref>; <xref ref-type="bibr" rid="B40">Szegedy et&#xa0;al., 2016</xref>). In this study, we proposed a modified inception module by applying a few changes with respect to the kernel sizes, number of filters and parallel convolutions. In the proposed inception module, we applied symmetrical (1 x 1) and asymmetrical convolution kernels in a parallel manner with a maxpool operation. Here, we factorized the convolutions with spatial filters of n &#xd7; n (for 3 x 3 or 5 x 5) into asymmetrical convolutions with filter sizes n&#xd7;1 and 1&#xd7;n (e.g., 3 x 1 and 1 x 3; 5 x 1 and 1 x 5). Prior to each asymmetrical convolution, one 1 x 1 convolution kernel is incorporated to reduce the representational bottleneck of the network. We also applied ReLU in each convolution operation to induce sparsity in the feature maps (as shown in <xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>). Finally, the outputs from all parallel convolutions and maxpool layers were concatenated and passed to the next layer of the network.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Architecture of the proposed modified inception module.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g004.tif"/>
</fig>
</sec>
<sec id="s3_6_4">
<label>3.6.4</label>
<title>GAP module (<italic>gap</italic>)</title>
<p>The GAP or Global Average Pooling is a unique pooling operation designed to generate a scalar vector of features by computing the average of each feature map. It aggressively summarizes the presence of a feature in an image by downsampling the entire input feature map to a single value (<xref ref-type="bibr" rid="B24">Lin et&#xa0;al., 2013</xref>). The purpose of the GAP layer was to reduce the chance of overfitting as it doesn&#x2019;t add any extra learnable parameters to the network.</p>
</sec>
<sec id="s3_6_5">
<label>3.6.5</label>
<title>Softmax layer (<italic>softmax</italic>)</title>
<p>A softmax layer was added at the end point of the proposed CNN model. The softmax layer contains the same number of nodes as the number of classes in the dataset under study. The <italic>softmax function</italic> generates the output probability values from the input feature vectors. It converts the non-normalized feature vectors of the network into a probability distribution over the predicted output class (<xref ref-type="bibr" rid="B5">Bouchard, 2007</xref>). Mathematically, softmax function is expressed as the following equation (eq: 6):</p>
<disp-formula>
<mml:math display="block" id="M6">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>x</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mo>&#x2211;</mml:mo>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>where, <italic>z<sub>j</sub>
</italic> denotes the <italic>j</italic>-th item of the output feature vector</p>
<p>The overall framework of the proposed network in a graphical manner is provided in <xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5</bold>
</xref>. Also, a detailed layer-wise description like layer names, kernel/filter sizes, strides, output shapes, number of kernels/filters and number of training parameters is provided in <xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Overall architectural framework of the proposed CNN model.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g005.tif"/>
</fig>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Layer-wise configuration of the proposed model.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Name</th>
<th valign="middle" align="center">Layers</th>
<th valign="middle" align="center">Kernel size</th>
<th valign="middle" align="center">Stride</th>
<th valign="middle" align="center">Output shape</th>
<th valign="middle" align="center"># Kernel</th>
<th valign="middle" align="center">Parameters</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">
<bold>
<italic>input</italic>
</bold>
</td>
<td valign="middle" align="left">Input images</td>
<td valign="middle" align="center">&#x2013;</td>
<td valign="middle" align="center">&#x2013;</td>
<td valign="middle" align="center">256 x 256 x 3</td>
<td valign="middle" align="center">&#x2013;</td>
<td valign="middle" align="center">0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_1</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">256 x 256</td>
<td valign="middle" align="center">32</td>
<td valign="middle" align="center">864 + 96 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_2</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">254 x 254</td>
<td valign="middle" align="center">64</td>
<td valign="middle" align="center">18,432 +192 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>pool_1</italic>
</bold>
</td>
<td valign="middle" align="left">Max-Pooling</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">126 x 126</td>
<td valign="middle" align="center">64</td>
<td valign="middle" align="center">0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_3</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">126 x 126</td>
<td valign="middle" align="center">64</td>
<td valign="middle" align="center">36,864 + 192 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_4</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">1 x 1</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">126 x 126</td>
<td valign="middle" align="center">96</td>
<td valign="middle" align="center">6,144 + 288 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_5</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">1 x 1</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">126 x 126</td>
<td valign="middle" align="center">96</td>
<td valign="middle" align="center">9,216 + 288 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>pool_2</italic>
</bold>
</td>
<td valign="middle" align="left">Max-Pooling</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">124 x 124</td>
<td valign="middle" align="center">96</td>
<td valign="middle" align="center">0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>incep_1</italic>
</bold>
</td>
<td valign="middle" align="left">Inception</td>
<td valign="middle" align="center">1 x 1, 3 x 1,<break/>1 x 3, 5 x 1,<break/>1 x 5</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">124 x 124</td>
<td valign="middle" align="center">32, 64, 128, 256</td>
<td valign="middle" align="center">72,128</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>pool_3</italic>
</bold>
</td>
<td valign="middle" align="left">Max-Pooling</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">61 x 61</td>
<td valign="middle" align="center">256</td>
<td valign="middle" align="center">0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_6</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">1 x 1</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">61 x 61</td>
<td valign="middle" align="center">128</td>
<td valign="middle" align="center">32,768 + 384 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_7</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">61 x 61</td>
<td valign="middle" align="center">128</td>
<td valign="middle" align="center">147,656 + 384 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_8</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">1 x 1</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">61 x 61</td>
<td valign="middle" align="center">256</td>
<td valign="middle" align="center">32,768 + 768 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>pool_4</italic>
</bold>
</td>
<td valign="middle" align="left">Max-Pooling</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">59 x 59</td>
<td valign="middle" align="center">256</td>
<td valign="middle" align="center">0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_9</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">1 x 1</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">59 x 59</td>
<td valign="middle" align="center">256</td>
<td valign="middle" align="center">65,536 + 768 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>crb_10</italic>
</bold>
</td>
<td valign="middle" align="left">Conv + ReLU + BN</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">59 x 59</td>
<td valign="middle" align="center">256</td>
<td valign="middle" align="center">884,736 + 1,152 + 0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>pool_5</italic>
</bold>
</td>
<td valign="middle" align="left">Max-Pooling</td>
<td valign="middle" align="center">3 x 3</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">29 x 29</td>
<td valign="middle" align="center">384</td>
<td valign="middle" align="center">0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>incep_2</italic>
</bold>
</td>
<td valign="middle" align="left">Inception</td>
<td valign="middle" align="center">1 x 1, 3 x 1,<break/>1 x 3, 5 x 1,<break/>1 x 5</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">29 x 29</td>
<td valign="middle" align="center">32, 64, 128, 256</td>
<td valign="middle" align="center">194,008</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>gap</italic>
</bold>
</td>
<td valign="middle" align="left">Global average pooling</td>
<td valign="middle" align="center">&#x2013;</td>
<td valign="middle" align="center">&#x2013;</td>
<td valign="middle" align="center">320</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>
<italic>softmax</italic>
</bold>
</td>
<td valign="middle" align="left">Softmax layer</td>
<td valign="middle" align="center">&#x2013;</td>
<td valign="middle" align="center">&#x2013;</td>
<td valign="middle" align="center">1 x 4</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">1,248</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s3_7">
<label>3.7</label>
<title>Evaluation metrics</title>
<p>We evaluated the prediction performance of the proposed CNN model on the images of the testing data. We computed the confusion matrix (<italic>CM</italic>), which represents the model&#x2019;s prediction performance in a tabular fashion. In the <italic>CM</italic>, row elements denote the actual values, while the column entries represent the predicted values. In the <italic>CM</italic>, the diagonal elements represent the correct predictions (i.e., true positives (TP) and true negatives (TN)), while the incorrect predictions (i.e., false positives (FP) and false negatives (FN)) are denoted by the off-diagonal elements. We also computed the relevant evaluation metrics such as Recall, Precision and f1-score.</p>
<p>
<bold>Classification Accuracy:</bold> The classification accuracy (or accuracy) defines the proportion of the correct prediction out of the total predictions. The following expression measures it:</p>
<disp-formula>
<mml:math display="block" id="M7">
<mml:mrow>
<mml:mtext>C</mml:mtext>
<mml:mi>l</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>f</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>y</mml:mi>
<mml:mo>=</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>N</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>g</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>r</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>o</mml:mi>
<mml:mi>f</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mi>h</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>
<bold>Recall (Sensitivity):</bold> The recall or sensitivity is the measure that tells what percentage of the actual positives are predicted as positive. The following expression calculates it:</p>
<disp-formula>
<mml:math display="block" id="M8">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>N</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>g</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>
<bold>Precision:</bold> Precision is the measure that gives the percentage of samples predicted as positive that are actually positive. The following expression calculates it:</p>
<disp-formula>
<mml:math display="block" id="M9">
<mml:mrow>
<mml:mtext>P</mml:mtext>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>=</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo>&#xa0;</mml:mo>
<mml:mo>+</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>s</mml:mi>
<mml:mo>&#xa0;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>
<bold>f1-Score</bold>: f1-Score is the measure that tells us about the robustness of the model. It is the harmonic mean of precision and recall. The following expression calculates it-</p>
<disp-formula>
<mml:math display="block" id="M10">
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>S</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>=</mml:mo>
<mml:mo>&#xa0;</mml:mo>
<mml:mn>2</mml:mn>
<mml:mo>*</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>*</mml:mo>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
</sec>
</sec>
<sec id="s4" sec-type="results">
<label>4</label>
<title>Results and discussion</title>
<p>In this study, 1,760 images of MLB disease of maize were collected under the MDSD database from agricultural fields which were then augmented to 14,389 images. The MDSD image database is categorized into 4 groups viz. healthy, low severity, medium severity and high severity based on the intensity levels of the disease symptoms on leaves. We randomly split the whole dataset into two sets viz. training and testing sets in the ratio of 80:20. Here, the proposed convolutional neural network (CNN) was trained and tested with the MDSD dataset for automated diagnosis of severity stages of MLB disease. In this approach, several combinations of <italic>CRB</italic> and inception modules were attempted. However, CNN network with 10 <italic>CRB</italic> and 2 modified inception modules gave the optimal classification performance. Furthermore, to inspect the effectiveness of the proposed model, we also employed a few state-of-the-art pre-trained models viz: VGG16, VGG19, InceptionV3, ResNet50, Xception, MobileNetV2, DenseNet121 and NASNetMobile networks in this study. All the models were trained and tested with similar hyperparameters and configurations as shown in <xref ref-type="table" rid="T4">
<bold>Table&#xa0;4</bold>
</xref>. All the model architectures were implemented in python using the tensorflow environment, an open-source deep learning framework. We performed all the experimental analyses by utilizing the high computation power of the Tesla V100 GPUs in the NVIDIA DGX servers.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Hyperparameters used during model training.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Name</th>
<th valign="middle" align="center">Hyper Parameters</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Loss Function</td>
<td valign="middle" align="center">Categorical Cross Entropy</td>
</tr>
<tr>
<td valign="middle" align="left">Optimization function</td>
<td valign="middle" align="center">Nadam</td>
</tr>
<tr>
<td valign="middle" align="left">Learning Rate</td>
<td valign="middle" align="center">0.001</td>
</tr>
<tr>
<td valign="middle" align="left">Momentum</td>
<td valign="middle" align="center">0.9</td>
</tr>
<tr>
<td valign="middle" align="left">Weight Decay</td>
<td valign="middle" align="center">0.004</td>
</tr>
<tr>
<td valign="middle" align="left">Epochs</td>
<td valign="middle" align="center">500</td>
</tr>
<tr>
<td valign="middle" align="left">Batch size</td>
<td valign="middle" align="center">128</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In the present study, we trained and validated our proposed CNN model 500 times (epochs) using a batch size of 128 (according to the hardware system feasibility) on the MDSD database. Our proposed model achieved the training accuracy of 99.78% with loss of 0.046, whereas the testing accuracy achieved so far was 99.13% with loss of 0.0317. We presented the epoch-wise training and testing behavior (for both classification accuracy and loss) of the proposed model in <xref ref-type="fig" rid="f6">
<bold>Figures&#xa0;6A, B</bold>
</xref> to showcase the model&#x2019;s efficiency on images of MDSD database.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Epoch-wise behaviour of training and testing of the proposed CNN model <bold>(A)</bold> Classification accuracy: training vs testing and <bold>(B)</bold> Loss: training vs testing.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g006.tif"/>
</fig>
<p>The experimental findings on the testing set of the MDSD image database reported that our proposed model achieved the overall classification accuracy (99.13%) which is far better than the employed pre-trained networks as shown in <xref ref-type="fig" rid="f7">
<bold>Figures&#xa0;7A, B</bold>
</xref>. However, among the state-of-the-art pre-trained models, the DenseNet121 model achieves the highest accuracy of 95.65% on the test dataset (shown in <xref ref-type="fig" rid="f7">
<bold>Figure&#xa0;7A</bold>
</xref>). The rest of the models achieve accuracy within 85 to 92%. The proposed model also obtained the lowest testing loss (0.0317) of all, while the DenseNet121 model reaches 0.1063 (can be seen in <xref ref-type="fig" rid="f7">
<bold>Figure&#xa0;7B</bold>
</xref>). These experimental results demonstrate the superiority and effectiveness of the proposed model over the popular pre-trained models.</p>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Comparative performance of the proposed model and pretrained models <bold>(A)</bold> models wise classification accuracies on test data and <bold>(B)</bold> model-wise testing loss.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g007.tif"/>
</fig>
<p>The interpretation of the model&#x2019;s performance evaluation based on classification accuracy and training loss wouldn&#x2019;t be sufficient. Hence, we calculated the average f1-scores of all the models to evaluate the models in an unbiased way. We presented the obtained f1-scores of the models (proposed as well as pre-trained) in <xref ref-type="fig" rid="f8">
<bold>Figure&#xa0;8</bold>
</xref>. It is quite evident from <xref ref-type="fig" rid="f8">
<bold>Figure&#xa0;8</bold>
</xref>, that our proposed model obtained a higher f1-score than the pre-trained models in the testing dataset of MLB disease. Our proposed model&#x2019;s prediction performance on the MLB disease dataset was far better than the popular pretrained models. This result implies that our proposed CNN model could identify the unknown images of MDSD database and classify them into respective severity classes.</p>
<fig id="f8" position="float">
<label>Figure&#xa0;8</label>
<caption>
<p>f1-scores of the models obtained on testing dataset.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g008.tif"/>
</fig>
<p>To better understand the prediction performance of our proposed model, we presented the confusion matrix in <xref ref-type="fig" rid="f9">
<bold>Figure&#xa0;9</bold>
</xref>. <xref ref-type="fig" rid="f9">
<bold>Figure&#xa0;9</bold>
</xref> shows that our proposed model was 100% accurate in predicting the healthy samples, 98% accurate for the low severity samples, 99% accurate for both samples of medium severity and high severity. Moreover, we also computed recall, precision and f1-score to present the class-wise prediction performance of the proposed model as shown in <xref ref-type="table" rid="T5">
<bold>Table&#xa0;5</bold>
</xref>. <xref ref-type="table" rid="T5">
<bold>Table&#xa0;5</bold>
</xref> shows that the proposed model obtained quite high scores (approx. 99%) for all three metrics. It is evident from the confusion matrix and the performance metrics (recall, precision, and f1-score) that our model performed remarkably well for all the classes of the severity of MLB disease in MDSD database. The model&#x2019;s performance was quite appreciable not only for healthy or high severity images but also for low severity images in which the symptoms of the disease are very mild. This result supports the significance of the proposed CNN model in recognizing severity levels for the unknown images of MLB disease of maize crop.</p>
<fig id="f9" position="float">
<label>Figure&#xa0;9</label>
<caption>
<p>Confusion matrix of the proposed model on testing dataset.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g009.tif"/>
</fig>
<table-wrap id="T5" position="float">
<label>Table&#xa0;5</label>
<caption>
<p>Class-wise performance of the proposed model.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Category</th>
<th valign="middle" align="center">Recall (%)</th>
<th valign="middle" align="center">Precision (%)</th>
<th valign="middle" align="center">f1-score (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">
<bold>Healthy</bold>
</td>
<td valign="middle" align="center">100</td>
<td valign="middle" align="center">100</td>
<td valign="middle" align="center">100</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>Low Severity</bold>
</td>
<td valign="middle" align="center">99.20</td>
<td valign="middle" align="center">98.02</td>
<td valign="middle" align="center">98.61</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>Medium Severity</bold>
</td>
<td valign="middle" align="center">98.13</td>
<td valign="middle" align="center">98.55</td>
<td valign="middle" align="center">98.34</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>High Severity</bold>
</td>
<td valign="middle" align="center">98.60</td>
<td valign="middle" align="center">99.30</td>
<td valign="middle" align="center">98.95</td>
</tr>
<tr>
<td valign="middle" align="left">
<bold>Average</bold>
</td>
<td valign="middle" align="center">98.98</td>
<td valign="middle" align="center">98.97</td>
<td valign="middle" align="center">
<bold>98.97</bold>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>From the overall analysis of all the employed models, it is apparent that our proposed lightweight CNN model outperforms the popular pre-trained models for identifying the severity stages of MLB disease. However, the most important aspect of this study is that the proposed model can identify the images of the severity of MLB disease even with complex background conditions. This makes the proposed CNN model an effective and cost-effective approach for identifying the appropriate disease severity stages for the researchers, subject matter specialists and farmers in the field condition.</p>
</sec>
<sec id="s5">
<label>5</label>
<title>Ablation studies</title>
<p>In this section, we presented the ablation studies for selecting the optimum number of inception modules and best optimization function for the proposed model. First, we trained our proposed CNN model by incorporating 0, 1, 2 and 3 <italic>Inception</italic> modules. The experimental results reported in <xref ref-type="fig" rid="f10">
<bold>Figures&#xa0;10A, B</bold>
</xref>, depict that the proposed CNN framework achieved around 95% testing accuracy without any inception module. However, the accuracy kept increasing as the number of Inception modules increased as shown in <xref ref-type="fig" rid="f10">
<bold>Figure&#xa0;10A</bold>
</xref>. As a result, the proposed model showed the best prediction performance (classification accuracy of 99.13%) with two <italic>Inception</italic> modules compared to the others. From <xref ref-type="fig" rid="f10">
<bold>Figure&#xa0;10B</bold>
</xref>, it is apparent that as the number of <italic>Inception</italic> modules increased, the testing loss decreased and the proposed model achieved the lowest testing loss (i.e. 0.0317) with two <italic>Inception</italic> modules. Hence, the two Inception modules were selected for the proposed CNN model.</p>
<fig id="f10" position="float">
<label>Figure&#xa0;10</label>
<caption>
<p>Depiction of the effect of number of <italic>Inception</italic> modules and optimization functions in performance of the proposed CNN model <bold>(A)</bold> Number of Inception modules vs classification accuracy <bold>(B)</bold> Number of Inception modules vs testing loss and <bold>(C)</bold> Optimization functions vs classification accuracy.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpls-13-1077568-g010.tif"/>
</fig>
<p>We also conducted experiments with different optimization functions, which have a huge role in model convergence and feature learning. We experimented with four types of optimization functions viz. <italic>Stochastic gradient descent</italic> (<italic>SGD</italic>), <italic>RMSProp</italic>, <italic>Adam</italic> and <italic>Nadam</italic> in the proposed model and presented the results in <xref ref-type="fig" rid="f10">
<bold>Figure&#xa0;10C</bold>
</xref>. Among the four optimization functions, <italic>Nadam</italic> function showed the best performance in the MLB disease severity dataset of maize crop.</p>
</sec>
<sec id="s6" sec-type="conclusions">
<label>6</label>
<title>Conclusion</title>
<p>In this study, we addressed the major issue of crop management i.e., disease severity stages by proposing a deep learning-based diagnosis approach. In this regard, we created an image database known as MDSD containing images of MLB disease with four different severity stages viz. healthy, low severity, medium and high severity. Next, we proposed a novel lightweight CNN model to identify the severity stages of MLB disease using the images of MDSD. The proposed CNN model&#x2019;s basic framework comprises a stack of computational layers like the CRB layer (<italic>Convolution, ReLU and Batch normalization</italic>) augmented with two modified <italic>Inception</italic> modules. On the test dataset, our proposed model reported 99.13% classification accuracy with an f1-score of 98.97% which is quite superior to most of the popular state-of-the-art pretrained models. Furthermore, the overall experimental analysis demonstrated that our proposed CNN model efficiently captures the promising features of the images with complex backgrounds and classifies them into respective severity classes. Therefore, this automated approach for identifying the severity stages of MLB disease using the proposed CNN model would be feasible and cost-effective for the farm community and the subject matter specialists. However, in the present study, the proposed CNN model only applies to the MLB disease of maize crop. In the future, the study can be further expanded to identify severity stages of other major diseases of maize crop and diseases of other crops as per the availability of image dataset.</p>
</sec>
<sec id="s7" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s8" sec-type="author-contributions">
<title>Author contributions</title>
<p>MH: Conceptualization, Methodology, Investigation, Writing - Original Draft, Visualization; SM: Conceptualization, Supervision, Investigation; AA: Supervision, Writing - Review and Editing; CD: Conceptualization, Supervision, Writing - Original Draft, Visualization; TM: Writing - Review and Editing; SN: Writing - Original Draft, Visualization; KH: Field data generation; Data curation. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<ack>
<title>Acknowledgments</title>
<p>Authors acknowledge the support and resources provided by ICAR-Indian Agricultural Statistical Research Institute, New Delhi; ICAR-Indian Institute of Maize Research, Ludhiana and ICAR-Indian Agricultural Research Institute, New Delhi for carrying out this research work. Authors also acknowledge the resources provided by the National Agricultural Science Funds (NASF), ICAR and National Agricultural Higher Education Project (NAHEP), ICAR.</p>
</ack>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Aggarwal</surname> <given-names>S. K.</given-names>
</name>
<name>
<surname>Gogoi</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Rakshit</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2021</year>). <source>Major diseases of maize and their management</source> (<publisher-loc>Ludhiana, Punjab</publisher-loc>: <publisher-name>ICAR-IIMR</publisher-name>), <fpage>pp 27</fpage>. Available at: <uri xlink:href="https://iimr.icar.gov.in/publications-category/technical-bulletins/">https://iimr.icar.gov.in/publications-category/technical-bulletins/</uri>. 141004.</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Atila</surname> <given-names>&#xdc;.</given-names>
</name>
<name>
<surname>U&#xe7;ar</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Akyol</surname> <given-names>K.</given-names>
</name>
<name>
<surname>U&#xe7;ar</surname> <given-names>E.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Plant leaf disease classification using EfficientNet deep learning model</article-title>. <source>Ecol. Inf.</source> <volume>61</volume>, <elocation-id>101182</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ecoinf.2020.101182</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Barbedo</surname> <given-names>J. G. A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Plant disease identification from individual lesions and spots using deep learning</article-title>. <source>Biosyst. Engineering</source> <volume>180</volume>, <fpage>96</fpage>&#x2013;<lpage>107</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.biosystemseng.2019.02.002</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bloice</surname> <given-names>M. D.</given-names>
</name>
<name>
<surname>Roth</surname> <given-names>P. M.</given-names>
</name>
<name>
<surname>Holzinger</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Biomedical image augmentation using augmentor</article-title>. <source>Bioinformatics</source> <volume>35</volume> (<issue>21</issue>), <fpage>4522</fpage>&#x2013;<lpage>4524</lpage>. doi: <pub-id pub-id-type="doi">10.1093/bioinformatics/btz259</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Bouchard</surname> <given-names>G.</given-names>
</name>
</person-group> (<year>2007</year>). &#x201c;<article-title>Efficient bounds for the softmax function, applications to inference in hybrid models</article-title>,&#x201d; in <source>NIPS Workshop for Approximate Bayesian Inference in Continuous/Hybrid Systems</source>. (<publisher-loc>Whistler, CA</publisher-loc>).</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Nanehkaran</surname> <given-names>Y. A.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>D.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Detection of rice plant diseases based on deep transfer learning</article-title>. <source>J. Sci. Food Agric.</source> <volume>100</volume> (<issue>7</issue>), <fpage>3246</fpage>&#x2013;<lpage>3256</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/jsfa.10365</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Ban</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>Y.</given-names>
</name>
<etal/>
</person-group>. (<year>2021</year>). <article-title>An approach for rice bacterial leaf streak disease segmentation and disease severity estimation</article-title>. <source>Agriculture</source> <volume>11</volume> (<issue>5</issue>), <fpage>420</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/agriculture11050420</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Chollet</surname> <given-names>F.</given-names>
</name>
</person-group> (<year>2021</year>). <source>Deep learning with Python</source> (<publisher-loc>Shelter Island, NY</publisher-loc>: <publisher-name>Manning Publications Co</publisher-name>).</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>DeChant</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Wiesner-Hanks</surname> <given-names>T.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Stewart.</surname> <given-names>E. L.</given-names>
</name>
<name>
<surname>Yosinski</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Gore</surname> <given-names>M. A.</given-names>
</name>
<etal/>
</person-group>. (<year>2017</year>). <article-title>Automated identification of northern leaf blight-infected maize plants from field imagery using deep learning</article-title>. <source>Phytopathology</source> <volume>107</volume> (<issue>11</issue>), <fpage>1426</fpage>&#x2013;<lpage>1432</lpage>. doi: <pub-id pub-id-type="doi">10.1094/PHYTO-11-16-0417-R</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ferentinos</surname> <given-names>K. P.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Deep learning models for plant disease detection and diagnosis</article-title>. <source>Comput. Electron. Agric.</source> <volume>145</volume>, <fpage>311</fpage>&#x2013;<lpage>318</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2018.01.009</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fuentes</surname> <given-names>A. F.</given-names>
</name>
<name>
<surname>Yoon</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Park</surname> <given-names>D. S.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>High-performance deep neural network-based tomato plant diseases and pests diagnosis system with refinement filter bank</article-title>. <source>Front. Plant Sci.</source> <volume>9</volume>, <elocation-id>1162</elocation-id>. doi: <pub-id pub-id-type="doi">10.3389/fpls.2018.01162</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Haque</surname> <given-names>M. A.</given-names>
</name>
<name>
<surname>Marwaha</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Arora</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Paul</surname> <given-names>R. K.</given-names>
</name>
<name>
<surname>Hooda</surname> <given-names>K. S.</given-names>
</name>
<name>
<surname>Sharma</surname> <given-names>A.</given-names>
</name>
<etal/>
</person-group>. (<year>2021</year>). <article-title>Image-based identification of maydis leaf blight disease of maize (Zea mays) using deep learning</article-title>. <source>Indian J. Agric. Sci.</source> <volume>91</volume> (<issue>9</issue>), <fpage>1362</fpage>&#x2013;<lpage>1369</lpage>. doi: <pub-id pub-id-type="doi">10.56093/ijas.v91i9.116089</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Haque</surname> <given-names>M. A.</given-names>
</name>
<name>
<surname>Marwaha</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Deb</surname> <given-names>C. K.</given-names>
</name>
<name>
<surname>Nigam</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Arora</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>b). <article-title>Recognition of diseases of maize crop using deep learning models</article-title>. <source>Neural Computing Appl.</source>, <fpage>1</fpage>&#x2013;<lpage>15</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00521-022-08003-9</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Haque</surname> <given-names>M. A.</given-names>
</name>
<name>
<surname>Marwaha</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Deb</surname> <given-names>C. K.</given-names>
</name>
<name>
<surname>Nigam</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Arora</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Hooda</surname> <given-names>K. S.</given-names>
</name>
<etal/>
</person-group>. (<year>2022</year>a). <article-title>Deep learning-based approach for identification of diseases of maize crop</article-title>. <source>Sci. Rep.</source> <volume>12</volume> (<issue>1</issue>), <fpage>6334</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/S41598-022-10140-Z</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Hooda</surname> <given-names>K. S.</given-names>
</name>
<name>
<surname>Bagaria</surname> <given-names>P. K.</given-names>
</name>
<name>
<surname>Khokhar</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Kaur</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Rakshit</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2018</year>). <source>Mass screening techniques for resistance to maize diseases</source> (<publisher-loc>Ludhiana</publisher-loc>: <publisher-name>ICAR-Indian Institute of Maize Research, PAU Campus</publisher-name>), <fpage>pp 93</fpage>. Available at: <uri xlink:href="https://iimr.icar.gov.in/publications-category/technical-bulletins/">https://iimr.icar.gov.in/publications-category/technical-bulletins/</uri>. 141004.</citation>
</ref>
<ref id="B16">
<citation citation-type="book">
<person-group person-group-type="author">
<collab>ICAR-IIMR</collab>
</person-group> (<year>2021</year>). <source>Annual report</source> (<publisher-loc>Ludhiana</publisher-loc>: <publisher-name>ICAR-Indian Institute of Maize Research Punjab Agricultural University campus</publisher-name>). Available at: <uri xlink:href="https://iimr.icar.gov.in/publications-category/annual-reports/">https://iimr.icar.gov.in/publications-category/annual-reports/</uri>. 141004.</citation>
</ref>
<ref id="B17">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ioffe</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). <source>Batch renormalization: Towards reducing minibatch dependence in batch-normalized models</source>. doi: <pub-id pub-id-type="doi">10.48550/arXiv.1702.03275</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ji</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Automatic detection and severity analysis of grape black measles disease based on deep learning and fuzzy logic</article-title>. <source>Comput. Electron. Agric.</source> <volume>193</volume>, <fpage>106718</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2022.106718</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kamilaris</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Prenafeta-Bold&#xfa;</surname> <given-names>F. X.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Deep learning in agriculture: A survey</article-title>,&#x201d; in <source>Computers and electronics in agriculture</source>. <volume>147</volume>, <fpage>70</fpage>&#x2013;<lpage>90</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compag.2018.02.016</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kaur</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Kumar</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Hooda</surname> <given-names>K. S.</given-names>
</name>
<name>
<surname>Gogoi</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Bagaria</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Singh</surname> <given-names>R. P.</given-names>
</name>
<etal/>
</person-group>. (<year>2020</year>). <article-title>Leaf stripping: an alternative strategy to manage banded leaf and sheath blight of maize</article-title>. <source>Indian Phytopathol.</source> <volume>73</volume> (<issue>2</issue>), <fpage>203</fpage>&#x2013;<lpage>211</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s42360-020-00208-z</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>LeCun</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Bengio</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Hinton</surname> <given-names>G.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Deep learning</article-title>. <source>Nature</source> <volume>521</volume> (<issue>7553</issue>), <fpage>436</fpage>&#x2013;<lpage>444</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nature14539</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>LeCun</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Bottou</surname> <given-names>L.</given-names>
</name>
<name>
<surname>Bengio</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Haffner</surname> <given-names>P.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Gradient-based learning applied to document recognition</article-title>. <source>In Proc. IEEE</source> <volume>86</volume> (<issue>11</issue>), <fpage>2278</fpage>&#x2013;<lpage>2324</lpage>. doi: <pub-id pub-id-type="doi">10.1109/5.726791</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liang</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Xiang</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Coppola</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>W.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>PD2SE-net: Computer-assisted plant disease diagnosis and severity estimation network</article-title>. <source>Comput. Electron. Agric.</source> <volume>157</volume>, <fpage>518</fpage>&#x2013;<lpage>529</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2019.01.034</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lin</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Yan</surname> <given-names>S.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Network in network</article-title>. <source>arXiv preprint</source> <volume>1312</volume>, <fpage>4400</fpage>. arXiv. doi: <pub-id pub-id-type="doi">10.48550/arXiv.1312.4400</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>B. Y.</given-names>
</name>
<name>
<surname>Fan</surname> <given-names>K. J.</given-names>
</name>
<name>
<surname>Su</surname> <given-names>W. H.</given-names>
</name>
<name>
<surname>Peng</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Two-stage convolutional neural networks for diagnosing the severity of alternaria leaf blotch disease of the apple tree</article-title>. <source>Remote Sensing</source> <volume>14</volume> (<issue>11</issue>), <fpage>2519</fpage>. doi: <pub-id pub-id-type="doi">10.3390/rs14112519</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lu</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Mei</surname> <given-names>F.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>C.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>An in-field automatic wheat disease diagnosis system</article-title>. <source>Comput. Electron. Agric.</source> <volume>142</volume>, <fpage>369</fpage>&#x2013;<lpage>379</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2017.09.012</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lu</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Yi</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Zeng</surname> <given-names>N.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Identification of rice diseases using deep convolutional neural networks</article-title>. <source>Neurocomputing</source> <volume>267</volume>, <fpage>378</fpage>&#x2013;<lpage>384</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neucom.2017.06.023</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lv</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>G.</given-names>
</name>
<name>
<surname>He</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>Y.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Maize leaf disease identification based on feature enhancement and DMS-robust alexnet</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>57952</fpage>&#x2013;<lpage>57966</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2020.2982443</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Malik</surname> <given-names>V. K.</given-names>
</name>
<name>
<surname>Singh</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Hooda</surname> <given-names>K. S.</given-names>
</name>
<name>
<surname>Yadav</surname> <given-names>N. K.</given-names>
</name>
<name>
<surname>Chauhan</surname> <given-names>P. K.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Efficacy of newer molecules, bioagents and botanicals against maydis leaf blight and banded leaf and sheath blight of maize</article-title>. <source>Plant Pathol. J.</source> <volume>34</volume> (<issue>2</issue>), <fpage>121</fpage>. doi: <pub-id pub-id-type="doi">10.5423/PPJ.OA.11.2017.0251</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mohanty</surname> <given-names>S. P.</given-names>
</name>
<name>
<surname>Hughes</surname> <given-names>D. P.</given-names>
</name>
<name>
<surname>Salath&#xe9;</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Using deep learning for image-based plant disease detection</article-title>. <source>Front. Plant Sci.</source> <volume>7</volume>, <fpage>1419</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpls.2016.01419</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nigam</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Jain</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Marwaha</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Arora</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Wheat rust disease identification using deep learning</article-title>. <source>Gruyter</source>, <fpage>239</fpage>&#x2013;<lpage>250</lpage>. doi: <pub-id pub-id-type="doi">10.1515/9783110691276-012</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Nigam</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Jain</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Prakash</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Marwaha</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Arora</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Singh</surname> <given-names>V. K.</given-names>
</name>
<name>
<surname>Prakasha</surname> <given-names>T. L.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Wheat disease severity estimation: A deep learning approach</article-title>,&#x201d; in <source>International conference on Internet of things and connected technologies</source> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>185</fpage>&#x2013;<lpage>193</lpage>.</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Picon</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Alvarez-Gila</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Seitz</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Ortiz-Barredo</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Echazarra</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Johannes</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Deep convolutional neural networks for mobile capture device-based crop disease classification in the wild</article-title>. <source>Comput. Electron. Agric.</source> <volume>161</volume>, <fpage>280</fpage>&#x2013;<lpage>290</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2018.04.002</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Prabhakar</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Purushothaman</surname> <given-names>R.</given-names>
</name>
<name>
<surname>Awasthi</surname> <given-names>D. P.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Deep learning based assessment of disease severity for early blight in tomato crop</article-title>. <source>Multimedia Tools Appl.</source> <volume>79</volume> (<issue>39</issue>), <fpage>28773</fpage>&#x2013;<lpage>28784</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11042-020-09461-w</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Priyadharshini</surname> <given-names>R. A.</given-names>
</name>
<name>
<surname>Arivazhagan</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Arun</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Mirnalini</surname> <given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Maize leaf disease classification using deep convolutional neural networks</article-title>. <source>Neural Computing Appl.</source> <volume>31</volume> (<issue>12</issue>), <fpage>8887</fpage>&#x2013;<lpage>8895</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00521-019-04228-3</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rahman</surname> <given-names>C. R.</given-names>
</name>
<name>
<surname>Arko</surname> <given-names>P. S.</given-names>
</name>
<name>
<surname>Ali</surname> <given-names>M. E.</given-names>
</name>
<name>
<surname>Iqbal Khan</surname> <given-names>M. A.</given-names>
</name>
<name>
<surname>Apon</surname> <given-names>S. H.</given-names>
</name>
<name>
<surname>Nowrin</surname> <given-names>F.</given-names>
</name>
<etal/>
</person-group>. (<year>2020</year>). <article-title>Identification and recognition of rice diseases and pests using convolutional neural networks</article-title>. <source>Biosyst. Eng.</source> <volume>194</volume>, <fpage>112</fpage>&#x2013;<lpage>120</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.biosystemseng.2020.03.020</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sibiya</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Sumbwanyambe</surname> <given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Automatic fuzzy logic-based maize common rust disease severity predictions with thresholding and deep learning</article-title>. <source>Pathogens</source> <volume>10</volume> (<issue>2</issue>), <fpage>131</fpage>. doi: <pub-id pub-id-type="doi">10.3390/pathogens10020131</pub-id>
</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sladojevic</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Arsenovic</surname> <given-names>M.</given-names>
</name>
<name>
<surname>Anderla</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Culibrk</surname> <given-names>D.</given-names>
</name>
<name>
<surname>Stefanovic</surname> <given-names>D.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Deep neural networks based recognition of plant diseases by leaf image classification</article-title>. <source>Comput. Intell. Neurosci.</source> <volume>2016</volume>, <fpage>6</fpage>. doi: <pub-id pub-id-type="doi">10.1155/2016/3289801</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Szegedy</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>W.</given-names>
</name>
<name>
<surname>Jia</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Sermanet</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Reed</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Anguelov</surname> <given-names>D.</given-names>
</name>
<etal/>
</person-group>. (<year>2015</year>). &#x201c;<article-title>Going deeper with convolutions</article-title>,&#x201d; in <source>Proceedings of IEEE conference on computer vision and pattern recognition</source> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>9</lpage>.</citation>
</ref>
<ref id="B40">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Szegedy</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Vanhoucke</surname> <given-names>V.</given-names>
</name>
<name>
<surname>Ioffe</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Shlens</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Wojna</surname> <given-names>Z.</given-names>
</name>
</person-group> (<year>2016</year>). &#x201c;<article-title>Rethinking the inception architecture for computer vision</article-title>,&#x201d; in <source>Proceedings of IEEE conference on computer vision and pattern recognition</source> (<publisher-name>IEEE</publisher-name>), <fpage>2818</fpage>&#x2013;<lpage>2826</lpage>.</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Verma</surname> <given-names>S.</given-names>
</name>
<name>
<surname>Chug</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Singh</surname> <given-names>A. P.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Application of convolutional neural networks for evaluation of disease severity in tomato plant</article-title>. <source>J. Discrete Math. Sci. Cryptography</source> <volume>23</volume> (<issue>1</issue>), <fpage>273</fpage>&#x2013;<lpage>282</lpage>. doi: <pub-id pub-id-type="doi">10.1080/09720529.2020.1721890</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Du</surname> <given-names>P.</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>H.</given-names>
</name>
<name>
<surname>Li</surname> <given-names>J.</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>C.</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>H.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A cucumber leaf disease severity classification method based on the fusion of DeepLabV3+ and U-net</article-title>. <source>Comput. Electron. Agric.</source> <volume>189</volume>, <fpage>106373</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2021.106373</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>G.</given-names>
</name>
<name>
<surname>Sun</surname> <given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Automatic image-based plant disease severity estimation using deep learning</article-title>. <source>Comput. Intell. Neurosci.</source> <volume>2017</volume>, <fpage>1</fpage>&#x2013;<lpage>8</lpage>. doi: <pub-id pub-id-type="doi">10.1155/2017/2917536</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>K.</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>Q.</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>A.</given-names>
</name>
<name>
<surname>Meng</surname> <given-names>X.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Can deep learning identify tomato leaf disease</article-title>? <source>Adv. Multimedia</source> <volume>2018</volume>, <fpage>1</fpage>&#x2013;<lpage>10</lpage>. doi: <pub-id pub-id-type="doi">10.1155/2018/6710865</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>