<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Med.</journal-id>
<journal-title>Frontiers in Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Med.</abbrev-journal-title>
<issn pub-type="epub">2296-858X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmed.2025.1529335</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Medicine</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>PMPred-AE: a computational model for the detection and interpretation of pathological myopia based on artificial intelligence</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Zhang</surname> <given-names>Hong-Qi</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2913425/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Arif</surname> <given-names>Muhammad</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2792246/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Thafar</surname> <given-names>Maha A.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/804788/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Albaradei</surname> <given-names>Somayah</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2717615/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Cai</surname> <given-names>Peiling</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1734004/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zhang</surname> <given-names>Yang</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2176592/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Tang</surname> <given-names>Hua</given-names></name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/625623/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Lin</surname> <given-names>Hao</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/182351/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>The Clinical Hospital of Chengdu Brain Science Institute, School of Life Science and Technology, University of Electronic Science and Technology of China</institution>, <addr-line>Chengdu</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>College of Science and Engineering, Hamad Bin Khalifa University</institution>, <addr-line>Doha</addr-line>, <country>Qatar</country></aff>
<aff id="aff3"><sup>3</sup><institution>Computer Science Department, College of Computers and Information Technology, Taif University</institution>, <addr-line>Taif</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff4"><sup>4</sup><institution>Department of Computer Science, Faculty of Computing and Information Technology, King Abdulaziz University</institution>, <addr-line>Jeddah</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff5"><sup>5</sup><institution>School of Basic Medical Sciences, Chengdu University</institution>, <addr-line>Chengdu</addr-line>, <country>China</country></aff>
<aff id="aff6"><sup>6</sup><institution>Innovative Institute of Chinese Medicine and Pharmacy, Academy for Interdiscipline, Chengdu University of Traditional Chinese Medicine</institution>, <addr-line>Chengdu</addr-line>, <country>China</country></aff>
<aff id="aff7"><sup>7</sup><institution>School of Basic Medical Sciences, Southwest Medical University</institution>, <addr-line>Luzhou</addr-line>, <country>China</country></aff>
<aff id="aff8"><sup>8</sup><institution>Central Nervous System Drug Key Laboratory of Sichuan Province</institution>, <addr-line>Luzhou</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0001">
<p>Edited by: Alaa Abd-alrazaq, Weill Cornell Medicine-Qatar, Qatar</p>
</fn>
<fn fn-type="edited-by" id="fn0002">
<p>Reviewed by: Balachandran Manavalan, Sungkyunkwan University, Republic of Korea</p>
<p>Watshara Shoombuatong, Mahidol University, Thailand</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Yang Zhang, <email>yangzhang@cdutcm.edu.cn</email>; Hua Tang, <email>huatang@swmu.edu.cn</email>; Hao Lin, <email>hlin@uestc.edu.cn</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>13</day>
<month>03</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>12</volume>
<elocation-id>1529335</elocation-id>
<history>
<date date-type="received">
<day>16</day>
<month>11</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>27</day>
<month>02</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2025 Zhang, Arif, Thafar, Albaradei, Cai, Zhang, Tang and Lin.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Zhang, Arif, Thafar, Albaradei, Cai, Zhang, Tang and Lin</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec id="sec1">
<title>Introduction</title>
<p>Pathological myopia (PM) is a serious visual impairment that may lead to irreversible visual damage or even blindness. Timely diagnosis and effective management of PM are of great significance. Given the increasing number of myopia cases worldwide, there is an urgent need to develop an automated, accurate, and highly interpretable PM diagnostic technology.</p>
</sec>
<sec id="sec2">
<title>Methods</title>
<p>We proposed a computational model called PMPred-AE based on EfficientNetV2-L with attention-mechanism optimization. In addition, Gradient-weighted class activation mapping (Grad-CAM) technology was used to provide an intuitive, visual interpretation of the model&#x2019;s decision-making process.</p>
</sec>
<sec id="sec3">
<title>Results</title>
<p>The experimental results demonstrated that PMPred-AE achieved excellent performance in automatically detecting PM, with accuracies of 98.50, 98.25, and 97.25% on the training, validation, and test datasets, respectively. In addition, PMPred-AE focuses on specific areas of the PM image when making detection decisions.</p>
</sec>
<sec id="sec4">
<title>Discussion</title>
<p>The developed PMPred-AE model is capable of reliably providing accurate PM detection. In addition, Grad-CAM technology was used to provide an intuitive, visual interpretation of the model&#x2019;s decision-making process. This approach offers healthcare professionals an effective tool for interpreting the AI decision-making process.</p>
</sec>
</abstract>
<kwd-group>
<kwd>myopia</kwd>
<kwd>pathological myopia</kwd>
<kwd>deep learning</kwd>
<kwd>EfficientNetv2</kwd>
<kwd>Grad-CAM</kwd>
</kwd-group>
<contract-num rid="cn1">62250028</contract-num>
<contract-num rid="cn1">62172343</contract-num>
<contract-num rid="cn2">2022YFS0614</contract-num>
<contract-sponsor id="cn1">National Natural Science Foundation of China<named-content content-type="fundref-id">10.13039/501100001809</named-content></contract-sponsor>
<contract-sponsor id="cn2">Sichuan Science and Technology Program</contract-sponsor>
<counts>
<fig-count count="5"/>
<table-count count="2"/>
<equation-count count="7"/>
<ref-count count="53"/>
<page-count count="11"/>
<word-count count="6255"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Precision Medicine</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec5">
<label>1</label>
<title>Introduction</title>
<p>Pathological myopia (PM) is a serious visual disease that can lead to irreversible visual damage or even blindness (<xref ref-type="bibr" rid="ref1 ref2 ref3">1&#x2013;3</xref>). In recent years, PM has become one of the main causes of visual impairment and permanent blindness worldwide, especially in Asian countries. According to Holden et al. (<xref ref-type="bibr" rid="ref4">4</xref>), by 2050, nearly half of the global population will be affected by myopia, with approximately 10% suffering from high myopia, which will also become the leading cause of permanent blindness. In addition, retinopathy and complications related to myopia may also increase the risk of visual damage (<xref ref-type="bibr" rid="ref5 ref6 ref7">5&#x2013;7</xref>). Therefore, timely diagnosis and early detection of PM are crucial. Currently, developing an automated, accurate, and non-invasive PM diagnosis method is an urgent task.</p>
<p>With the development of artificial intelligence (AI) and the accumulation of myopia data, a variety of computational methods have been developed (<xref ref-type="bibr" rid="ref8 ref9 ref10">8&#x2013;10</xref>). For example, Liu et al. (<xref ref-type="bibr" rid="ref10">10</xref>) introduced a method using texture features and a Support Vector Machine (SVM) (<xref ref-type="bibr" rid="ref11 ref12 ref13">11&#x2013;13</xref>) to automatically detect PM. This method processed retinal fundus images by extracting the region of interest (ROI) and detecting the optic nerve head. Subsequently, texture-based metrics were generated, categorized, and grouped into zones for context-based feature generation. Finally, an SVM was used to detect PM based on these features, achieving an accuracy (ACC) of 87.5% (<xref ref-type="bibr" rid="ref14">14</xref>). Zhang et al. (<xref ref-type="bibr" rid="ref15">15</xref>) proposed an automatic detection method for PM based on max-relevance and min-redundancy (mRMR). This method built a feature space from information extracted from fundus images and medical screening data, created a ranked feature library using mRMR, searched for the most compact feature set with a forward-selection wrapper, and then used an SVM for detection. As a result, they achieved an ACC of 89.3% for the right eye and 88.5% for the left eye (<xref ref-type="bibr" rid="ref15">15</xref>). Xu et al. (<xref ref-type="bibr" rid="ref16">16</xref>) developed a detection method for PM based on bag-of-features and sparse learning. During the training phase, the codebook for the bag-of-features model and the classification model were learned, and the most relevant visual features were discovered through sparse learning.</p>
<p>In the detection phase, local features were first extracted from a given retinal fundus image and quantified using the learned codebook to obtain global features. Finally, the classification model was used to determine the presence of PM, achieving an ACC of 90.6% (<xref ref-type="bibr" rid="ref16">16</xref>). Zhang et al. (<xref ref-type="bibr" rid="ref17">17</xref>) also developed an automatic diagnostic method for PM based on heterogeneous biomedical data, integrating data from various sources, including imaging data, demographic/clinical data, and genotyping data, and ultimately using a multiple kernel learning (MKL) approach to accurately detect PM, achieving an average area under the curve (AUC) of 0.888. Chen et al. (<xref ref-type="bibr" rid="ref18">18</xref>) introduced a deep learning architecture for automating the diagnosis of glaucoma. This method used a convolutional neural network (CNN) (<xref ref-type="bibr" rid="ref19">19</xref>, <xref ref-type="bibr" rid="ref20">20</xref>) with four convolutional layers and two fully connected layers, combined with dropout and data augmentation strategies to enhance diagnostic performance. The method achieved AUC values of 0.831 and 0.887 on the ORIGA and SCES datasets, respectively (<xref ref-type="bibr" rid="ref18">18</xref>). Xu et al. (<xref ref-type="bibr" rid="ref21">21</xref>) proposed an automated detection method for tessellated fundus based on texture features, color features, and SVM, which achieved an ACC of 98%. Xu et al. (<xref ref-type="bibr" rid="ref22">22</xref>) proposed a method for detecting ocular disease based on multiple informatics domains. This method combined pre-learned SVM classifiers, effectively merging personal demographic data, genome information, and visual information from retinal fundus images. The final model obtained AUCs of 0.935 for glaucoma, 0.822 for age-related macular degeneration (AMD), and 0.946 for PM (<xref ref-type="bibr" rid="ref22">22</xref>). Septiarini et al. (<xref ref-type="bibr" rid="ref23">23</xref>) introduced a method based on statistical features to automatically detect peripapillary atrophy in retinal fundus images. This method involved four steps: optic nerve head (ONH) localization, ONH segmentation, preprocessing, and feature extraction. Through these steps, three key features were extracted: standard deviation (<italic>&#x03C3;</italic>), smoothness (S), and third moment (&#x03BC;3). Using a backpropagation neural network (BPNN), they achieved an ACC of 95% (<xref ref-type="bibr" rid="ref23">23</xref>). Rauf et al. (<xref ref-type="bibr" rid="ref24">24</xref>) proposed a CNN-based method for PM detection and obtained an ACC of 95%. Although these studies have achieved positive results, several challenges remain: (1) many advanced deep learning methods are emerging, but these technologies have not yet been applied in the field of PM detection; (2) given the uniqueness of the medical industry and the high requirements for model accuracy, model performance still needs to be improved; (3) owing to differences in actual medical facilities, the efficiency of these models in poorly equipped medical environments is an important problem that needs to be overcome; and (4) as models serve as an auxiliary diagnosis method, their interpretability is an important requirement, but current research in this area is still insufficient (<xref ref-type="bibr" rid="ref25 ref26 ref27 ref28">25&#x2013;28</xref>).</p>
<p>To address the aforementioned challenges, this study designed an improved model named PMPred-AE based on EfficientNetV2-L to automatically identify and diagnose PM. We further enhanced the model&#x2019;s ability to identify key features in retinal images by introducing an attention mechanism, thereby improving the accuracy of PM diagnosis. To provide visual explanations for the model&#x2019;s decision-making process, we also adopted the Gradient-weighted class activation mapping (Grad-CAM) technique. Our study provides an efficient, accurate, and explainable model for the detection of PM.</p>
</sec>
<sec sec-type="materials|methods" id="sec6">
<label>2</label>
<title>Materials and methods</title>
<sec id="sec7">
<label>2.1</label>
<title>Dataset construction</title>
<p>The study utilized the PALM Challenge dataset, comprising training, validation, and test images. The training set contains 400 images: 187 non-PM and 213 PM. Similarly, the validation set consists of 400 images, with 189 labeled as non-PM and 211 as PM. Additionally, the test set includes 400 images with corresponding labels: 187 categorized as non-PM and 213 as PM (<xref ref-type="bibr" rid="ref29">29</xref>). This dataset configuration enabled rigorous evaluation and validation of the proposed methodologies.</p>
</sec>
<sec id="sec8">
<label>2.2</label>
<title>Model design</title>
<p>The PMPred-AE architecture consists of two core components: a feature extractor and a classifier. For feature extraction, we chose EfficientNetV2-L, an advanced CNN model designed for fast image processing and strong performance. As an upgraded version of the EfficientNet series, EfficientNetV2-L was pre-trained on the massive ImageNet dataset, which covers millions of images and thousands of categories. Through its scalable architecture, EfficientNetV2-L balances network depth, width, and resolution to achieve optimal performance and efficiency. Compared to ViT-L/16 from the advanced vision transformer (ViT) series, EfficientNetV2-L achieves higher accuracy while training up to 7 times faster (<xref ref-type="bibr" rid="ref30">30</xref>). In particular, the model utilizes lightweight depthwise separable convolutions, significantly reducing computational burden and model size while maintaining efficient feature extraction. In the context of PM detection, EfficientNetV2-L can therefore efficiently identify key features in images and provide accurate input for the classifier, significantly improving the performance of the model. Moreover, its superior computing speed and efficiency make it well suited to medical environments with rudimentary equipment, providing strong technical support for early diagnosis and treatment. In the classification stage, we used a fully connected neural network improved with an attention mechanism. The core function of this improvement is to strengthen the model&#x2019;s attention to the most important parts of the input features. By assigning different weights to the input features, the attention mechanism allows the model to prioritize the features that contribute most to the final classification decision, rather than treating all input features equally. This dynamic weight allocation not only improves the model&#x2019;s understanding of the data but also increases its adaptability and flexibility, enabling it to focus automatically on the most critical information. Specifically, we used a linear layer to transform the features, mapped the result to values between 0 and 1 with the Softmax function, and finally multiplied these weights by the original input features to emphasize the features that contribute most to the classification result. This improvement is particularly important for the detection of PM, as it allows the model to pay special attention to the areas that reveal the pathological features of myopia. Through this mechanism, our model provides an efficient tool for the early diagnosis and treatment of PM.</p>
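<p>The authoritative implementation is available in the GitHub repository linked in the Discussion. Purely as an illustration of the design described above, a minimal PyTorch sketch of the backbone plus attention-weighted classifier might look as follows; the torchvision backbone variant and the attention layer&#x2019;s shape are assumptions rather than the exact released configuration:</p>
<preformat>
import torch
import torch.nn as nn
import torchvision.models as models

class PMPredAE(nn.Module):
    """Illustrative sketch: EfficientNetV2-L feature extractor with an
    attention-weighted fully connected classifier. Layer shapes are
    assumptions for illustration, not the exact published model."""
    def __init__(self, num_classes: int = 2):
        super().__init__()
        backbone = models.efficientnet_v2_l(weights="IMAGENET1K_V1")
        self.features = backbone.features           # pre-trained extractor
        self.pool = nn.AdaptiveAvgPool2d(1)
        feat_dim = 1280                              # EfficientNetV2-L output channels
        # Attention branch: a linear layer scores the features, and
        # Softmax maps the scores to weights between 0 and 1.
        self.attn = nn.Linear(feat_dim, feat_dim)
        self.classifier = nn.Linear(feat_dim, num_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        f = self.pool(self.features(x)).flatten(1)   # (batch, feat_dim)
        w = torch.softmax(self.attn(f), dim=1)       # dynamic feature weights
        return self.classifier(f * w)                # emphasize key features
</preformat>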
</sec>
<sec id="sec9">
<label>2.3</label>
<title>Grad-CAM</title>
<p>In order to visually explain the decision-making process of the CNN in PM detection tasks, we used the Grad-CAM technique to generate heatmaps. Through Grad-CAM, we can clearly see which areas receive more attention when the model makes a detection. This approach relies on the gradient information of the model, particularly the gradients flowing into the last convolutional layer, to highlight the regions that contribute most to the model&#x2019;s predictions. The working principle of Grad-CAM can be briefly described by the following mathematical expressions.</p>
<p>First, for each channel in the feature layer <inline-formula>
<mml:math id="M1">
<mml:mi>A</mml:mi>
</mml:math>
</inline-formula>, the global average pooling of the corresponding gradients is calculated to obtain the weight coefficient (<xref ref-type="disp-formula" rid="E1">Equation 1</xref>):</p>
<disp-formula id="E1">
<label>(1)</label>
<mml:math id="M2">
<mml:msubsup>
<mml:mi>&#x03B1;</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>c</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>Z</mml:mi>
</mml:mfrac>
<mml:munder>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mi>i</mml:mi>
</mml:munder>
<mml:munder>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mi>j</mml:mi>
</mml:munder>
<mml:mfrac>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:msup>
<mml:mi>y</mml:mi>
<mml:mi>c</mml:mi>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2202;</mml:mo>
<mml:msubsup>
<mml:mi>A</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mi>k</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<p>where, <inline-formula>
<mml:math id="M3">
<mml:msup>
<mml:mi>y</mml:mi>
<mml:mi>c</mml:mi>
</mml:msup>
</mml:math>
</inline-formula> is the output score of the model for category <inline-formula>
<mml:math id="M4">
<mml:mi>c</mml:mi>
</mml:math>
</inline-formula>, <inline-formula>
<mml:math id="M5">
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> is the activation value of the feature layer at position <inline-formula>
<mml:math id="M6">
<mml:mfenced open="(" close=")" separators=",">
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mfenced>
</mml:math>
</inline-formula>, <italic>k</italic> denotes the <italic>k</italic>-th channel of the feature layer <italic>A</italic>, and <inline-formula>
<mml:math id="M7">
<mml:mi>Z</mml:mi>
</mml:math>
</inline-formula> is the total number of units in the feature layer.</p>
<p>Then, the weight coefficients are multiplied by the activation values of the feature layer and summed over the channels. The final heatmap is generated by filtering through the <italic>ReLU</italic> function (<xref ref-type="disp-formula" rid="E2">Equation 2</xref>):</p>
<disp-formula id="E2">
<label>(2)</label>
<mml:math id="M8">
<mml:msubsup>
<mml:mi>L</mml:mi>
<mml:mrow>
<mml:mi mathvariant="italic">Grad</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>C</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>M</mml:mi>
</mml:mrow>
<mml:mi>c</mml:mi>
</mml:msubsup>
<mml:mo>=</mml:mo>
<mml:mi mathvariant="italic">ReLU</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:munder>
<mml:mstyle displaystyle="true">
<mml:mo stretchy="true">&#x2211;</mml:mo>
</mml:mstyle>
<mml:mi>k</mml:mi>
</mml:munder>
<mml:msubsup>
<mml:mi>&#x03B1;</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>c</mml:mi>
</mml:msubsup>
<mml:msup>
<mml:mi>A</mml:mi>
<mml:mi>k</mml:mi>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:math>
</disp-formula>
<p>This process ensures that only features that have a positive impact on the model&#x2019;s prediction for category <italic>c</italic> are visualized, thereby enhancing the clarity and interpretability of the model&#x2019;s decisions. By applying Grad-CAM to the PMPred-AE model, the heatmap clearly reveals that the model focuses on the locations of key pathological changes in the retinal image when identifying PM. The heatmap provided by Grad-CAM not only demonstrates the reason behind the model&#x2019;s high performance but also proves its focusing ability, which is crucial for improving the reliability of and trust in the model in practical medical applications. In this way, Grad-CAM provides healthcare professionals with an intuitive tool to better understand and explain the decision-making process of PMPred-AE, especially in medical diagnosis and treatment planning.</p>
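<p>As a concrete illustration of Equations 1 and 2, a minimal Grad-CAM sketch in PyTorch could be written as below; the use of forward/backward hooks and the final normalization step are common implementation choices rather than details stated in this article:</p>
<preformat>
import torch
import torch.nn.functional as F

def grad_cam(model, image, target_class, conv_layer):
    """Sketch of Grad-CAM (Equations 1 and 2). `conv_layer` is the
    last convolutional block of the network; `image` is (C, H, W)."""
    acts, grads = {}, {}
    h1 = conv_layer.register_forward_hook(
        lambda m, i, o: acts.update(a=o))
    h2 = conv_layer.register_full_backward_hook(
        lambda m, gi, go: grads.update(g=go[0]))

    score = model(image.unsqueeze(0))[0, target_class]  # y^c
    model.zero_grad()
    score.backward()
    h1.remove(); h2.remove()

    # Equation 1: weights = global average pooling of the gradients.
    alpha = grads["g"].mean(dim=(2, 3), keepdim=True)   # (1, K, 1, 1)
    # Equation 2: ReLU of the weighted sum of activation maps.
    cam = F.relu((alpha * acts["a"]).sum(dim=1))        # (1, H, W)
    return (cam / (cam.max() + 1e-8)).squeeze(0).detach()
</preformat>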
</sec>
<sec id="sec10">
<label>2.4</label>
<title>Parameter setting</title>
<p>The learning rate was set to 0.0001, the batch size to 8, and the number of epochs to 50; the optimizer was AdamW.</p>
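<p>A minimal sketch of this training configuration, assuming the model class and augmented dataset from the other sections, is:</p>
<preformat>
import torch
from torch.utils.data import DataLoader

# Assumed available: `train_set` (the augmented PALM training set)
# and `PMPredAE` (the model sketched in Section 2.2).
train_loader = DataLoader(train_set, batch_size=8, shuffle=True)
model = PMPredAE()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
criterion = torch.nn.CrossEntropyLoss()

for epoch in range(50):                  # 50 epochs
    for images, labels in train_loader:  # batches of 8
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
</preformat>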
</sec>
<sec id="sec11">
<label>2.5</label>
<title>Evaluation index</title>
<p>Several widely used evaluation indicators (<xref ref-type="bibr" rid="ref31 ref32 ref33 ref34 ref35 ref36 ref37">31&#x2013;37</xref>), including precision (Pre) (<xref ref-type="disp-formula" rid="E3">Equation 3</xref>), recall (Rec) (<xref ref-type="disp-formula" rid="E4">Equation 4</xref>), accuracy (ACC) (<xref ref-type="disp-formula" rid="E5">Equation 5</xref>), F1-score (F1) (<xref ref-type="disp-formula" rid="E6">Equation 6</xref>), and the Matthews correlation coefficient (MCC) (<xref ref-type="disp-formula" rid="E7">Equation 7</xref>), were utilized to evaluate the model&#x2019;s performance, defined as follows:</p>
<disp-formula id="E3">
<label>(3)</label>
<mml:math id="M9">
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="E4">
<label>(4)</label>
<mml:math id="M10">
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="E5">
<label>(5)</label>
<mml:math id="M11">
<mml:mi>A</mml:mi>
<mml:mi>C</mml:mi>
<mml:mi>C</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="E6">
<label>(6)</label>
<mml:math id="M12">
<mml:mi>F</mml:mi>
<mml:mn>1</mml:mn>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mi mathvariant="italic">PreRec</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="E7">
<label>(7)</label>
<mml:math id="M13">
<mml:mi>M</mml:mi>
<mml:mi>C</mml:mi>
<mml:mi>C</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>&#x00D7;</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>&#x00D7;</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:msqrt>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msqrt>
</mml:mfrac>
</mml:math>
</disp-formula>
<p>where <italic>TP</italic>, <italic>TN</italic>, <italic>FP</italic>, and <italic>FN</italic> represent the numbers of true positives, true negatives, false positives, and false negatives, respectively. We also drew the receiver operating characteristic (ROC) curve and the precision&#x2013;recall curve (PRC) and obtained the corresponding areas under the curves (AUC and AUPRC) (<xref ref-type="bibr" rid="ref27">27</xref>, <xref ref-type="bibr" rid="ref38 ref39 ref40 ref41">38&#x2013;41</xref>).</p>
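<p>These definitions translate directly into code; a minimal sketch that computes all five indicators from the confusion-matrix counts might read:</p>
<preformat>
import math

def evaluate(tp: int, tn: int, fp: int, fn: int) -> dict:
    """Compute Pre, Rec, ACC, F1, and MCC (Equations 3-7)."""
    pre = tp / (tp + fp)
    rec = tp / (tp + fn)
    acc = (tp + tn) / (tp + fp + tn + fn)
    f1 = 2 * pre * rec / (pre + rec)
    mcc = (tp * tn - fp * fn) / math.sqrt(
        (tp + fp) * (tn + fn) * (tp + fn) * (tn + fp))
    return {"Pre": pre, "Rec": rec, "ACC": acc, "F1": f1, "MCC": mcc}
</preformat>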
</sec>
</sec>
<sec sec-type="results" id="sec12">
<label>3</label>
<title>Results</title>
<sec id="sec13">
<label>3.1</label>
<title>Overview of experiment</title>
<p>In our experiment, we first adopted data augmentation techniques to enrich and expand the original dataset and create more diverse training samples. The augmentation included operations such as image rotation, resizing, and cropping, designed to simulate different shooting conditions and perspectives and thereby improve the model&#x2019;s generalization and robustness. The augmented dataset was used to train our PMPred-AE model, which is based on the EfficientNetV2-L architecture and optimized to meet the specific requirements of PM detection. EfficientNetV2-L is the foundation of our model; it has been pre-trained on the ImageNet dataset and therefore has strong feature extraction capabilities (<xref ref-type="bibr" rid="ref42">42</xref>, <xref ref-type="bibr" rid="ref43">43</xref>). To further improve performance, we introduced an attention mechanism in the fully connected layer of the model. This mechanism enables the model to focus on the key areas of the image related to PM diagnosis, thereby improving diagnostic accuracy. During training, the model parameters were adjusted based on performance on the validation set to achieve the optimal configuration. After training, we visualized the output of the model at different levels (shallow, middle, and deep). This step helped us understand how the model gradually extracts and utilizes image features. In addition, we used Grad-CAM technology to generate heatmaps that highlight the areas the model focuses on when making predictions. In this way, we can not only verify the decision-making process of the model but also provide intuitive visual explanations that help doctors better understand the basis of its decisions. Overall, our experiment combined data augmentation, attention mechanisms, an advanced model architecture, and explanatory techniques to develop an efficient, accurate, and explainable model for the detection of PM (<xref ref-type="fig" rid="fig1">Figure 1</xref>).</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Experimental workflow overview diagram.</p>
</caption>
<graphic xlink:href="fmed-12-1529335-g001.tif"/>
</fig>
</sec>
<sec id="sec14">
<label>3.2</label>
<title>Data augmentation</title>
<p>Due to the difficulty of collecting and annotating pathological images, only a small number of samples can normally be collected. Data augmentation was therefore essential: it effectively reduces over-fitting and allows the model to learn more general knowledge rather than focusing on noise and idiosyncratic features, thereby improving the generalization and robustness of the model (<xref ref-type="bibr" rid="ref44 ref45 ref46">44&#x2013;46</xref>). In this study, we employed a combined augmentation procedure. The images were initially resized to 256&#x00D7;256 pixels and subsequently randomly cropped to 224&#x00D7;224 pixels, with anti-aliasing applied to preserve image quality. To increase visual variety, the probabilities of horizontal and vertical flipping were each set to 50%. The procedure also incorporated subtle random affine transformations, including rotations between &#x2212;10 and 10 degrees, translations of up to 10% of the image width or height, and scaling between 90 and 110%. Furthermore, random erasure was applied with a 50% probability, covering a small portion of the image to enhance the model&#x2019;s ability to handle occlusion (<xref ref-type="fig" rid="fig2">Figure 2</xref>). Finally, the images were converted into tensors and normalized with a specific mean and standard deviation to suit model training. These methods addressed the following issues: random cropping and resizing simulated doctors observing the eye from different distances and angles; random rotation and affine transformation helped the model identify pathological features from multiple angles; random erasure simulated potential occlusions during actual medical image acquisition; normalization ensured consistency of the image data during training; and anti-aliasing maintained the clarity of image details, which is crucial for identifying pathological features. By introducing varied visual perturbations, this comprehensive data augmentation strategy helps the model extract valuable features from diverse image transformations, enhancing performance and robustness in real-world scenarios.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Data augmentation result diagram.</p>
</caption>
<graphic xlink:href="fmed-12-1529335-g002.tif"/>
</fig>
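<p>The pipeline described above maps closely onto standard torchvision transforms. The following sketch reflects the stated parameters; the normalization statistics (here the common ImageNet values) are an assumption:</p>
<preformat>
from torchvision import transforms

train_transform = transforms.Compose([
    transforms.Resize((256, 256), antialias=True),  # resize with anti-aliasing
    transforms.RandomCrop(224),                     # random 224x224 crop
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomVerticalFlip(p=0.5),
    transforms.RandomAffine(degrees=10,             # rotations in [-10, 10] degrees
                            translate=(0.1, 0.1),   # shifts up to 10%
                            scale=(0.9, 1.1)),      # scaling 90-110%
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # assumed ImageNet stats
                         std=[0.229, 0.224, 0.225]),
    transforms.RandomErasing(p=0.5),                # simulate occlusion
])
</preformat>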
</sec>
<sec id="sec15">
<label>3.3</label>
<title>Model validation</title>
<p>A series of experiments showed that the PMPred-AE model exhibits excellent performance in PM classification. First, the model was trained on the training set to ensure a sufficient learning foundation and the ability to capture the key features and patterns in the data (<xref ref-type="bibr" rid="ref47 ref48 ref49 ref50">47&#x2013;50</xref>). The validation set was then used to tune the model&#x2019;s parameters, further improving its performance and ensuring its generalization to unseen data (<xref ref-type="bibr" rid="ref51">51</xref>, <xref ref-type="bibr" rid="ref52">52</xref>). The experimental results showed that PMPred-AE performed well on the test set, with all evaluation indicators reaching a very high level: ACC, F1, Pre, Rec, and MCC values of 0.9725, 0.9744, 0.9676, 0.9812, and 0.9448, respectively. This indicates that PMPred-AE can effectively distinguish PM from non-PM (<xref ref-type="fig" rid="fig3">Figure 3A</xref>, <xref ref-type="table" rid="tab1">Table 1</xref>). In addition, by plotting the ROC and PRC, we observed that the PMPred-AE model achieved high AUC and AUPRC values of 0.9955 and 0.9962, respectively, further demonstrating its efficiency in feature extraction and its capability in recognizing PM (<xref ref-type="fig" rid="fig3">Figures 3B</xref>,<xref ref-type="fig" rid="fig3">C</xref>). Finally, we used t-SNE to visualize the output of the model (<xref ref-type="fig" rid="fig3">Figure 3D</xref>) (<xref ref-type="bibr" rid="ref53">53</xref>). The results showed that PM and non-PM are clearly separated in the low-dimensional space, indicating that the model effectively represents their features and captures the complex patterns and structural differences between them. This further suggests that the PMPred-AE model has broad application prospects in clinical practice.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Model validation result diagram. <bold>(A)</bold> Evaluation results of the model. <bold>(B)</bold> ROC results of the model. <bold>(C)</bold> PRC results of the model. <bold>(D)</bold> t-SNE visualization of the model output.</p>
</caption>
<graphic xlink:href="fmed-12-1529335-g003.tif"/>
</fig>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Performance evaluation of the model.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Method</th>
<th align="center" valign="top">ACC</th>
<th align="center" valign="top">Pre</th>
<th align="center" valign="top">Rec</th>
<th align="center" valign="top">F1</th>
<th align="center" valign="top">ROC</th>
<th align="center" valign="top">MCC</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Train</td>
<td align="center" valign="middle">0.9850</td>
<td align="center" valign="middle">0.9814</td>
<td align="center" valign="middle">0.9906</td>
<td align="center" valign="middle">0.9860</td>
<td align="center" valign="middle">0.9974</td>
<td align="center" valign="middle">0.9699</td>
</tr>
<tr>
<td align="left" valign="top">Val</td>
<td align="center" valign="middle">0.9825</td>
<td align="center" valign="middle">0.9857</td>
<td align="center" valign="middle">0.9810</td>
<td align="center" valign="middle">0.9834</td>
<td align="center" valign="middle">0.9986</td>
<td align="center" valign="middle">0.9649</td>
</tr>
<tr>
<td align="left" valign="top">Test</td>
<td align="center" valign="middle">0.9725</td>
<td align="center" valign="middle">0.9676</td>
<td align="center" valign="middle">0.9812</td>
<td align="center" valign="middle">0.9744</td>
<td align="center" valign="middle">0.9955</td>
<td align="center" valign="middle">0.9448</td>
</tr>
</tbody>
</table>
</table-wrap>
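<p>A projection like the one in <xref ref-type="fig" rid="fig3">Figure 3D</xref> can be produced in a few lines; in the sketch below, <monospace>features</monospace> and <monospace>labels</monospace> are assumed to hold the model&#x2019;s test-set embeddings and their PM/non-PM tags:</p>
<preformat>
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

# Assumed inputs: `features` is an (N, D) array of model embeddings
# for the test images; `labels` is (N,) with 0 = non-PM, 1 = PM.
embedded = TSNE(n_components=2, random_state=0).fit_transform(features)
for cls, name in [(0, "non-PM"), (1, "PM")]:
    mask = labels == cls
    plt.scatter(embedded[mask, 0], embedded[mask, 1], s=8, label=name)
plt.legend()
plt.show()
</preformat>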
</sec>
<sec id="sec16">
<label>3.4</label>
<title>Model explanations</title>
<p>To further confirm that PMPred-AE effectively extracts features, we visualized the outputs of the model&#x2019;s shallow, middle, and deep layers. It can be clearly observed that as the depth increases, the model extracts more abstract, higher-level features, demonstrating that the hierarchical structure of PMPred-AE effectively supports the gradual extraction and refinement of features (<xref ref-type="fig" rid="fig4">Figure 4A</xref>). Next, to further investigate why PMPred-AE can efficiently distinguish PM from non-PM, we used Grad-CAM to generate heatmaps revealing the areas the model focuses on when making predictions, thus providing an explanation of the model&#x2019;s decision-making process (<xref ref-type="fig" rid="fig4">Figure 4B</xref>). The heatmaps revealed that the PMPred-AE model focuses on the locations of the key pathological changes in the image when identifying PM. These locations are often key to distinguishing PM from non-PM, which explains why the model achieves high accuracy. This focusing ability not only improves the prediction performance of the model but also increases its reliability and credibility in practical applications, especially in medical diagnosis and treatment planning.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Model explanation display diagram. <bold>(A)</bold> Visualize the output results of shallow, middle, and deep layers of the model. <bold>(B)</bold> Visualization results of Grad-CAM.</p>
</caption>
<graphic xlink:href="fmed-12-1529335-g004.tif"/>
</fig>
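<p>Intermediate feature maps such as those in <xref ref-type="fig" rid="fig4">Figure 4A</xref> are typically captured with forward hooks; in the sketch below, which blocks of the backbone count as shallow, middle, and deep is an assumption:</p>
<preformat>
import torch

def capture_feature_maps(model, image):
    """Sketch: record shallow/middle/deep activations with forward
    hooks. The block indices into `model.features` are assumptions."""
    maps, handles = {}, []
    for name, idx in [("shallow", 1), ("middle", 4), ("deep", 7)]:
        handles.append(model.features[idx].register_forward_hook(
            lambda m, i, o, n=name: maps.update({n: o.detach()})))
    with torch.no_grad():
        model(image.unsqueeze(0))        # one forward pass fills `maps`
    for h in handles:
        h.remove()
    return maps  # each entry: a (1, C, H, W) feature map to visualize
</preformat>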
</sec>
<sec id="sec17">
<label>3.5</label>
<title>Comparison with existing works</title>
<p>To further demonstrate the performance of PMPred-AE in detecting PM, we compared the proposed model with existing studies. However, the studies mentioned earlier did not share their source code and used different datasets, making a fair comparison impossible. Fortunately, we could use the PALM benchmark results from 2023 (Base-2023) (<xref ref-type="bibr" rid="ref29">29</xref>). The experimental results showed that PMPred-AE is superior to Base-2023 across all evaluation metrics (<xref ref-type="fig" rid="fig5">Figure 5</xref>, <xref ref-type="table" rid="tab2">Table 2</xref>). This comparison further consolidated the validation of the PMPred-AE model and provided more reliable support for its application in clinical practice.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Comparison diagram of Base-2023.</p>
</caption>
<graphic xlink:href="fmed-12-1529335-g005.tif"/>
</fig>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Comparison with published results.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Method</th>
<th align="center" valign="top">ACC</th>
<th align="center" valign="top">Pre</th>
<th align="center" valign="top">Rec</th>
<th align="center" valign="top">F1</th>
<th align="center" valign="top">ROC</th>
<th align="center" valign="top">MCC</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Base-2023</td>
<td align="center" valign="top">0.968</td>
<td align="center" valign="top">/</td>
<td align="center" valign="top">0.962</td>
<td align="center" valign="top">0.969</td>
<td align="center" valign="top">0.994</td>
<td align="center" valign="top">/</td>
</tr>
<tr>
<td align="left" valign="top">PMPred-AE</td>
<td align="center" valign="middle"><bold>0.9725</bold></td>
<td align="center" valign="middle"><bold>0.9676</bold></td>
<td align="center" valign="middle"><bold>0.9812</bold></td>
<td align="center" valign="middle"><bold>0.9744</bold></td>
<td align="center" valign="middle"><bold>0.9955</bold></td>
<td align="center" valign="middle"><bold>0.9448</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Bold values indicate the best-performing classifier.</p>
</table-wrap-foot>
</table-wrap>
</sec>
</sec>
<sec sec-type="discussion" id="sec18">
<label>4</label>
<title>Discussion</title>
<p>In this study, we designed an improved EfficientNetV2-L model based on an attention mechanism (PMPred-AE) for the automatic detection of PM. By using EfficientNetV2-L as the basic architecture for feature extraction and introducing attention-based improvements in the classification stage, the PMPred-AE model can efficiently identify key features in eye images and significantly improve prediction performance. Data augmentation techniques, including image rotation, resizing, and cropping, were used to expand the training samples and improve the model&#x2019;s generalization ability and reliability. In addition, Grad-CAM technology was introduced to generate heatmaps, providing a visual means of explaining the decision process of PMPred-AE in identifying PM. The heatmaps generated by Grad-CAM clearly show the areas the model focuses on when making predictions, thereby enhancing the clarity and interpretability of its decisions. Compared with existing work, PMPred-AE showed significant improvement in ACC, Rec, AUC, and F1, confirming its leading position in the field of PM detection and providing strong support for its application in clinical practice.</p>
<p>The PMPred-AE model demonstrates significant potential and scalability in the field of medical image analysis. In addition to effectively detecting PM, PMPred-AE is also applicable to various medical imaging tasks, including the analysis of tumors, brain diseases, and lung diseases. Despite the unique characteristics of different medical images, PMPred-AE offers an efficient and interpretable framework that can be applied across diverse medical scenarios, showcasing substantial clinical application potential. The clinical value of PMPred-AE lies not only in its high accuracy and efficiency but also in its seamless integration with existing healthcare systems. The model can directly process images generated by standard medical devices without requiring additional workflows. Furthermore, PMPred-AE uses Grad-CAM technology to generate heatmaps that visualize the regions the model focuses on, helping physicians make more precise clinical decisions. The model&#x2019;s lightweight design ensures efficient operation even in resource-constrained environments, making it particularly suitable for regions with limited healthcare resources. However, there are several challenges to be addressed in the deployment of PMPred-AE in practice. First, the quality and diversity of fundus images may vary due to differences in imaging devices and conditions, potentially affecting model performance. To address this, we can enhance the model&#x2019;s generalization ability by expanding the training dataset and incorporating data augmentation techniques. Second, although the model employs an efficient network architecture, inference speed and computational resource requirements could become limiting factors in resource-constrained environments. To mitigate this, we plan to deploy the model on the cloud, leveraging cloud computing resources for inference to reduce the local computational burden. In summary, while the deployment of PMPred-AE faces several challenges, improvements in data quality, optimization of computational resources, and enhanced model robustness can effectively address these issues, ensuring the successful application of the model in clinical practice.</p>
<p>In summary, this research successfully developed an efficient, accurate, and explainable model for the detection of PM by combining advanced model architecture, attention mechanism, and explanatory techniques. This comprehensive method not only improved the performance of the model, but also provided a valuable reference for clinical diagnosis, demonstrating the great potential of deep learning in the field of medical image analysis. In the future, with the continuous advancement of algorithms and technology, such models are expected to play a greater role in improving the efficiency and accuracy of PM diagnosis. The source code has been uploaded to GitHub and can be accessed at: <ext-link xlink:href="https://github.com/ZhangHongqi215/PMPred-AE" ext-link-type="uri">https://github.com/ZhangHongqi215/PMPred-AE</ext-link>.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec19">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec sec-type="author-contributions" id="sec20">
<title>Author contributions</title>
<p>H-QZ: Data curation, Formal analysis, Investigation, Methodology, Resources, Software, Writing &#x2013; original draft. MA: Investigation, Methodology, Validation, Writing &#x2013; original draft. MT: Validation, Writing &#x2013; original draft. SA: Methodology, Validation, Writing &#x2013; original draft. PC: Formal analysis, Investigation, Writing &#x2013; original draft. YZ: Conceptualization, Funding acquisition, Project administration, Writing &#x2013; review &#x0026; editing. HT: Conceptualization, Funding acquisition, Project administration, Supervision, Writing &#x2013; review &#x0026; editing. HL: Conceptualization, Funding acquisition, Project administration, Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="funding-information" id="sec21">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. This work was supported by the National Natural Science Foundation of China (62250028 and 62172343) and the Sichuan Science and Technology Program (2022YFS0614).</p>
</sec>
<sec sec-type="COI-statement" id="sec22">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec23">
<title>Generative AI statement</title>
<p>The author(s) declare that no Generative AI was used in the creation of this manuscript.</p>
</sec>
<sec sec-type="disclaimer" id="sec24">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><label>1.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Morgan</surname> <given-names>IG</given-names></name> <name><surname>French</surname> <given-names>AN</given-names></name> <name><surname>Ashby</surname> <given-names>RS</given-names></name> <name><surname>Guo</surname> <given-names>X</given-names></name> <name><surname>Ding</surname> <given-names>X</given-names></name> <name><surname>He</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>The epidemics of myopia: aetiology and prevention</article-title>. <source>Prog Retin Eye Res</source>. (<year>2018</year>) <volume>62</volume>:<fpage>134</fpage>&#x2013;<lpage>49</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.preteyeres.2017.09.004</pub-id></citation></ref>
<ref id="ref2"><label>2.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hemelings</surname> <given-names>R</given-names></name> <name><surname>Elen</surname> <given-names>B</given-names></name> <name><surname>Blaschko</surname> <given-names>MB</given-names></name> <name><surname>Jacob</surname> <given-names>J</given-names></name> <name><surname>Stalmans</surname> <given-names>I</given-names></name> <name><surname>De Boever</surname> <given-names>P</given-names></name></person-group>. <article-title>Pathological myopia classification with simultaneous lesion segmentation using deep learning</article-title>. <source>Comput Methods Prog Biomed</source>. (<year>2021</year>) <volume>199</volume>:<fpage>105920</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cmpb.2020.105920</pub-id>, PMID: <pub-id pub-id-type="pmid">33412285</pub-id></citation></ref>
<ref id="ref3"><label>3.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Saw</surname> <given-names>SM</given-names></name> <name><surname>Gazzard</surname> <given-names>G</given-names></name> <name><surname>Shih-Yen</surname> <given-names>EC</given-names></name> <name><surname>Chua</surname> <given-names>WH</given-names></name></person-group>. <article-title>Myopia and associated pathological complications</article-title>. <source>Ophthalmic Physiol Opt</source>. (<year>2005</year>) <volume>25</volume>:<fpage>381</fpage>&#x2013;<lpage>91</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1475-1313.2005.00298.x</pub-id>, PMID: <pub-id pub-id-type="pmid">16101943</pub-id></citation></ref>
<ref id="ref4"><label>4.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Holden</surname> <given-names>BA</given-names></name> <name><surname>Fricke</surname> <given-names>TR</given-names></name> <name><surname>Wilson</surname> <given-names>DA</given-names></name> <name><surname>Jong</surname> <given-names>M</given-names></name> <name><surname>Naidoo</surname> <given-names>KS</given-names></name> <name><surname>Sankaridurg</surname> <given-names>P</given-names></name> <etal/></person-group>. <article-title>Global prevalence of myopia and high myopia and temporal trends from 2000 through 2050</article-title>. <source>Ophthalmology</source>. (<year>2016</year>) <volume>123</volume>:<fpage>1036</fpage>&#x2013;<lpage>42</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ophtha.2016.01.006</pub-id>, PMID: <pub-id pub-id-type="pmid">26875007</pub-id></citation></ref>
<ref id="ref5"><label>5.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>J</given-names></name> <name><surname>Ouyang</surname> <given-names>X</given-names></name> <name><surname>Fu</surname> <given-names>H</given-names></name> <name><surname>Hou</surname> <given-names>X</given-names></name> <name><surname>Liu</surname> <given-names>Y</given-names></name> <name><surname>Xie</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>Advances in biomedical study of the myopia-related signaling pathways and mechanisms</article-title>. <source>Biomed Pharmacother</source>. (<year>2022</year>) <volume>145</volume>:<fpage>112472</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.biopha.2021.112472</pub-id>, PMID: <pub-id pub-id-type="pmid">34861634</pub-id></citation></ref>
<ref id="ref6"><label>6.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jonas</surname> <given-names>JB</given-names></name> <name><surname>Jonas</surname> <given-names>RA</given-names></name> <name><surname>Bikbov</surname> <given-names>MM</given-names></name> <name><surname>Wang</surname> <given-names>YX</given-names></name> <name><surname>Panda-Jonas</surname> <given-names>S</given-names></name></person-group>. <article-title>Myopia: histology, clinical features, and potential implications for the etiology of axial elongation</article-title>. <source>Prog Retin Eye Res</source>. (<year>2023</year>) <volume>96</volume>:<fpage>101156</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.preteyeres.2022.101156</pub-id>, PMID: <pub-id pub-id-type="pmid">36585290</pub-id></citation></ref>
<ref id="ref7"><label>7.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Agyekum</surname> <given-names>S</given-names></name> <name><surname>Chan</surname> <given-names>PP</given-names></name> <name><surname>Zhang</surname> <given-names>Y</given-names></name> <name><surname>Huo</surname> <given-names>Z</given-names></name> <name><surname>Yip</surname> <given-names>BHK</given-names></name> <name><surname>Ip</surname> <given-names>P</given-names></name> <etal/></person-group>. <article-title>Cost-effectiveness analysis of myopia management: a systematic review</article-title>. <source>Front Public Health</source>. (<year>2023</year>) <volume>11</volume>:<fpage>1093836</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpubh.2023.1093836</pub-id>, PMID: <pub-id pub-id-type="pmid">36923029</pub-id></citation></ref>
<ref id="ref8"><label>8.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wei</surname> <given-names>L</given-names></name> <name><surname>He</surname> <given-names>W</given-names></name> <name><surname>Malik</surname> <given-names>A</given-names></name> <name><surname>Su</surname> <given-names>R</given-names></name> <name><surname>Cui</surname> <given-names>L</given-names></name> <name><surname>Manavalan</surname> <given-names>B</given-names></name></person-group>. <article-title>Computational prediction and interpretation of cell-specific replication origin sites from multiple eukaryotes by exploiting stacking framework</article-title>. <source>Brief Bioinform</source>. (<year>2020</year>) <volume>22</volume>:<fpage>bbaa275</fpage>. doi: <pub-id pub-id-type="doi">10.1093/bib/bbaa275</pub-id>, PMID: <pub-id pub-id-type="pmid">33152766</pub-id></citation></ref>
<ref id="ref9"><label>9.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>T</given-names></name> <name><surname>Qiao</surname> <given-names>H</given-names></name> <name><surname>Wang</surname> <given-names>Z</given-names></name> <name><surname>Yang</surname> <given-names>X</given-names></name> <name><surname>Pan</surname> <given-names>X</given-names></name> <name><surname>Yang</surname> <given-names>Y</given-names></name> <etal/></person-group>. <article-title>CodLncScape provides a self-enriching framework for the systematic collection and exploration of coding LncRNAs</article-title>. <source>Adv Sci</source>. (<year>2024</year>) <volume>11</volume>:<fpage>e2400009</fpage>. doi: <pub-id pub-id-type="doi">10.1002/advs.202400009</pub-id>, PMID: <pub-id pub-id-type="pmid">38602457</pub-id></citation></ref>
<ref id="ref10"><label>10.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alhatemi</surname> <given-names>RAJ</given-names></name> <name><surname>Sava&#x015F;</surname> <given-names>S</given-names></name></person-group>. <article-title>A weighted ensemble approach with multiple pre-trained deep learning models for classification of stroke</article-title>. <source>Medinformatics</source>. (<year>2023</year>) <volume>1</volume>:<fpage>10</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.47852/bonviewMEDIN32021963</pub-id></citation></ref>
<ref id="ref11"><label>11.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Y</given-names></name> <name><surname>Zhai</surname> <given-names>Y.</given-names></name> <name><surname>Ding</surname> <given-names>Y.</given-names></name> <name><surname>Zou</surname> <given-names>Q</given-names></name></person-group>. SBSM-Pro: support bio-sequence machine for proteins. <italic>arXiv</italic> [Preprint]. <italic>arXiv:2308.10275</italic> (<year>2023</year>).</citation></ref>
<ref id="ref12"><label>12.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Y</given-names></name> <name><surname>Zhang</surname> <given-names>W</given-names></name> <name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Sun</surname> <given-names>J</given-names></name> <name><surname>Wang</surname> <given-names>L</given-names></name></person-group>. <article-title>Survival prediction of esophageal squamous cell carcinoma based on the prognostic index and sparrow search algorithm-support vector machine</article-title>. <source>Curr Bioinforma</source>. (<year>2023</year>) <volume>18</volume>:<fpage>598</fpage>&#x2013;<lpage>609</lpage>. doi: <pub-id pub-id-type="doi">10.2174/1574893618666230419084754</pub-id></citation></ref>
<ref id="ref13"><label>13.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>B</given-names></name></person-group>. <article-title>BioSeq-analysis: a platform for DNA, RNA and protein sequence analysis based on machine learning approaches</article-title>. <source>Brief Bioinform</source>. (<year>2019</year>) <volume>20</volume>:<fpage>1280</fpage>&#x2013;<lpage>94</lpage>. doi: <pub-id pub-id-type="doi">10.1093/bib/bbx165</pub-id>, PMID: <pub-id pub-id-type="pmid">29272359</pub-id></citation></ref>
<ref id="ref14"><label>14.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J</given-names></name> <name><surname>Wong</surname> <given-names>DW</given-names></name> <name><surname>Lim</surname> <given-names>JH</given-names></name> <name><surname>Tan</surname> <given-names>NM</given-names></name> <name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Li</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Detection of pathological myopia by PAMELA with texture-based features through an SVM approach</article-title>. <source>J Healthc Eng</source>. (<year>2010</year>) <volume>1</volume>:<fpage>1</fpage>&#x2013;<lpage>11</lpage>. doi: <pub-id pub-id-type="doi">10.1260/2040-2295.1.1.1</pub-id></citation></ref>
<ref id="ref15"><label>15.</label><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Cheng</surname> <given-names>J</given-names></name> <name><surname>Liu</surname> <given-names>J</given-names></name> <name><surname>Sheri</surname> <given-names>YCM</given-names></name> <name><surname>Kong</surname> <given-names>CC</given-names></name> <name><surname>Mei</surname> <given-names>SS</given-names></name></person-group>, editors. <article-title>Pathological myopia detection from selective fundus image features</article-title>. <conf-name>2012 7th IEEE conference on industrial electronics and applications (ICIEA). IEEE</conf-name>; (<year>2012</year>)</citation></ref>
<ref id="ref16"><label>16.</label><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>Y</given-names></name> <name><surname>Liu</surname> <given-names>J</given-names></name> <name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Tan</surname> <given-names>NM</given-names></name> <name><surname>Wong</surname> <given-names>DWK</given-names></name> <name><surname>Saw</surname> <given-names>SM</given-names></name> <etal/></person-group>., editors. <article-title>Learn to recognize pathological myopia in fundus images using bag-of-feature and sparse learning approach</article-title>. <conf-name>2013 IEEE 10th International Symposium on Biomedical Imaging. IEEE</conf-name>; (<year>2013</year>)</citation></ref>
<ref id="ref17"><label>17.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Xu</surname> <given-names>Y</given-names></name> <name><surname>Liu</surname> <given-names>J</given-names></name> <name><surname>Wong</surname> <given-names>DWK</given-names></name> <name><surname>Kwoh</surname> <given-names>CK</given-names></name> <name><surname>Saw</surname> <given-names>S-M</given-names></name> <etal/></person-group>. <article-title>Automatic diagnosis of pathological myopia from heterogeneous biomedical data</article-title>. <source>PLoS One</source>. (<year>2013</year>) <volume>8</volume>:<fpage>e65736</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0065736</pub-id>, PMID: <pub-id pub-id-type="pmid">23799040</pub-id></citation></ref>
<ref id="ref18"><label>18.</label><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>X</given-names></name> <name><surname>Xu</surname> <given-names>Y</given-names></name> <name><surname>Wong</surname> <given-names>DWK</given-names></name> <name><surname>Wong</surname> <given-names>TY</given-names></name> <name><surname>Liu</surname> <given-names>J</given-names></name></person-group>, editors. <article-title>Glaucoma detection based on deep convolutional neural network</article-title>. <conf-name>2015 37th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC). IEEE</conf-name>; (<year>2015</year>)</citation></ref>
<ref id="ref19"><label>19.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Luo</surname> <given-names>X</given-names></name> <name><surname>Wang</surname> <given-names>Y</given-names></name> <name><surname>Zou</surname> <given-names>Q</given-names></name> <name><surname>Xu</surname> <given-names>L</given-names></name></person-group>. <article-title>Recall DNA methylation levels at low coverage sites using a CNN model in WGBs</article-title>. <source>PLoS Comput Biol</source>. (<year>2023</year>) <volume>19</volume>:<fpage>e1011205</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pcbi.1011205</pub-id>, PMID: <pub-id pub-id-type="pmid">37315069</pub-id></citation></ref>
<ref id="ref20"><label>20.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dou</surname> <given-names>LJ</given-names></name> <name><surname>Zhang</surname> <given-names>ZL</given-names></name> <name><surname>Xu</surname> <given-names>L</given-names></name> <name><surname>Zou</surname> <given-names>Q</given-names></name></person-group>. <article-title>iKcr_CNN: a novel computational tool for imbalance classification of human nonhistone crotonylation sites based on convolutional neural networks with focal loss</article-title>. <source>Comput Struct Biotechnol J</source>. (<year>2022</year>) <volume>20</volume>:<fpage>3268</fpage>&#x2013;<lpage>79</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.csbj.2022.06.032</pub-id>, PMID: <pub-id pub-id-type="pmid">35832615</pub-id></citation></ref>
<ref id="ref21"><label>21.</label><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>M</given-names></name> <name><surname>Cheng</surname> <given-names>J</given-names></name> <name><surname>Wong</surname> <given-names>DWK</given-names></name> <name><surname>Cheng</surname> <given-names>C-Y</given-names></name> <name><surname>Saw</surname> <given-names>SM</given-names></name> <name><surname>Wong</surname> <given-names>TY</given-names></name></person-group>, editors. <article-title>Automated tessellated fundus detection in color fundus images</article-title>. <conf-name>Proceedings of the Ophthalmic Medical Image Analysis International Workshop, University of Iowa</conf-name>; (<year>2016</year>)</citation></ref>
<ref id="ref22"><label>22.</label><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>Y</given-names></name> <name><surname>Duan</surname> <given-names>L</given-names></name> <name><surname>Fu</surname> <given-names>H</given-names></name> <name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Zhao</surname> <given-names>W</given-names></name> <name><surname>You</surname> <given-names>T</given-names></name> <etal/></person-group>., editors. <article-title>Ocular disease detection from multiple informatics domains</article-title>. <conf-name>2018 IEEE 15th international symposium on biomedical imaging (ISBI 2018). IEEE</conf-name>; (<year>2018</year>).</citation></ref>
<ref id="ref23"><label>23.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Septiarini</surname> <given-names>A</given-names></name> <name><surname>Harjoko</surname> <given-names>A</given-names></name> <name><surname>Pulungan</surname> <given-names>R</given-names></name> <name><surname>Ekantini</surname> <given-names>R</given-names></name></person-group>. <article-title>Automatic detection of peripapillary atrophy in retinal fundus images using statistical features</article-title>. <source>Biomed Signal Process Control</source>. (<year>2018</year>) <volume>45</volume>:<fpage>151</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2018.05.028</pub-id></citation></ref>
<ref id="ref24"><label>24.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rauf</surname> <given-names>N</given-names></name> <name><surname>Gilani</surname> <given-names>SO</given-names></name> <name><surname>Waris</surname> <given-names>A</given-names></name></person-group>. <article-title>Automatic detection of pathological myopia using machine learning</article-title>. <source>Sci Rep</source>. (<year>2021</year>) <volume>11</volume>:<fpage>16570</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-021-95205-1</pub-id>, PMID: <pub-id pub-id-type="pmid">34400662</pub-id></citation></ref>
<ref id="ref25"><label>25.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wei</surname> <given-names>L</given-names></name> <name><surname>Tang</surname> <given-names>J</given-names></name> <name><surname>Zou</surname> <given-names>Q</given-names></name></person-group>. <article-title>Local-DPP: an improved DNA-binding protein prediction method by exploring local evolutionary information</article-title>. <source>Inf Sci</source>. (<year>2017</year>) <volume>384</volume>:<fpage>135</fpage>&#x2013;<lpage>44</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ins.2016.06.026</pub-id></citation></ref>
<ref id="ref26"><label>26.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y</given-names></name> <name><surname>Liu</surname> <given-names>C</given-names></name> <name><surname>Liu</surname> <given-names>M</given-names></name> <name><surname>Liu</surname> <given-names>T</given-names></name> <name><surname>Lin</surname> <given-names>H</given-names></name> <name><surname>Huang</surname> <given-names>C-B</given-names></name> <etal/></person-group>. <article-title>Attention is all You need: utilizing attention in AI-enabled drug discovery</article-title>. <source>Brief Bioinform</source>. (<year>2024</year>) <volume>25</volume>:<fpage>bbad467</fpage>. doi: <pub-id pub-id-type="doi">10.1093/bib/bbad467</pub-id>, PMID: <pub-id pub-id-type="pmid">38189543</pub-id></citation></ref>
<ref id="ref27"><label>27.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>T</given-names></name> <name><surname>Huang</surname> <given-names>J</given-names></name> <name><surname>Luo</surname> <given-names>D</given-names></name> <name><surname>Ren</surname> <given-names>L</given-names></name> <name><surname>Ning</surname> <given-names>L</given-names></name> <name><surname>Huang</surname> <given-names>J</given-names></name> <etal/></person-group>. <article-title>Cm-siRPred: predicting chemically modified SiRNA efficiency based on multi-view learning strategy</article-title>. <source>Int J Biol Macromol</source>. (<year>2024</year>) <volume>264</volume>:<fpage>130638</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijbiomac.2024.130638</pub-id>, PMID: <pub-id pub-id-type="pmid">38460652</pub-id></citation></ref>
<ref id="ref28"><label>28.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>Y</given-names></name> <name><surname>Liu</surname> <given-names>T</given-names></name> <name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Kang</surname> <given-names>J</given-names></name> <name><surname>Ren</surname> <given-names>L</given-names></name> <name><surname>Ding</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Acvpred: enhanced prediction of anti-coronavirus peptides by transfer learning combined with data augmentation</article-title>. <source>Futur Gener Comput Syst</source>. (<year>2024</year>) <volume>160</volume>:<fpage>305</fpage>&#x2013;<lpage>15</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.future.2024.06.008</pub-id></citation></ref>
<ref id="ref29"><label>29.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Fang</surname> <given-names>H</given-names></name> <name><surname>Li</surname> <given-names>F</given-names></name> <name><surname>Wu</surname> <given-names>J</given-names></name> <name><surname>Fu</surname> <given-names>H</given-names></name> <name><surname>Sun</surname> <given-names>X</given-names></name> <name><surname>Orlando</surname> <given-names>JI</given-names></name> <etal/></person-group>. PALM: open fundus photograph dataset with pathologic myopia recognition and anatomical structure annotation. arXiv [Preprint]. <italic>arXiv:230507816</italic> (<year>2023</year>).</citation></ref>
<ref id="ref30"><label>30.</label><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Tan</surname> <given-names>M</given-names></name> <name><surname>Le</surname> <given-names>Q</given-names></name></person-group>, (ed.) <article-title>EfficientNetv2: smaller models and faster training</article-title>. <conf-name>International conference on machine learning. PMLR</conf-name>; (<year>2021</year>)</citation></ref>
<ref id="ref31"><label>31.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zou</surname> <given-names>X</given-names></name> <name><surname>Ren</surname> <given-names>L</given-names></name> <name><surname>Cai</surname> <given-names>P</given-names></name> <name><surname>Zhang</surname> <given-names>Y</given-names></name> <name><surname>Ding</surname> <given-names>H</given-names></name> <name><surname>Deng</surname> <given-names>K</given-names></name> <etal/></person-group>. <article-title>Accurately identifying hemagglutinin using sequence information and machine learning methods</article-title>. <source>Front Med (Lausanne)</source>. (<year>2023</year>) <volume>10</volume>:<fpage>1281880</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fmed.2023.1281880</pub-id>, PMID: <pub-id pub-id-type="pmid">38020152</pub-id></citation></ref>
<ref id="ref32"><label>32.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zulfiqar</surname> <given-names>H</given-names></name> <name><surname>Guo</surname> <given-names>Z</given-names></name> <name><surname>Ahmad</surname> <given-names>RM</given-names></name> <name><surname>Ahmed</surname> <given-names>Z</given-names></name> <name><surname>Cai</surname> <given-names>P</given-names></name> <name><surname>Chen</surname> <given-names>X</given-names></name> <etal/></person-group>. <article-title>Deep-STP: a deep learning-based approach to predict snake toxin proteins by using word embeddings</article-title>. <source>Front Med</source>. (<year>2024</year>) <volume>10</volume>:<fpage>10</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fmed.2023.1291352</pub-id>, PMID: <pub-id pub-id-type="pmid">38298505</pub-id></citation></ref>
<ref id="ref33"><label>33.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhu</surname> <given-names>H</given-names></name> <name><surname>Hao</surname> <given-names>H</given-names></name> <name><surname>Yu</surname> <given-names>L</given-names></name></person-group>. <article-title>Identifying disease-related microbes based on multi-scale variational graph autoencoder embedding Wasserstein distance</article-title>. <source>BMC Biol</source>. (<year>2023</year>) <volume>21</volume>:<fpage>294</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12915-023-01796-8</pub-id>, PMID: <pub-id pub-id-type="pmid">38115088</pub-id></citation></ref>
<ref id="ref34"><label>34.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>X</given-names></name> <name><surname>Yang</surname> <given-names>H</given-names></name> <name><surname>Ai</surname> <given-names>C</given-names></name> <name><surname>Ding</surname> <given-names>Y</given-names></name> <name><surname>Guo</surname> <given-names>F</given-names></name> <name><surname>Tang</surname> <given-names>J</given-names></name></person-group>. <article-title>Mvml-Mpi: Multi-view multi-label learning for metabolic pathway inference</article-title>. <source>Brief Bioinform</source>. (<year>2023</year>) <volume>24</volume>:<fpage>bbad393</fpage>. doi: <pub-id pub-id-type="doi">10.1093/bib/bbad393</pub-id>, PMID: <pub-id pub-id-type="pmid">37930024</pub-id></citation></ref>
<ref id="ref35"><label>35.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liang</surname> <given-names>C</given-names></name> <name><surname>Wang</surname> <given-names>L</given-names></name> <name><surname>Liu</surname> <given-names>L</given-names></name> <name><surname>Zhang</surname> <given-names>H</given-names></name> <name><surname>Guo</surname> <given-names>F</given-names></name></person-group>. <article-title>Multi-view unsupervised feature selection with tensor robust principal component analysis and consensus graph learning</article-title>. <source>Pattern Recogn</source>. (<year>2023</year>) <volume>141</volume>:<fpage>109632</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.patcog.2023.109632</pub-id></citation></ref>
<ref id="ref36"><label>36.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>H</given-names></name> <name><surname>Pang</surname> <given-names>Y</given-names></name> <name><surname>Liu</surname> <given-names>B</given-names></name></person-group>. <article-title>BioSeq-BLM: a platform for analyzing DNA, RNA, and protein sequences based on biological language models</article-title>. <source>Nucleic Acids Res</source>. (<year>2021</year>) <volume>49</volume>:<fpage>e129</fpage>. doi: <pub-id pub-id-type="doi">10.1093/nar/gkab829</pub-id>, PMID: <pub-id pub-id-type="pmid">34581805</pub-id></citation></ref>
<ref id="ref37"><label>37.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>B</given-names></name> <name><surname>Gao</surname> <given-names>X</given-names></name> <name><surname>Zhang</surname> <given-names>H</given-names></name></person-group>. <article-title>BioSeq-Analysis2.0: an updated platform for analyzing DNA, RNA and protein sequences at sequence level and residue level based on machine learning approaches</article-title>. <source>Nucleic Acids Res</source>. (<year>2019</year>) <volume>47</volume>:<fpage>e127</fpage>. doi: <pub-id pub-id-type="doi">10.1093/nar/gkz740</pub-id>, PMID: <pub-id pub-id-type="pmid">31504851</pub-id></citation></ref>
<ref id="ref38"><label>38.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>ZY</given-names></name> <name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Ye</surname> <given-names>X</given-names></name> <name><surname>Sakurai</surname> <given-names>T</given-names></name> <name><surname>Lin</surname> <given-names>H</given-names></name></person-group>. <article-title>A Bert-based model for the prediction of lncRNA subcellular localization in <italic>Homo sapiens</italic></article-title>. <source>Int J Biol Macromol</source>. (<year>2024</year>) <volume>265</volume>:<fpage>130659</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijbiomac.2024.130659</pub-id>, PMID: <pub-id pub-id-type="pmid">38462114</pub-id></citation></ref>
<ref id="ref39"><label>39.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dou</surname> <given-names>M</given-names></name> <name><surname>Tang</surname> <given-names>J</given-names></name> <name><surname>Tiwari</surname> <given-names>P</given-names></name> <name><surname>Ding</surname> <given-names>Y</given-names></name> <name><surname>Guo</surname> <given-names>F</given-names></name></person-group>. <article-title>Drug-drug interaction relation extraction based on deep learning: a review</article-title>. <source>ACM Comput Surv</source>. (<year>2024</year>) <volume>56</volume>:<fpage>1</fpage>&#x2013;<lpage>33</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3645089</pub-id></citation></ref>
<ref id="ref40"><label>40.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Charoenkwan</surname> <given-names>P</given-names></name> <name><surname>Schaduangrat</surname> <given-names>N</given-names></name> <name><surname>Lio</surname> <given-names>P</given-names></name> <name><surname>Moni</surname> <given-names>MA</given-names></name> <name><surname>Shoombuatong</surname> <given-names>W</given-names></name> <name><surname>Manavalan</surname> <given-names>B</given-names></name></person-group>. <article-title>Computational prediction and interpretation of druggable proteins using a stacked ensemble-learning framework</article-title>. <source>iScience</source>. (<year>2022</year>) <volume>25</volume>:<fpage>104883</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.isci.2022.104883</pub-id></citation></ref>
<ref id="ref41"><label>41.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bupi</surname> <given-names>N</given-names></name> <name><surname>Sangaraju</surname> <given-names>VK</given-names></name> <name><surname>Phan</surname> <given-names>LT</given-names></name> <name><surname>Lal</surname> <given-names>A</given-names></name> <name><surname>Vo</surname> <given-names>TTB</given-names></name> <name><surname>Ho</surname> <given-names>PT</given-names></name> <etal/></person-group>. <article-title>An effective integrated machine learning framework for identifying severity of tomato yellow leaf curl virus and their experimental validation</article-title>. <source>Research</source>. (<year>2023</year>) <volume>6</volume>:<fpage>0016</fpage>. doi: <pub-id pub-id-type="doi">10.34133/research.0016</pub-id>, PMID: <pub-id pub-id-type="pmid">36930763</pub-id></citation></ref>
<ref id="ref42"><label>42.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cadrin-Chenevert</surname> <given-names>A</given-names></name></person-group>. <article-title>Moving from imagenet to radimagenet for improved transfer learning and generalizability</article-title>. <source>Radiol Artif Intell</source>. (<year>2022</year>) <volume>4</volume>:<fpage>e220126</fpage>. doi: <pub-id pub-id-type="doi">10.1148/ryai.220126</pub-id>, PMID: <pub-id pub-id-type="pmid">36204541</pub-id></citation></ref>
<ref id="ref43"><label>43.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Cen</surname> <given-names>M</given-names></name> <name><surname>Xu</surname> <given-names>J</given-names></name> <name><surname>Zhang</surname> <given-names>H</given-names></name> <name><surname>Xu</surname> <given-names>XS</given-names></name></person-group>. <article-title>Improving feature extraction from histopathological images through a fine-tuning imagenet model</article-title>. <source>J Pathol Inform</source>. (<year>2022</year>) <volume>13</volume>:<fpage>100115</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jpi.2022.100115</pub-id>, PMID: <pub-id pub-id-type="pmid">36268072</pub-id></citation></ref>
<ref id="ref44"><label>44.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Charoenkwan</surname> <given-names>P</given-names></name> <name><surname>Schaduangrat</surname> <given-names>N</given-names></name> <name><surname>Manavalan</surname> <given-names>B</given-names></name> <name><surname>Shoombuatong</surname> <given-names>W</given-names></name></person-group>. <article-title>M3S-ALG: improved and robust prediction of allergenicity of chemical compounds by using a novel multi-step stacking strategy</article-title>. <source>Futur Gener Comput Syst</source>. (<year>2025</year>) <volume>162</volume>:<fpage>107455</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.future.2024.07.033</pub-id></citation></ref>
<ref id="ref45"><label>45.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>M</given-names></name> <name><surname>Li</surname> <given-names>C</given-names></name> <name><surname>Chen</surname> <given-names>R</given-names></name> <name><surname>Cao</surname> <given-names>D</given-names></name> <name><surname>Zeng</surname> <given-names>X</given-names></name></person-group>. <article-title>Geometric deep learning for drug discovery</article-title>. <source>Expert Syst Appl</source>. (<year>2023</year>) <volume>240</volume>:<fpage>122498</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2023.122498</pub-id>, PMID: <pub-id pub-id-type="pmid">40013265</pub-id></citation></ref>
<ref id="ref46"><label>46.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeng</surname> <given-names>X</given-names></name> <name><surname>Wang</surname> <given-names>F</given-names></name> <name><surname>Luo</surname> <given-names>Y</given-names></name> <name><surname>Kang</surname> <given-names>S-g</given-names></name> <name><surname>Tang</surname> <given-names>J</given-names></name> <name><surname>Lightstone</surname> <given-names>FC</given-names></name> <etal/></person-group>. <article-title>Deep generative molecular design reshapes drug discovery</article-title>. <source>Cell Rep Med</source>. (<year>2022</year>) <volume>3</volume>:<fpage>100794</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.xcrm.2022.100794</pub-id>, PMID: <pub-id pub-id-type="pmid">36306797</pub-id></citation></ref>
<ref id="ref47"><label>47.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hasan</surname> <given-names>MM</given-names></name> <name><surname>Tsukiyama</surname> <given-names>S</given-names></name> <name><surname>Cho</surname> <given-names>JY</given-names></name> <name><surname>Kurata</surname> <given-names>H</given-names></name> <name><surname>Alam</surname> <given-names>MA</given-names></name> <name><surname>Liu</surname> <given-names>X</given-names></name> <etal/></person-group>. <article-title>Deepm5C: a deep-learning-based hybrid framework for identifying human RNA N5-methylcytosine sites using a stacking strategy</article-title>. <source>Mol Ther</source>. (<year>2022</year>) <volume>30</volume>:<fpage>2856</fpage>&#x2013;<lpage>67</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ymthe.2022.05.001</pub-id>, PMID: <pub-id pub-id-type="pmid">35526094</pub-id></citation></ref>
<ref id="ref48"><label>48.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shoombuatong</surname> <given-names>W</given-names></name> <name><surname>Meewan</surname> <given-names>I</given-names></name> <name><surname>Mookdarsanit</surname> <given-names>L</given-names></name> <name><surname>Schaduangrat</surname> <given-names>N</given-names></name></person-group>. <article-title>Stack-HDAC3i: a high-precision identification of HDAC3 inhibitors by exploiting a stacked ensemble-learning framework</article-title>. <source>Methods</source>. (<year>2024</year>) <volume>230</volume>:<fpage>147</fpage>&#x2013;<lpage>57</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ymeth.2024.08.003</pub-id>, PMID: <pub-id pub-id-type="pmid">39191338</pub-id></citation></ref>
<ref id="ref49"><label>49.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Manavalan</surname> <given-names>B</given-names></name> <name><surname>Lee</surname> <given-names>J</given-names></name></person-group>. <article-title>FRTpred: a novel approach for accurate prediction of protein folding rate and type</article-title>. <source>Comput Biol Med</source>. (<year>2022</year>) <volume>149</volume>:<fpage>105911</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.105911</pub-id>, PMID: <pub-id pub-id-type="pmid">36096036</pub-id></citation></ref>
<ref id="ref50"><label>50.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pham</surname> <given-names>NT</given-names></name> <name><surname>Rakkiyapan</surname> <given-names>R</given-names></name> <name><surname>Park</surname> <given-names>J</given-names></name> <name><surname>Malik</surname> <given-names>A</given-names></name> <name><surname>Manavalan</surname> <given-names>B</given-names></name></person-group>. <article-title>H<sub>2</sub>Opred: a robust and efficient hybrid deep learning model for predicting 2'-O-methylation sites in human RNA</article-title>. <source>Brief Bioinform</source>. (<year>2023</year>) <volume>25</volume>:<fpage>bbad476</fpage>. doi: <pub-id pub-id-type="doi">10.1093/bib/bbad476</pub-id>, PMID: <pub-id pub-id-type="pmid">38180830</pub-id></citation></ref>
<ref id="ref51"><label>51.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Manavalan</surname> <given-names>B</given-names></name> <name><surname>Patra</surname> <given-names>MC</given-names></name></person-group>. <article-title>MLCPP 2.0: an updated cell-penetrating peptides and their uptake efficiency predictor</article-title>. <source>J Mol Biol</source>. (<year>2022</year>) <volume>434</volume>:<fpage>167604</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jmb.2022.167604</pub-id>, PMID: <pub-id pub-id-type="pmid">35662468</pub-id></citation></ref>
<ref id="ref52"><label>52.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Charoenkwan</surname> <given-names>P</given-names></name> <name><surname>Chumnanpuen</surname> <given-names>P</given-names></name> <name><surname>Schaduangrat</surname> <given-names>N</given-names></name> <name><surname>Shoombuatong</surname> <given-names>W</given-names></name></person-group>. <article-title>Stack-AVP: a stacked ensemble predictor based on multi-view information for fast and accurate discovery of antiviral peptides</article-title>. <source>J Mol Biol</source>. (<year>2024</year>):<fpage>168853</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jmb.2024.168853</pub-id>, PMID: <pub-id pub-id-type="pmid">39510347</pub-id></citation></ref>
<ref id="ref53"><label>53.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van der Maaten</surname> <given-names>L</given-names></name> <name><surname>Hinton</surname> <given-names>G</given-names></name></person-group>. <article-title>Visualizing data using t-SNE</article-title>. <source>J Mach Learn Res</source>. (<year>2008</year>) <volume>9</volume>:<fpage>2579</fpage>&#x2013;<lpage>2605</lpage>.</citation></ref>
</ref-list>
</back>
</article>