<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Imaging</journal-id>
<journal-title-group>
<journal-title>Frontiers in Imaging</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Imaging</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2813-3315</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fimag.2026.1752625</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Enhancement of multi-objective Darwinian particle swarm optimization for neural-network-based multimodal medical image fusion</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Ogbuanya</surname> <given-names>Chisom E.</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<uri xlink:href="https://loop.frontiersin.org/people/3285066"/>
</contrib>
</contrib-group>
<aff id="aff1"><institution>Electronic and Computer Engineering Department, University of Nigeria</institution>, <city>Nsukka</city>, <country country="NG">Nigeria</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Chisom E. Ogbuanya, <email xlink:href="mailto:chisom.ogbuanya@unn.edu.ng">chisom.ogbuanya@unn.edu.ng</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-26">
<day>26</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>5</volume>
<elocation-id>1752625</elocation-id>
<history>
<date date-type="received">
<day>23</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>09</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Ogbuanya.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Ogbuanya</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-26">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>The purpose of this research is to develop a multimodal medical image fusion method that will provide high-performance fusion images at a speed high enough for efficient real-time image-guided surgeries. This paper therefore proposes an improved multi-objective Darwinian particle swarm optimization method that incorporates a fractional calculus operator for effective multimodal medical image fusion. This is because multimodal medical image fusion is essential in many clinical diagnoses, and it represents a multi-objective problem due to the important objective indicators for measuring its efficiencies, such as the parameters of the neural network and the speed of the fusion process. The proposed method aims to optimize the Tsallis cross-entropy as a stimulating input to the pulse-coupled neural network (PCNN) for multimodal image fusion. In this work, multi-objective Darwinian particle swarm optimization (MODPSO) is utilized due to its ability to escape local optima more effectively than classical multi-objective particle swarm optimization (MOPSO). The approach uses the fact that the convergence rate of MODPSO is improved by introducing a fractional calculus operator, which is incorporated into the updating formulas for the velocity and position of the particles. The PCNN output serves as an optimal parameter for fusing the high-frequency coefficients of decomposed source images, which are initially decomposed into low- and high-frequency subbands. The low-frequency coefficients are fused using an averaging method. Results obtained in this paper show that the proposed method yields the highest average accuracy of 90.7% after a three-fold cross-validation was carried out with a small dataset extracted from a larger available dataset. In conclusion, the experimental results demonstrate the superiority of the proposed method over comparative methods in terms of both visual quality and quantitative evaluation.</p></abstract>
<kwd-group>
<kwd>fractional-order Darwinian particle swarm optimization</kwd>
<kwd>medical image fusion</kwd>
<kwd>multi-objective optimization</kwd>
<kwd>pulse coupled neural networks</kwd>
<kwd>Tsallis function</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the Electronic and Computer Engineering Department, University of Nigeria, Nsukka, Nigeria.</funding-statement>
</funding-group>
<counts>
<fig-count count="4"/>
<table-count count="6"/>
<equation-count count="29"/>
<ref-count count="44"/>
<page-count count="16"/>
<word-count count="9847"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Image Retrieval and Analysis</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Multimodal medical image fusion has greatly assisted physicians in obtaining an accurate and dependable patient diagnosis, in image-guided surgery, and in radiotherapy (<xref ref-type="bibr" rid="B35">Vigelis et al., 2020</xref>). The main goal of most multimodal image fusion methods is to increase the clinical precision of medical imaging. Various imaging techniques are applied to obtain interpretable details of health problems, for example, computed tomography (CT) is used to obtain images of hard structures and bones, while magnetic resonance imaging (MRI) captures soft tissue (<xref ref-type="bibr" rid="B23">Liu et al., 2019</xref>). The fusion of MRI and CT when joined with surgical navigation assists surgeons in putting together, more accurately, a preoperative plan and possibly gives good clinical benefits in surgery (<xref ref-type="bibr" rid="B9">Ghorai et al., 2021</xref>). However, obtaining high-quality fused images with accurately preserved image information is still a problem being addressed by ongoing research (<xref ref-type="bibr" rid="B43">Zhu et al., 2021</xref>). Preserving the essential features of images as well as reducing uncertainty and redundancy are important to ensure the fused images obtained will be useful to physicians for effective monitoring and all kinds of diagnoses (<xref ref-type="bibr" rid="B16">Li et al., 2021</xref>).</p>
<p>Tsallis cross-entropy, which was introduced on the foundation of Shannon entropy and cross-entropy, not only overcomes the limitations of Shannon entropy but also takes into consideration the information of source and fused images (<xref ref-type="bibr" rid="B35">Vigelis et al., 2020</xref>). Therefore, the implementation of Tsallis cross-entropy for pulse-coupled neural networks (PCNNs) will increase the quality of the fused images (<xref ref-type="bibr" rid="B35">Vigelis et al., 2020</xref>). Tsallis cross-entropy stimulated PCNN-based multimodal image fusion is a multi-objective optimization problem because it is expedient to optimize the entropies to obtain the optimal threshold which will then serve as external stimuli to the PCNN, and also to carry out the entire fusion in the shortest possible time. Of the multi-objective optimization algorithms currently used by researchers in research works [e.g., the Pareto archive evolutionary strategy (PAES; <xref ref-type="bibr" rid="B23">Liu et al., 2019</xref>), the strength Pareto evolutionary algorithm (SPEA 2; <xref ref-type="bibr" rid="B9">Ghorai et al., 2021</xref>), the non-dominated sorting genetic algorithm II (NSGA-II; <xref ref-type="bibr" rid="B43">Zhu et al., 2021</xref>), non-dominated sorting particle swarm optimization (<xref ref-type="bibr" rid="B16">Li et al., 2021</xref>), and multi-objective particle swarm optimization (MOPSO; <xref ref-type="bibr" rid="B10">Han et al., 2021</xref>)], MOPSO has the highest optimization capacity and rate of convergence (<xref ref-type="bibr" rid="B19">Lin et al., 2019</xref>). 
However, because image fusion processes are commonly known to have the shortcoming of premature convergence (<xref ref-type="bibr" rid="B26">Pires et al., 2010</xref>), multi-objective Darwinian particle swarm optimization (MODPSO) will be applied in this work because of the ability of the Darwinian particle swarm optimization (DPSO) algorithm to escape from local optima (<xref ref-type="bibr" rid="B1">Ahilan et al., 2019</xref>). The main concept of MODPSO is to process multiple concurrent particle swarm optimization (PSO) algorithms, each of which indicates a swarm. The effectiveness of the MODPSO algorithm is improved when compared with PSO in ensuring particles are not stuck in local optima. This helps to optimize the Tsallis cross-entropy effectively so that the optimal parameters of the neural network are selected for the most favorable output of the neural network for the fusion of the coefficients of the decomposed source images.</p>
<p>The main contributions of this paper can be summarized as follows:</p>
<list list-type="bullet">
<list-item><p>A novel multimodal medical image fusion algorithm is developed that incorporates a new multi-objective optimization algorithm (FOMODPSO) for the optimization of Tsallis cross-entropy to stimulate a PCNN externally, leading to a zero-learning/pretrained neural network.</p></list-item>
<list-item><p>A higher firing count is generated to drive the selection of the correct pixel to be sent to the fused image, thereby ensuring complete extraction of the essential details of fused images.</p></list-item>
<list-item><p>The proposed method includes consideration of low computational needs and high speed of processing.</p></list-item>
<list-item><p>Extensive experiments on 14 groups of multimodal images demonstrate the high-performance potential of the proposed multimodal medical image fusion algorithm in the recognition of the optimal neural network parameters for feature extraction, as regards medical image fusion.</p></list-item>
</list>
<p>The core motivation for this work is the long-standing challenge of achieving computational efficiency in high-quality PCNN-based multimodal medical image fusion (MMIF), which is essential for clinical practice such as image-guided surgery. Previous optimization algorithms used for PCNN parameter selection suffer from low convergence rates, directly resulting in slow processing times. Our work addresses this by proposing the novel fractional-order multi-objective Darwinian particle swarm optimization (FOMODPSO) to dramatically increase algorithmic convergence speed and exploration capability, thereby achieving the necessary ultra-low processing time (0.545 s; see Section 5.4, <xref ref-type="table" rid="T1">Table 1</xref>) required for real-time clinical application.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Various approaches and their fusion processing time (in s).</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Method</bold></th>
<th valign="top" align="center" colspan="9">Time (s)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Method</td>
<td valign="top" align="center">QPSO &#x0002B; PCNN</td>
<td valign="top" align="center">MOPSO &#x0002B; PCNN</td>
<td valign="top" align="center">PSO &#x0002B; PCNN</td>
<td valign="top" align="center">MFOA &#x0002B; PCNN</td>
<td valign="top" align="center">WOA &#x0002B; PCNN</td>
<td valign="top" align="center">PSO-TV &#x0002B; PCNN</td>
<td valign="top" align="center">PSO-DE &#x0002B; PCNN</td>
<td valign="top" align="center">DE &#x0002B; PCNN</td>
<td valign="top" align="center">FOMODPSO &#x0002B; PCNN</td>
</tr>
<tr>
<td valign="top" align="left">Time (s)</td>
<td valign="top" align="center">2.402</td>
<td valign="top" align="center">2.257</td>
<td valign="top" align="center">2.340</td>
<td valign="top" align="center">2.686</td>
<td valign="top" align="center">1.890</td>
<td valign="top" align="center">2.542</td>
<td valign="top" align="center">1.672</td>
<td valign="top" align="center">1.205</td>
<td valign="top" align="center"><bold>0.545</bold></td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>The figure in bold indicates the average time (the shortest amongst all) consumption using our proposed method.</p>
</table-wrap-foot>
</table-wrap>
<p>The rest of this paper is organized as follows. Section 2 reviews related work. The preliminaries of the Tsallis cross-entropy, non-subsampled shearlet transform (NSST), FOMODPSO, and PCNN are introduced in Section 3. Section 4 describes the proposed FOMODPSO algorithm and its application in the optimization of the Tsallis cross-entropy for input to the PCNN for application to MMIF. Section 5 compares the proposed image fusion algorithm and other multimodal image fusion algorithms experimentally through a series of test instances. Finally, the conclusions are discussed in Section 6 along with ideas for further improvements.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Related work</title>
<p>Recently, the application of AI-based tools for improving diagnostic efficacy and healthcare assistance has become an active area of research. Over the years, different medical image fusion methods have been proposed (<xref ref-type="bibr" rid="B27">Raha et al., 2020</xref>; <xref ref-type="bibr" rid="B33">Tang et al., 2019</xref>; <xref ref-type="bibr" rid="B14">Kumar et al., 2021</xref>). The application of deep learning to image processing has made great progress. Deep learning was introduced to medical image fusion while attempting to tackle the design challenge of activity level measurement and fusion rule design found in traditional methods (<xref ref-type="bibr" rid="B41">Zhang et al., 2021</xref>). In this section, we review some recent works on deep learning and neural network-based medical imaging.</p>
<p>In <xref ref-type="bibr" rid="B3">Challa et al. (2019)</xref>, a deep complete-CNN architecture for the exposure of diabetic retinopathy and its five levels was developed; however, the speed of the medical image processing was not considered in the work. Medical image translation using a fully conditioned bounded deep network was carried out in <xref ref-type="bibr" rid="B28">Rai et al. (2023)</xref>. Bayesian deep learning was applied (<xref ref-type="bibr" rid="B6">Deshpande and Bhatt, 2019</xref>) for the registration of noisy medical images by non-linear geometric irregularities. Medical image denoising was carried out in <xref ref-type="bibr" rid="B29">Rai et al. (2021)</xref> using a multilayer deep residue network joined with sectionalized dictionaries. In <xref ref-type="bibr" rid="B30">Rai et al. (2022)</xref> a deep cascade restructuring of low-grade reduced resolution CT images of the chest to resuscitated and well-resolved CT chest images was developed as an accessible, less expensive, and safe recourse for monitoring lung wellness in COVID-19. The speed of these AI-based diagnostic processes was, however, not taken into consideration, or improved on.</p>
<p>Many works on PCNN-based MMIF have been presented in the recent past. <xref ref-type="bibr" rid="B33">Tang et al. (2019)</xref> applied the multi-swarm fruit fly optimization algorithm (MFOA) in the optimization of PCNN for MMIF. Enhanced fusion effect results were obtained; however, the convergence rate of the MFOA is quite low. <xref ref-type="bibr" rid="B14">Kumar et al. (2021)</xref> presented a hybrid MMIF method that applies PSO in the optimization of PCNN for the fusion of CT and MRI images. The results obtained gave fused images of reduced noise; however, many iterations in the fusion algorithm caused results to be low-performing in some evaluation metrics. <xref ref-type="bibr" rid="B27">Raha et al. (2020)</xref> proposed a multimodal image fusion method that applies the whale optimization algorithm (WOA) in the optimization of PCNN to give fused image results of image details; however, the convergence rate of the optimization algorithm is low. <xref ref-type="bibr" rid="B5">Das et al. (2022)</xref> applied multi-objective differential evolution (DE) in the optimization of dual-channel PCNN for multimodal image sensor fusion. Fused image results of preserved structural and textural details were obtained; however, many iterations caused the fused image results to be low-performing in some evaluation metrics. <xref ref-type="bibr" rid="B25">Nie et al. (2021)</xref> used PSO and a total variation-based derivative optimization to optimize PCNN for MMIF based on multisource information exchange encoding. The results obtained showed the good visual quality of fused images, but the convergence rate of optimization algorithms needs to be improved. <xref ref-type="bibr" rid="B36">Wang et al. (2016)</xref> optimized PCNN with MOPSO for multimodal image fusion. The results obtained showed good-quality fused images, but the speed of the fusion process is low because of several iterations in optimization algorithms. <xref ref-type="bibr" rid="B22">Liu R. et al. 
(2022)</xref> presented a multimodal image fusion method that applies PSO and standard DE in the optimization of PCNN parameters to give fused image results of improved fusion effect. The many iterations of the optimization algorithms caused some evaluation metrics of the fused image results to not perform so well. <xref ref-type="bibr" rid="B40">Xu et al. (2016)</xref> used the quantum-behaved PSO (QPSO) to optimize the adaptive PCNN for multimodal image fusion. The results obtained showed higher-quality fused images; however, the convergence rate of the optimization algorithm needs to be improved for better fusion results. In all these works, proper optimization of the parameters of the PCNN with an efficient optimization algorithm would go a long way toward improving the performance of PCNN-based multimodal image fusion methods.</p>
<p>MMIF can be noted as a branch of incomplete multi-view clustering (IMC). IMC is the fusion of varying views of specific data of which none of the views is complete. MMIF, however, focuses on medical data that are assumed to have complete multiple views. The multimodal data being used in this case may or may not have consistent and complementary information. External factors such as patients&#x00027; absence and the low quality of the medical dataset (<xref ref-type="bibr" rid="B38">Wen et al., 2022</xref>) could affect the consistency and completeness of the source data. The most significant impact on the fusion effect for fusion tasks can be found in the aspects of the extent of completeness and balance of the source data (<xref ref-type="bibr" rid="B20">Liu et al., 2023</xref>; <xref ref-type="bibr" rid="B21">Liu C. et al., 2022</xref>), and also the effectiveness of the fusion rules with which the source data are being fused. If the multimodal data are incomplete with only one available modality, we can generate a fusion result from only one modality, provided an effective information recovery method is applied to recover the necessary missing information completely and uniformly (<xref ref-type="bibr" rid="B20">Liu et al., 2023</xref>).</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Preliminaries as foundation of the proposed method</title>
<sec>
<label>3.1</label>
<title>Notation</title>
<p>To aid with understanding of the notation, <xref ref-type="table" rid="T2">Table 2</xref> contains a list of notation and the respective definitions.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Table listing notation and its meaning.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Notation</bold></th>
<th valign="top" align="left"><bold>Meaning</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">T</td>
<td valign="top" align="left">The system&#x00027;s potential.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>q</italic></td>
<td valign="top" align="left">The entropic index.</td>
</tr>
<tr>
<td valign="top" align="left"><inline-formula><mml:math id="M1"><mml:mi>L</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula><mml:math id="M2"><mml:mi>L</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td valign="top" align="left">Two sub-images representing the two low-frequency sub-images from two modalities.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>g</italic> &#x0003D; (1, &#x02026;, <italic>G</italic>)</td>
<td valign="top" align="left">The scale of representation found in the transformed space at direction <italic>h</italic>(<italic>i, j</italic>) in alignment with a spatial location (<italic>i, j</italic>).</td>
</tr>
<tr>
<td valign="top" align="left"><italic>x</italic> &#x0003D; (<italic>x</italic><sub>1</sub>, <italic>x</italic><sub>2</sub>, &#x02026;, <italic>x</italic><sub><italic>D</italic></sub>)</td>
<td valign="top" align="left">A <italic>D</italic>-dimensional decision variable.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>m</italic></td>
<td valign="top" align="left">The number of objective functions in the multi-objective optimization problem.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub><italic>m</italic></sub>(<italic>x</italic>)</td>
<td valign="top" align="left">The <italic>m</italic>th objective optimization problem.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>g</italic><sub><italic>i</italic></sub>(<italic>x</italic>)</td>
<td valign="top" align="left">The <italic>i</italic>th inequality constraint.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>p</italic></td>
<td valign="top" align="left">The number of inequality constraints.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>h</italic><sub><italic>j</italic></sub>(<italic>x</italic>)</td>
<td valign="top" align="left">The <italic>j</italic>th equation constraint.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>l</italic></td>
<td valign="top" align="left">The number of equation constraints.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>n</italic></td>
<td valign="top" align="left">The moving particle.</td>
</tr>
<tr>
<td valign="top" align="left"><inline-formula><mml:math id="M3"><mml:msubsup><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td valign="top" align="left">The position of the particle.</td>
</tr>
<tr>
<td valign="top" align="left"><inline-formula><mml:math id="M4"><mml:msubsup><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td valign="top" align="left">The velocity of the particle.</td>
</tr>
<tr>
<td valign="top" align="left"><inline-formula><mml:math id="M5"><mml:msubsup><mml:mrow><mml:mi>&#x01E8A;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td valign="top" align="left">The best solution found by a specific particle&#x00027;s immediate neighbors, rather than the entire swarm.</td>
</tr>
<tr>
<td valign="top" align="left">N</td>
<td valign="top" align="left">Population size.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>T</italic><sub><italic>max</italic></sub></td>
<td valign="top" align="left">Fusion threshold.</td>
</tr>
<tr>
<td valign="top" align="left">D<inline-formula><mml:math id="M6"><mml:mo>&#x0221D;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td valign="top" align="left">Fractional-order particle swarm optimization (FOPSO).</td>
</tr>
<tr>
<td valign="top" align="left"><italic>&#x003C1;1, &#x003C1;2</italic>, and <italic>&#x003C1;3</italic></td>
<td valign="top" align="left">Weights assigned to the global best, local best, and neighborhood best when determining the new velocity, respectively.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>&#x003B3;1, &#x003B3;2</italic>, and <italic>&#x003B3;3</italic></td>
<td valign="top" align="left">Random vectors, with each component generally a uniform random number between 0 and 1.</td>
</tr>
<tr>
<td valign="top" align="left"><inline-formula><mml:math id="M7"><mml:msubsup><mml:mrow><mml:mi>&#x000F1;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mtext>&#x000A0;</mml:mtext></mml:math></inline-formula></td>
<td valign="top" align="left">The neighborhood best.</td>
</tr>
<tr>
<td valign="top" align="left"><inline-formula><mml:math id="M8"><mml:msubsup><mml:mrow><mml:mi>&#x01E21;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td valign="top" align="left">The global best.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>F</italic><sub><italic>ij</italic></sub> and <italic>L</italic><sub><italic>ij</italic></sub></td>
<td valign="top" align="left">The feedings and linking synapses that align to the original <italic>i</italic>th and <italic>j</italic>th position of the pixel in the original image.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>S</italic><sub><italic>ij</italic></sub> </td>
<td valign="top" align="left">The external stimulus, which controls the time of ignition.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>W</italic><sub><italic>ijkl</italic></sub></td>
<td valign="top" align="left">The local connection matrices made up of weights that can quantify the influence of each neuron that is near the central neuron.</td>
</tr>
<tr>
<td valign="top" align="left">&#x003B2;</td>
<td valign="top" align="left">The linking coefficient.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>Y</italic><sub><italic>ij</italic></sub></td>
<td valign="top" align="left">The binary output of the model.</td>
</tr>
<tr>
<td valign="top" align="left">&#x003B1;<sub>&#x003B8;</sub></td>
<td valign="top" align="left">The attenuation time constant.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>V</italic><sub>&#x003B8;</sub></td>
<td valign="top" align="left">The corresponding inherent voltage potentials.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>n</italic></td>
<td valign="top" align="left">Represents the iteration time.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>R<sub><italic>ij</italic></sub></italic></td>
<td valign="top" align="left">The PCNN firing times.</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec>
<label>3.2</label>
<title>Tsallis cross-entropy</title>
<p>Entropy is generally associated with the degree of instability found within a system. According to Shannon entropy (<xref ref-type="bibr" rid="B18">Lin et al., 2020</xref>), entropy is a measure of chaos and relates to the information content of the system. Shannon stated that when a physical system is divided into two independent subsystems <italic>A</italic> and <italic>B</italic>, then the entropy value can be represented as</p>
<disp-formula id="EQ1"><mml:math id="M9"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>S</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>B</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>S</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mi>S</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(1)</label></disp-formula>
<p>Building on Shannon&#x00027;s theory, a generalized, non-extensive entropy concept was introduced by <xref ref-type="bibr" rid="B35">Vigelis et al. (2020)</xref> and expressed as</p>
<disp-formula id="EQ2"><mml:math id="M10"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mstyle displaystyle="true"><mml:msubsup><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mi>q</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(2)</label></disp-formula>
<p>where <italic>T</italic> represents the system potentials and <italic>q</italic> represents the entropic index (<xref ref-type="bibr" rid="B35">Vigelis et al., 2020</xref>). <xref ref-type="disp-formula" rid="EQ2">Equation 2</xref> will be equivalent to Shannon entropy when <italic>q</italic> &#x02192; 1.</p>
<p>Tsallis cross-entropy (<xref ref-type="bibr" rid="B35">Vigelis et al., 2020</xref>), however, will be applied in this work. If the set of all probability that is on <italic>I</italic><sub><italic>n</italic></sub> &#x0003D; {1, &#x02026;, <italic>n</italic>} by <inline-formula><mml:math id="M11"><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>:</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mtext>&#x000A0;and&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x02265;</mml:mo><mml:mn>0</mml:mn><mml:mtext>&#x000A0;for&#x000A0;all&#x000A0;</mml:mtext><mml:mi>i</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula> for the two probability distributions <inline-formula><mml:math id="M12"><mml:mi>p</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mtext>&#x000A0;and&#x000A0;</mml:mtext><mml:mi>q</mml:mi><mml:mo>=</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mtext>&#x000A0;</mml:mtext><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:msubsup><mml:mrow><mml:mo>&#x00394;</mml:mo></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula>. Also, if the subsystems <italic>A</italic> and <italic>B</italic> are two probability distributions, <italic>A</italic> &#x0003D; {<italic>a</italic><sub>1</sub>, <italic>a</italic><sub>2</sub>, &#x02026;, <italic>a</italic><sub><italic>N</italic></sub>} and <italic>B</italic> &#x0003D; {<italic>b</italic><sub>1</sub>, <italic>b</italic><sub>2</sub>, &#x02026;, <italic>b</italic><sub><italic>N</italic></sub>}, then Tsallis cross-entropy can be expressed as</p>
<disp-formula id="EQ3"><mml:math id="M13"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi><mml:mo>:</mml:mo><mml:mi>B</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>q</mml:mi></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>q</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:mfrac><mml:mo>&#x0002B;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>q</mml:mi></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>q</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:mfrac><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(3)</label></disp-formula>
<p>Tsallis cross-entropy is selected as the objective function due to its non-extensive nature, which allows for a more flexible and sensitive measurement of information discrepancy than standard Shannon entropy. This sensitivity is crucial for capturing subtle diagnostic details and non-linear correlations in complex multimodal medical images.</p>
</sec>
<sec>
<label>3.3</label>
<title>Non-subsampled shearlet transform</title>
<p>Non-subsampled shearlet transform (NSST) is popular alongside some other tools used for multi-resolution processing, such as wavelet curvelet and non-subsampled contourlet transform, because of NSST&#x00027;s applications in the decomposition of signals or images into single low-frequency subbands and many high-frequency subbands (<xref ref-type="bibr" rid="B24">Mishra and Dhabal, 2020</xref>). However, NSST indicates some peculiarity over other analytical tools by involving a shear matrix which enables direction filtering.</p>
</sec>
<sec>
<label>3.4</label>
<title>Low-frequency subband fusion</title>
<p>Two source images <italic>A</italic> and <italic>B</italic>, as represented in this proposed approach, are fused to obtain <italic>Q</italic>, the final fused image. A specific integration of fusion is represented as <italic>Q</italic> = (<italic>A, B</italic>). The fusion process in low-frequency subbands can be described as follows.</p>
<p>The proposed model integrates the low-frequency sub-images using the averaging method (<xref ref-type="bibr" rid="B12">Hu et al., 2018</xref>) to reach the fused low-frequency coefficients. A fused low-frequency sub-image result is obtained as</p>
<disp-formula id="EQ4"><mml:math id="M14"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>L</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>R</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>L</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>A</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>L</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(4)</label></disp-formula>
<p>where <inline-formula><mml:math id="M15"><mml:mi>L</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>A</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula><mml:math id="M16"><mml:mi>L</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> are the two sub-images representing the two low-frequency sub-images from two modalities, and <italic>g</italic> &#x0003D; (1, &#x02026;, <italic>G</italic>) denotes the scale of representation found in the transformed space at direction <italic>h</italic>(<italic>i, j</italic>) in alignment with a spatial location (<italic>i, j</italic>).</p>
</sec>
<sec>
<label>3.5</label>
<title>Multi-objective optimization</title>
<p>The multi-objective optimization minimization problem can be described mathematically as shown in the following equation (<xref ref-type="bibr" rid="B44">Zhu and Han, 2021</xref>)<bold>:</bold></p>
<disp-formula id="EQ5"><mml:math id="M17"><mml:mtable class="eqnarray" columnalign="right"><mml:mtr><mml:mtd><mml:mo class="qopname">min</mml:mo><mml:mi>F</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo class="qopname">&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mtext>&#x000A0;</mml:mtext></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>such&#x000A0;that&#x000A0;&#x000A0;&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>g</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02264;</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;&#x000A0;</mml:mtext><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>p</mml:mi><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;&#x000A0;</mml:mtext><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>l</mml:mi><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(5)</label></disp-formula>
<p>where <italic>x</italic> &#x0003D; (<italic>x</italic><sub>1</sub>, <italic>x</italic><sub>2</sub>, &#x02026;, <italic>x</italic><sub><italic>D</italic></sub>) is a <italic>D</italic>-dimensional decision variable, <italic>m</italic> is the number of objective functions in the multi-objective optimization problem, <italic>f</italic><sub><italic>m</italic></sub>(<italic>x</italic>) is the <italic>m</italic>th objective optimization problem, <italic>g</italic><sub><italic>i</italic></sub>(<italic>x</italic>) is the <italic>i</italic>th inequality constraint, <italic>p</italic> is the number of inequality constraints, <italic>h</italic><sub><italic>j</italic></sub>(<italic>x</italic>) is the <italic>j</italic>th equation constraint, and <italic>l</italic> is the number of equation constraints.</p>
<p>MMIF is treated as a complex multi-objective optimization problem (MOP) requiring simultaneous trade-off between conflicting goals. We will explicitly define the MOP objective function vector <italic>F</italic>(<italic>x</italic>) to maximize fusion quality while minimizing processing time: Optimize <italic>F</italic>(<italic>x</italic>) &#x0003D; [max(<italic>f</italic><sub>1</sub>(<italic>x</italic>)), min(<italic>f</italic><sub>2</sub>(<italic>x</italic>))]. Here, <italic>f</italic><sub>1</sub>(<italic>x</italic>) is the fusion quality maximized using Tsallis cross-entropy (as described in Section 4.1.1), and <italic>f</italic><sub>2</sub>(<italic>x</italic>) is the overall fusion processing time (<italic>T</italic><sub><italic>run</italic></sub>), the minimization of which validates the method&#x00027;s applicability for real-time surgical procedures.</p>
<p>Definitions 1&#x02013;4 encompass the concept of multi-objective optimization (<xref ref-type="bibr" rid="B10">Han et al., 2021</xref>).</p>
<p><bold>Definition 1</bold> (Pareto dominance). The two solutions in the feasible region <italic>X</italic><sub>&#x003A9;</sub> are represented as <italic>x</italic><sub>1</sub>, <italic>x</italic><sub>2</sub>; <italic>x</italic><sub>1</sub> can be seen as the solution that dominates <italic>x</italic><sub>2</sub>, and this relationship is mathematically denoted by <italic>x</italic><sub>1</sub> &#x0003E; <italic>x</italic><sub>2</sub> if and only if</p>
<disp-formula id="EQ6"><mml:math id="M19"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x02200;</mml:mo><mml:mi>i</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>m</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02264;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mtext>&#x000A0;</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mstyle mathvariant="sans-serif"><mml:mo>&#x02203;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>j</mml:mi></mml:mstyle><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mo>&#x02026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>m</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0003C;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(6)</label></disp-formula>
<p>In the context of multi-objective optimization, Pareto dominance is a rule used to compare two different solutions to determine which is superior when multiple goals are being balanced (such as image contrast vs. detail preservation).</p>
<p>A solution is said to dominate another if it meets two specific criteria:</p>
<list list-type="order">
<list-item><p>It is at least as good as the other solution in all objectives.</p></list-item>
<list-item><p>It is strictly better than the other solution in at least one objective.</p></list-item>
</list>
<p>Essentially, if a solution is &#x0201C;dominant,&#x0201D; it means you cannot improve it further in one area without making it worse in another. The goal of the FOMODPSO algorithm is to find a set of these &#x0201C;non-dominated&#x0201D; solutions, which together form what is known as the Pareto front.</p>
<p><bold>Definition 2</bold> (Pareto optimal). The solution <italic>x</italic><sup>&#x0002A;</sup> &#x02208; &#x003A9; is applied as the Pareto optimal of <xref ref-type="disp-formula" rid="EQ5">Equation 5</xref> if and only if</p>
<disp-formula id="EQ7"><mml:math id="M21"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x000AC;</mml:mo><mml:mo>&#x02203;</mml:mo><mml:mi>x</mml:mi><mml:mo>&#x02208;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x003A9;</mml:mo></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>x</mml:mi><mml:mo>&#x0003E;</mml:mo><mml:msup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(7)</label></disp-formula>
<p><bold>Definition 3</bold> (Pareto optimal set). The Pareto optimal set is put together by solutions that are finally obtained in the MOP, where each meets the conditions of Definition 2. It is defined as</p>
<disp-formula id="EQ8"><mml:math id="M22"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>P</mml:mi><mml:mo>=</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup><mml:mo>&#x02208;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x003A9;</mml:mo><mml:mtext>&#x000A0;</mml:mtext></mml:mrow></mml:msub><mml:mo>&#x02223;</mml:mo><mml:mo>&#x000AC;</mml:mo><mml:mo>&#x02203;</mml:mo><mml:mi>x</mml:mi><mml:mo>&#x02208;</mml:mo><mml:msub><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x003A9;</mml:mo></mml:mrow></mml:msub><mml:mo>:</mml:mo><mml:mtext>&#x000A0;&#x000A0;</mml:mtext><mml:mi>x</mml:mi><mml:mo>&#x0003E;</mml:mo><mml:msup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mo>*</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(8)</label></disp-formula>
<p><bold>Definition 4</bold> (Pareto front). The region formed by the objective function values of all Pareto optimal solutions is called the Pareto front surface, which is defined as</p>
<disp-formula id="EQ9"><mml:math id="M23"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>P</mml:mi><mml:mi>F</mml:mi><mml:mo>=</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mi>F</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02223;</mml:mo><mml:mi>x</mml:mi><mml:mo>&#x02208;</mml:mo><mml:mi>P</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(9)</label></disp-formula>
<p>The proposed FOMODPSO algorithm optimizes a multi-objective function vector, F(x), which is designed to balance image quality and efficiency. The two objectives are (1) maximize <italic>f</italic><sub>1</sub>: the Tsallis cross-entropy, which serves as the primary metric for fusion quality and information transfer, and (2) minimize <italic>f</italic><sub>2</sub>: the overall fusion processing time (<italic>T</italic><sub><italic>run</italic></sub>), ensuring the method&#x00027;s suitability for real-time clinical applications.</p>
</sec>
<sec>
<label>3.6</label>
<title>Darwinian particle swarm optimization</title>
<p>The PSO algorithm is generally given as <xref ref-type="bibr" rid="B39">Wu et al. (2021)</xref>:</p>
<disp-formula id="EQ10"><mml:math id="M24"><mml:mtable class="eqnarray" columnalign="right"><mml:mtr><mml:mtd><mml:msubsup><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>w</mml:mi><mml:msubsup><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x01E21;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x01E8B;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo 
stretchy="true">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x000F1;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(10)</label></disp-formula>
<disp-formula id="EQ11"><mml:math id="M26"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:msubsup><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(11)</label></disp-formula>
<p>where <italic>n</italic> is the moving particle, <inline-formula><mml:math id="M27"><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> is the position of the particle, <inline-formula><mml:math id="M28"><mml:msubsup><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> is the velocity of the particle, <inline-formula><mml:math id="M29"><mml:msubsup><mml:mrow><mml:mi>&#x01E8B;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> is the local best, <inline-formula><mml:math id="M30"><mml:msubsup><mml:mrow><mml:mi>&#x000F1;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> is the neighborhood best, and <inline-formula><mml:math id="M31"><mml:msubsup><mml:mrow><mml:mi>&#x01E21;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> is the global best. The coefficients &#x003C1;<sub>1</sub>, &#x003C1;<sub>2</sub>, and &#x003C1;<sub>3</sub> assign weights to the global best, local best, and neighborhood best, respectively, when determining the new velocity, and <italic>r</italic><sub>1</sub>, <italic>r</italic><sub>2</sub>, and <italic>r</italic><sub>3</sub> are random vectors with each component generally 
a uniform random number between 0 and 1.</p>
<p>DPSO is different from PSO only in the fact that some simple rules are followed to delete a swarm, delete particles, and spawn a new swarm and a new particle.</p>
<p>After the deletion of the particle, instead of being zero, the counter is reset to a value approaching the threshold number, according to <xref ref-type="bibr" rid="B1">Ahilan et al. (2019)</xref>:</p>
<disp-formula id="EQ12"><mml:math id="M32"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>S</mml:mi><mml:msub><mml:mrow><mml:mi>C</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mi>i</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>S</mml:mi><mml:msubsup><mml:mrow><mml:mi>C</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mo class="qopname">max</mml:mo></mml:mrow></mml:msubsup><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mi>i</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:mfrac><mml:mtext>&#x000A0;</mml:mtext></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(12)</label></disp-formula>
<p>where <italic>N</italic><sub>kill</sub> is the number of particles deleted from the swarm over a period in which there was no improvement in fitness.</p>
</sec>
<sec>
<label>3.7</label>
<title>Fractional-order multi-objective Darwinian particle swarm optimization (FOMODPSO)</title>
<p>Fractional calculus (FC) is an essential mathematical tool in the applied sciences (<xref ref-type="bibr" rid="B31">Sabatier et al., 2007</xref>). FC has been influential in increasing the performance of several algorithms applied in modeling, curve-fitting, filtering, pattern recognition, edge detection, identification, stability, controllability, observability, and robustness (<xref ref-type="bibr" rid="B4">Couceiro et al., 2012</xref>). FC has been applied to increase the convergence rate of PSO in some works, such as <xref ref-type="bibr" rid="B26">Pires et al. (2010)</xref> and <xref ref-type="bibr" rid="B4">Couceiro et al. (2012)</xref>, where the convergence rate was improved for the velocity of PSO and DPSO, respectively. Fractional-order Darwinian particle swarm optimization (FODPSO) was applied in <xref ref-type="bibr" rid="B1">Ahilan et al. (2019)</xref> for multilevel thresholding of medical images, and the results obtained showed that the fractional coefficient gives the advantage of a greater level of exploration, thereby giving rise to the global solution of the algorithm. FODPSO was also applied in <xref ref-type="bibr" rid="B37">Waseem et al. (2020)</xref>, where FODPSO was hybridized with a neural network to obtain better solutions for ordinary differential equations, and high-performance results were obtained.</p>
<p>The FC derived from the Gr&#x000FC;nwald&#x02013;Letnikov definition, based on the concept of the fractional differential with fractional coefficient &#x003B1; &#x02208; &#x02102;, of a general signal <italic>x(t)</italic> is given by:</p>
<disp-formula id="EQ13"><mml:math id="M33"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:msup><mml:mi>D</mml:mi><mml:mi>&#x003B1;</mml:mi></mml:msup><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mi>lim</mml:mi></mml:mrow><mml:mrow><mml:mi>h</mml:mi><mml:mo>&#x02192;</mml:mo><mml:mn>0</mml:mn></mml:mrow></mml:munder><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mi>&#x003B1;</mml:mi></mml:msup></mml:mrow></mml:mfrac><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mo>+</mml:mo><mml:mi>&#x0221E;</mml:mi></mml:mrow></mml:munderover><mml:mrow><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mi>k</mml:mi></mml:msup><mml:mi>&#x003B3;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mi>x</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mi>k</mml:mi><mml:mi>h</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mi>&#x003B3;</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>k</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo><mml:mi>&#x003B3;</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mi>k</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(12)</label></disp-formula>
<p>where &#x003B3; is the gamma function.</p>
<p>The discrete-time implementation of <xref ref-type="disp-formula" rid="EQ13">Equation 12</xref> is</p>
<disp-formula id="EQ14"><mml:math id="M34"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:msup><mml:mi>D</mml:mi><mml:mi>&#x003B1;</mml:mi></mml:msup><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:msup><mml:mi>T</mml:mi><mml:mi>&#x003B1;</mml:mi></mml:msup></mml:mrow></mml:mfrac><mml:mstyle displaystyle='true'><mml:munderover><mml:mo>&#x02211;</mml:mo><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mi>r</mml:mi></mml:munderover><mml:mrow><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mi>k</mml:mi></mml:msup><mml:mi>&#x003B3;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mi>x</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mi>k</mml:mi><mml:mi>T</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mi>&#x003B3;</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>k</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo><mml:mi>&#x003B3;</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mi>k</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(13)</label></disp-formula>
<p>where <italic>T</italic> represents the sampling period and <italic>r</italic> represents the truncation order.</p>
<p>The equation of fractional-order particle swarm optimization (FOPSO) is expressed as</p>
<disp-formula id="EQ15"><mml:math id="M35"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mtable style="text-align:axis;" equalrows="false" columnlines="none" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:msup><mml:mrow><mml:mi>D</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0221D;</mml:mo></mml:mrow></mml:msup><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x01E21;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x01E8B;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x000F1;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(14)</label></disp-formula>
<p>FODPSO has an O(<italic>r</italic>) memory requirement. For <italic>r</italic> &#x0003D; 4, the fractional derivative of the velocity is given as</p>
<disp-formula id="EQ16"><mml:math id="M36"><mml:mtable class="eqnarray" columnalign="right"><mml:mtr><mml:mtd><mml:msubsup><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:msubsup><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac><mml:mi>&#x003B1;</mml:mi><mml:msubsup><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>6</mml:mn></mml:mrow></mml:mfrac><mml:msubsup><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x0002B;</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>24</mml:mn></mml:mrow></mml:mfrac><mml:msubsup><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>3</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x01E21;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x01E8B;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo
stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x000F1;</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mo>-</mml:mo><mml:msubsup><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(15)</label></disp-formula>
<p>where &#x003B1; is the fractional coefficient; DPSO is a special case of FODPSO with &#x003B1; &#x0003D; 1.</p>
<p>The FC operator is introduced into the DPSO velocity update formulas (<xref ref-type="disp-formula" rid="EQ16">Equation 15</xref>) to provide the particle velocity with a crucial &#x0201C;memory effect,&#x0201D; which fundamentally alters the algorithm&#x00027;s search behavior. This enhanced capability serves two functions: it significantly increases exploration, allowing the algorithm to escape common local optima and avoid premature convergence prevalent in image fusion tasks, and mathematically, it is proven to increase the fundamental convergence rate of the optimization process. This increased speed is the direct mechanism that ensures the high computational efficiency required for real-time application.</p>
<p>To clarify the methodological novelty, the proposed FOMODPSO represents a substantive algorithmic evolution from standard PSO variants. While MOPSO handles multiple objectives, it lacks the multi-swarm diversity of Darwinian principles, often leading to premature convergence. Conversely, DPSO excels at escaping local optima through natural selection but is traditionally limited to single-objective tasks. MODPSO bridges these by applying Pareto dominance to the Darwinian swarm-deletion logic. The final enhancement, FOMODPSO, introduces fractional-order velocity updates to add non-local memory, significantly accelerating convergence speed beyond integer-order methods. <xref ref-type="table" rid="T3">Table 3</xref> summarizes these distinctions.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Substantive comparison of evolutionary optimization variants.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Algorithm</bold></th>
<th valign="top" align="center"><bold>Multi-objective (Pareto)</bold></th>
<th valign="top" align="center"><bold>Darwinian (natural selection)</bold></th>
<th valign="top" align="center"><bold>Fractional calculus (memory)</bold></th>
<th valign="top" align="center"><bold>Primary strength</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">PSO</td>
<td valign="top" align="center">No</td>
<td valign="top" align="center">No</td>
<td valign="top" align="center">No</td>
<td valign="top" align="center">Simplicity, local search speed.</td>
</tr>
<tr>
<td valign="top" align="left">MOPSO</td>
<td valign="top" align="center">Yes</td>
<td valign="top" align="center">No</td>
<td valign="top" align="center">No</td>
<td valign="top" align="center">Basic trade-off management.</td>
</tr>
<tr>
<td valign="top" align="left">DPSO</td>
<td valign="top" align="center">No</td>
<td valign="top" align="center">Yes</td>
<td valign="top" align="center">No</td>
<td valign="top" align="center">Global search, local optima escape.</td>
</tr>
<tr>
<td valign="top" align="left">MODPSO</td>
<td valign="top" align="center">Yes</td>
<td valign="top" align="center">Yes</td>
<td valign="top" align="center">No</td>
<td valign="top" align="center">Robust multi-objective search.</td>
</tr>
<tr>
<td valign="top" align="left">FOMODPSO</td>
<td valign="top" align="center">Yes</td>
<td valign="top" align="center">Yes</td>
<td valign="top" align="center">Yes</td>
<td valign="top" align="center">Ultra-fast convergence &#x0002B; global search.</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec>
<label>3.8</label>
<title>Pretrained neural network</title>
<p>The multichannel PCNN (m-PCNN) is well known to be able to effectively manage different types of medical images (<xref ref-type="bibr" rid="B42">Zhao et al., 2014</xref>). The simplified m-PCNN is usually referred to as a three-dimensional PCNN because it has only three parameters (<xref ref-type="bibr" rid="B27">Raha et al., 2020</xref>). In this paper, we actualize the efficient optimization of parameters in the m-PCNN model. This is because it is essential to adjust the parameters to align with the input images for more accurate image fusion results. The simplified m-PCNN model can be seen in <xref ref-type="fig" rid="F1">Figure 1</xref>.</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Simplified m-PCNN model.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-05-1752625-g0001.tif">
<alt-text content-type="machine-generated">Block diagram illustrating a neural processing model with three sections: receptive field, modulation field, and pulse generator. Arrows indicate pathways between summation, multiplication, threshold adjustment, and output generation blocks, with labeled signals and variable names detailing system flow.</alt-text>
</graphic>
</fig>
<p>The system utilizes a simplified m-PCNN, which is a zero-learning architecture that eliminates the need for large training datasets or traditional loss functions. Instead of manual tuning or backpropagation, the network&#x00027;s performance depends on the selection of optimal parameters (&#x003B2;, &#x00398;, &#x003B1;<sub>&#x003B8;</sub>, <italic>V</italic><sub>&#x003B8;</sub>) through the proposed FOMODPSO algorithm. This optimization process treats the Tsallis cross-entropy between source and fused images as the fitness function, ensuring that the network&#x00027;s firing activity, which drives the fusion of high-frequency subbands, is maximized for information content. The architecture is applied to 14 groups of CT/MRI images (256 &#x000D7; 256 pixels), which are co-registered and normalized as a preprocessing step to ensure diagnostic accuracy.</p>
<p>The integration of these specific techniques is motivated by their complementary strengths: FOMODPSO is utilized to provide global optimization with fast convergence via fractional-order updates; Tsallis cross-entropy is selected as a robust objective function to measure multisource information transfer more effectively than standard entropy; and the PCNN is employed as the fusion rule for its biological pulse-synchronization property, which is superior at preserving fine spatial details and edges.</p>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>The proposed method</title>
<p>This section mainly describes the process of FOMODPSO-optimized Tsallis cross-entropy for application as external stimuli to the PCNN for the fusion of high-frequency subbands. Then the step-by-step fusion framework of our proposed method is described.</p>
<sec>
<label>4.1</label>
<title>High-frequency subband fusion involving PCNN</title>
<sec>
<label>4.1.1</label>
<title>Generation of optimized Tsallis cross-entropy</title>
<p>The Tsallis cross-entropy between <italic>A</italic> and <italic>B</italic> in the image analysis is defined as the sum of the Tsallis cross-entropy between pixels found within the target area of the image and the Tsallis cross-entropy between pixels found within the background area of the image. Then, taking into account the nonextensive (pseudo-additive) property of the Tsallis entropy, the total Tsallis cross-entropy of the image can be expressed as</p>
<disp-formula id="EQ17"><mml:math id="M38"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi><mml:mo>:</mml:mo><mml:mi>B</mml:mi><mml:mo>;</mml:mo><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mrow><mml:mi>A</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msubsup><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>q</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mrow><mml:mi>A</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msubsup><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(16)</label></disp-formula>
<p>When <italic>S</italic><sub><italic>q</italic></sub>(<italic>A</italic>:<italic>B</italic>; <italic>t</italic>) accepts an optimum value, the corresponding gray value is the optimal image potential value, that is,</p>
<disp-formula id="EQ18"><mml:math id="M39"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>T</mml:mi></mml:mrow><mml:mrow><mml:mi>o</mml:mi><mml:mi>p</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:munder><mml:mrow><mml:mo class="qopname">arg</mml:mo><mml:mo class="qopname">max</mml:mo></mml:mrow><mml:mrow><mml:mn>0</mml:mn><mml:mo>&#x02264;</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x02264;</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>A</mml:mi><mml:mo>;</mml:mo><mml:mi>B</mml:mi><mml:mo>;</mml:mo><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(17)</label></disp-formula>
<p>To account for the varying intensity ranges of high-frequency coefficients from different source modalities, the NSST coefficients are first normalized to a uniform range (e.g., [0, 1]). Subsequently, their probability distributions are estimated using histogram-based binning, which allows for the consistent calculation of the Tsallis cross-entropy regardless of the original coefficient magnitudes.</p>
<p>The fusion criterion is required for choosing the optimal fusion points and iteration number in PCNN. Tsallis cross-entropy is, therefore, applied as the criterion function for optimal fusion points. Tsallis cross-entropy can also be further explained to be used as the source of stimulation to the PCNN model. It is applied as the objective function to validate the optimal fusion points for considered images. FOMODPSO maximizes the objective function and is also used to find the optimal solution of the Tsallis cross-entropy function. This implies that the function <italic>f</italic>(<italic>T</italic><sub><italic>opt</italic></sub>) is maximized by the FOMODPSO, and the optimal values obtained indicate the region of the source images that should be fused, making the fusion results more reasonable. After the decomposition of the source images in the NSST domain, Tsallis cross-entropy is computed for all the coefficients. Then the Tsallis cross-entropies are sent to the PCNN as external stimuli.</p>
<p>Tsallis cross-entropy is strategically utilized as the objective function because, unlike standard Shannon entropy, it is a generalized form that inherently models the information transfer from both source images to the fused image simultaneously. This consideration of information from all inputs ensures the preservation of essential image features, thereby enhancing the overall fusion quality. Furthermore, maximizing the Tsallis cross-entropy function yields the optimal threshold, which is critical as it serves as the optimal external stimulus to the PCNN (<xref ref-type="disp-formula" rid="EQ19">Equation 18</xref>), making the fusion results more reasonable and accurate by selecting the most favorable coefficients.</p>
</sec>
<sec>
<label>4.1.2</label>
<title>The structure of PCNN</title>
<p>PCNN is applied to confirm the information sensitivity of the image pixels. The neural network accepts the Tsallis cross-entropy <italic>S</italic><sub><italic>q</italic></sub>(<italic>A</italic>:<italic>B</italic>) &#x0003D; <italic>S</italic> as its input and outputs a &#x0201C;firing count&#x0201D; (<xref ref-type="bibr" rid="B24">Mishra and Dhabal, 2020</xref>). The PCNN model equations are expressed in <xref ref-type="disp-formula" rid="EQ19">Equations 18</xref>&#x02013;<xref ref-type="disp-formula" rid="EQ24">23</xref>. The greater the firing &#x0201C;count&#x0201D; is, the more suitable the corresponding optimal fusion points <italic>T</italic><sub><italic>opt</italic></sub>:</p>
<disp-formula id="EQ19"><mml:math id="M40"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(18)</label></disp-formula>
<disp-formula id="EQ20"><mml:math id="M41"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi><mml:mi>k</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>Y</mml:mi></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(19)</label></disp-formula>
<disp-formula id="EQ21"><mml:math id="M42"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>U</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x0002B;</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>&#x003B2;</mml:mi><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(20)</label></disp-formula>
<disp-formula id="EQ22"><mml:math id="M43"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>Y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable style="text-align:axis;" equalrows="false" columnlines="none none none none none none none none none" equalcolumns="false" class="array"><mml:mtr><mml:mtd><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>U</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02265;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x000A0;&#x000A0;</mml:mtext></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;&#x000A0;</mml:mtext><mml:mi>o</mml:mi><mml:mi>t</mml:mi><mml:mi>h</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mi>w</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x000A0;&#x000A0;</mml:mtext></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(21)</label></disp-formula>
<disp-formula id="EQ23"><mml:math id="M44"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>Y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(22)</label></disp-formula>
<disp-formula id="EQ24"><mml:math id="M45"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>Y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(23)</label></disp-formula>
<p>where <italic>F</italic><sub><italic>ij</italic></sub> and <italic>L</italic><sub><italic>ij</italic></sub> are the feedings and linking synapses that align to the original <italic>i</italic>th and <italic>j</italic>th position of the pixel in the original image, <italic>S</italic><sub><italic>ij</italic></sub> is the external stimulus and controls the time of ignition, <italic>W</italic><sub><italic>ijkl</italic></sub> indicates the local connection matrices made up of weights that can quantify the influence of each neuron that is near the central neuron, &#x003B2; is the linking coefficient, <italic>Y</italic><sub><italic>ij</italic></sub> is the binary output of the model, &#x003B1;<sub>&#x003B8;</sub> is the attenuation time constant, <italic>V</italic><sub>&#x003B8;</sub> are the corresponding inherent voltage potentials, <italic>n</italic> represents the iteration time, and <italic>R</italic><sub><italic>ij</italic></sub> denotes the PCNN firing times.</p>
<p><xref ref-type="disp-formula" rid="EQ19">Equations 18</xref>&#x02013;<xref ref-type="disp-formula" rid="EQ23">22</xref> are the control equations of the model and ensure the high computational accuracy of the proposed model.</p>
</sec>
</sec>
<sec>
<label>4.2</label>
<title>Steps of the fusion process</title>
<p>To denote the point of the input images by similar corresponding pixels, the source images are required to be co-registered for the proposed method to give efficient results. The implementation of the multimodal image fusion scheme is described in the following steps and <xref ref-type="table" rid="T7">Algorithm 1</xref> shows the framework of FOMODPSO.</p>
<list list-type="simple">
<list-item><p><italic>Step 1</italic>. Decompose the previous co-registered medical images through NSST to generate low- and high-frequency sub-images (LFIs and HFIs, respectively).</p></list-item>
<list-item><p><italic>Step 2</italic>. Carry out the fusion on LFI coefficients using <xref ref-type="disp-formula" rid="EQ4">Equation 4</xref>.</p></list-item>
<list-item><p><italic>Step 3</italic>. Obtain the Tsallis cross-entropies that align with all HFIs as stated in Section 3.2.1. The generated entropies are then applied as external stimuli to PCNN using <xref ref-type="disp-formula" rid="EQ19">Equations 18</xref>&#x02013;<xref ref-type="disp-formula" rid="EQ23">22</xref>.</p></list-item>
<list-item><p><italic>Step 4</italic>. Compute the firing times <italic>R</italic><sub><italic>ij</italic></sub> by <xref ref-type="disp-formula" rid="EQ24">Equation 23</xref>.</p></list-item>
<list-item><p><italic>Step 5</italic>. Steps 1&#x02013;4 are repeated until the pre-selected number of iterations is reached.</p></list-item>
<list-item><p><italic>Step 6</italic>. Carry out the fusion of HFIs (<inline-formula><mml:math id="M46"><mml:mi>H</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>R</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula>) using the rule</p>
<p><disp-formula id="EQ25"><mml:math id="M47"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>H</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>R</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>H</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>A</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:mi>H</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>I</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(24)</label></disp-formula></p></list-item>
<list-item><p><italic>Step 7</italic>. Finally, the fused image <italic>Q</italic> is generated by using inverse NSST on the fused coefficients of LFIs and HFIs.</p></list-item>
</list>
<table-wrap position="float" id="T7">
<label>Algorithm 1</label>
<caption><p>Framework of FOMODPSO algorithm.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-05-1752625-i0001.tif"/>
</table-wrap>
<p><xref ref-type="fig" rid="F2">Figure 2</xref> shows the schematic diagram of the steps of the proposed image fusion framework.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Proposed FOMODPSO&#x02013;PCNN medical image fusion framework.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-05-1752625-g0002.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a medical image fusion process where CT and MRI source images undergo pre-processing, followed by optimization using FOMODPSO incorporating velocity updates, fitness evaluation, and Darwinian selection, then fused via a pulse-coupled neural network to produce a fused image.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Experimental results and discussion</title>
<sec>
<label>5.1</label>
<title>Experimental setup</title>
<p>To investigate the accuracy and robustness of the proposed FOMODPSO&#x02013;PCNN method, the experimental setup utilized 14 groups of public benchmark medical image pairs (28 images total), consisting of CT and magnetic resonance imaging (MRI) modalities with a resolution of 256 &#x000D7; 256 pixels. These images (<xref ref-type="bibr" rid="B13">Johnson and Becker, 1999</xref>) were sourced from the Harvard Medical Image Database (<ext-link ext-link-type="uri" xlink:href="https://dataverse.harvard.edu/">https://dataverse.harvard.edu/</ext-link>). For statistical validity, the multi-objective search quality (inverted generational distance (IGD) and HV) was evaluated based on 31 independent experimental runs, reporting the mean and standard deviation for each test instance. Additionally, a three-fold cross-validation protocol was implemented for the diagnostic performance evaluation (<xref ref-type="table" rid="T4">Table 4</xref>), ensuring that the reported accuracy (90.7%) and sensitivity results are consistent across multiple subsets of the data.</p>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Performance evaluation using three-fold cross-validation.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Method</bold></th>
<th valign="top" align="center"><bold>Evaluation parameter</bold></th>
<th valign="top" align="center"><bold>K = 1</bold></th>
<th valign="top" align="center"><bold>K = 2</bold></th>
<th valign="top" align="center"><bold>K = 3</bold></th>
<th valign="top" align="center"><bold>Average</bold></th>
<th valign="top" align="center"><bold>Standard deviation</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="4">QPSO &#x0002B; PCNN</td>
<td valign="top" align="center">Accuracy</td>
<td valign="top" align="center">0.637</td>
<td valign="top" align="center">0.558</td>
<td valign="top" align="center">0.567</td>
<td valign="top" align="center">0.587</td>
<td valign="top" align="center">0.043</td>
</tr>
<tr>
<td valign="top" align="center">Sensitivity</td>
<td valign="top" align="center">0.622</td>
<td valign="top" align="center">0.555</td>
<td valign="top" align="center">0.559</td>
<td valign="top" align="center">0.579</td>
<td valign="top" align="center">0.037</td>
</tr>
<tr>
<td valign="top" align="center">Specificity</td>
<td valign="top" align="center">0.845</td>
<td valign="top" align="center">0.764</td>
<td valign="top" align="center">0.769</td>
<td valign="top" align="center">0.793</td>
<td valign="top" align="center">0.045</td>
</tr>
<tr>
<td valign="top" align="center"><italic>F</italic><sub>1</sub> score</td>
<td valign="top" align="center">0.620</td>
<td valign="top" align="center">0.552</td>
<td valign="top" align="center">0.555</td>
<td valign="top" align="center">0.576</td>
<td valign="top" align="center">0.039</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">MOPSO &#x0002B; PCNN</td>
<td valign="top" align="center">Accuracy</td>
<td valign="top" align="center">0.770</td>
<td valign="top" align="center">0.657</td>
<td valign="top" align="center">0.638</td>
<td valign="top" align="center">0.688</td>
<td valign="top" align="center">0.071</td>
</tr>
<tr>
<td valign="top" align="center">Sensitivity</td>
<td valign="top" align="center">0.737</td>
<td valign="top" align="center">0.636</td>
<td valign="top" align="center">0.636</td>
<td valign="top" align="center">0.670</td>
<td valign="top" align="center">0.058</td>
</tr>
<tr>
<td valign="top" align="center">Specificity</td>
<td valign="top" align="center">0.846</td>
<td valign="top" align="center">0.753</td>
<td valign="top" align="center">0.755</td>
<td valign="top" align="center">0.785</td>
<td valign="top" align="center">0.053</td>
</tr>
<tr>
<td valign="top" align="center"><italic>F</italic><sub>1</sub> score</td>
<td valign="top" align="center">0.720</td>
<td valign="top" align="center">0.643</td>
<td valign="top" align="center">0.643</td>
<td valign="top" align="center">0.669</td>
<td valign="top" align="center">0.044</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">PSO &#x0002B; PCNN</td>
<td valign="top" align="center">Accuracy</td>
<td valign="top" align="center">0.774</td>
<td valign="top" align="center">0.661</td>
<td valign="top" align="center">0.641</td>
<td valign="top" align="center">0.692</td>
<td valign="top" align="center">0.072</td>
</tr>
<tr>
<td valign="top" align="center">Sensitivity</td>
<td valign="top" align="center">0.740</td>
<td valign="top" align="center">0.642</td>
<td valign="top" align="center">0.640</td>
<td valign="top" align="center">0.674</td>
<td valign="top" align="center">0.057</td>
</tr>
<tr>
<td valign="top" align="center">Specificity</td>
<td valign="top" align="center">0.847</td>
<td valign="top" align="center">0.843</td>
<td valign="top" align="center">0.833</td>
<td valign="top" align="center">0.841</td>
<td valign="top" align="center">0.007</td>
</tr>
<tr>
<td valign="top" align="center"><italic>F</italic><sub>1</sub> score</td>
<td valign="top" align="center">0.725</td>
<td valign="top" align="center">0.647</td>
<td valign="top" align="center">0.646</td>
<td valign="top" align="center">0.673</td>
<td valign="top" align="center">0.045</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">MFOA &#x0002B; PCNN</td>
<td valign="top" align="center">Accuracy</td>
<td valign="top" align="center">0.774</td>
<td valign="top" align="center">0.665</td>
<td valign="top" align="center">0.645</td>
<td valign="top" align="center">0.696</td>
<td valign="top" align="center">0.069</td>
</tr>
<tr>
<td valign="top" align="center">Sensitivity</td>
<td valign="top" align="center">0.744</td>
<td valign="top" align="center">0.646</td>
<td valign="top" align="center">0.644</td>
<td valign="top" align="center">0.678</td>
<td valign="top" align="center">0.057</td>
</tr>
<tr>
<td valign="top" align="center">Specificity</td>
<td valign="top" align="center">0.847</td>
<td valign="top" align="center">0.842</td>
<td valign="top" align="center">0.835</td>
<td valign="top" align="center">0.841</td>
<td valign="top" align="center">0.006</td>
</tr>
<tr>
<td valign="top" align="center"><italic>F</italic><sub>1</sub> score</td>
<td valign="top" align="center">0.727</td>
<td valign="top" align="center">0.647</td>
<td valign="top" align="center">0.647</td>
<td valign="top" align="center">0.674</td>
<td valign="top" align="center">0.046</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">WOA &#x0002B; PCNN</td>
<td valign="top" align="center">Accuracy</td>
<td valign="top" align="center">0.781</td>
<td valign="top" align="center">0.669</td>
<td valign="top" align="center">0.650</td>
<td valign="top" align="center">0.700</td>
<td valign="top" align="center">0.071</td>
</tr>
<tr>
<td valign="top" align="center">Sensitivity</td>
<td valign="top" align="center">0.747</td>
<td valign="top" align="center">0.650</td>
<td valign="top" align="center">0.649</td>
<td valign="top" align="center">0.682</td>
<td valign="top" align="center">0.056</td>
</tr>
<tr>
<td valign="top" align="center">Specificity</td>
<td valign="top" align="center">0.849</td>
<td valign="top" align="center">0.844</td>
<td valign="top" align="center">0.838</td>
<td valign="top" align="center">0.844</td>
<td valign="top" align="center">0.006</td>
</tr>
<tr>
<td valign="top" align="center"><italic>F</italic><sub>1</sub> score</td>
<td valign="top" align="center">0.730</td>
<td valign="top" align="center">0.651</td>
<td valign="top" align="center">0.650</td>
<td valign="top" align="center">0.677</td>
<td valign="top" align="center">0.046</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">PSO-TV &#x0002B; PCNN</td>
<td valign="top" align="center">Accuracy</td>
<td valign="top" align="center">0.783</td>
<td valign="top" align="center">0.672</td>
<td valign="top" align="center">0.652</td>
<td valign="top" align="center">0.702</td>
<td valign="top" align="center">0.071</td>
</tr>
<tr>
<td valign="top" align="center">Sensitivity</td>
<td valign="top" align="center">0.750</td>
<td valign="top" align="center">0.655</td>
<td valign="top" align="center">0.655</td>
<td valign="top" align="center">0.687</td>
<td valign="top" align="center">0.055</td>
</tr>
<tr>
<td valign="top" align="center">Specificity</td>
<td valign="top" align="center">0.858</td>
<td valign="top" align="center">0.850</td>
<td valign="top" align="center">0.842</td>
<td valign="top" align="center">0.850</td>
<td valign="top" align="center">0.008</td>
</tr>
<tr>
<td valign="top" align="center"><italic>F</italic><sub>1</sub> score</td>
<td valign="top" align="center">0.733</td>
<td valign="top" align="center">0.655</td>
<td valign="top" align="center">0.655</td>
<td valign="top" align="center">0.681</td>
<td valign="top" align="center">0.045</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">PSO-DE &#x0002B; PCNN</td>
<td valign="top" align="center">Accuracy</td>
<td valign="top" align="center">0.784</td>
<td valign="top" align="center">0.672</td>
<td valign="top" align="center">0.653</td>
<td valign="top" align="center">0.703</td>
<td valign="top" align="center">0.071</td>
</tr>
<tr>
<td valign="top" align="center">Sensitivity</td>
<td valign="top" align="center">0.753</td>
<td valign="top" align="center">0.657</td>
<td valign="top" align="center">0.657</td>
<td valign="top" align="center">0.689</td>
<td valign="top" align="center">0.055</td>
</tr>
<tr>
<td valign="top" align="center">Specificity</td>
<td valign="top" align="center">0.867</td>
<td valign="top" align="center">0.851</td>
<td valign="top" align="center">0.848</td>
<td valign="top" align="center">0.855</td>
<td valign="top" align="center">0.010</td>
</tr>
<tr>
<td valign="top" align="center"><italic>F</italic><sub>1</sub> score</td>
<td valign="top" align="center">0.736</td>
<td valign="top" align="center">0.658</td>
<td valign="top" align="center">0.657</td>
<td valign="top" align="center">0.684</td>
<td valign="top" align="center">0.045</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">DE &#x0002B; PCNN</td>
<td valign="top" align="center">Accuracy</td>
<td valign="top" align="center">0.786</td>
<td valign="top" align="center">0.674</td>
<td valign="top" align="center">0.653</td>
<td valign="top" align="center">0.704</td>
<td valign="top" align="center">0.072</td>
</tr>
<tr>
<td valign="top" align="center">Sensitivity</td>
<td valign="top" align="center">0.756</td>
<td valign="top" align="center">0.660</td>
<td valign="top" align="center">0.661</td>
<td valign="top" align="center">0.692</td>
<td valign="top" align="center">0.055</td>
</tr>
<tr>
<td valign="top" align="center">Specificity</td>
<td valign="top" align="center">0.889</td>
<td valign="top" align="center">0.874</td>
<td valign="top" align="center">0.852</td>
<td valign="top" align="center">0.872</td>
<td valign="top" align="center">0.019</td>
</tr>
<tr>
<td valign="top" align="center"><italic>F</italic><sub>1</sub> score</td>
<td valign="top" align="center">0.740</td>
<td valign="top" align="center">0.660</td>
<td valign="top" align="center">0.660</td>
<td valign="top" align="center">0.687</td>
<td valign="top" align="center">0.046</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">Proposed</td>
<td valign="top" align="center">Accuracy</td>
<td valign="top" align="center">0.988</td>
<td valign="top" align="center">0.877</td>
<td valign="top" align="center">0.857</td>
<td valign="top" align="center"><bold>0.907</bold></td>
<td valign="top" align="center"><bold>0.071</bold></td>
</tr>
<tr>
<td valign="top" align="center">Sensitivity</td>
<td valign="top" align="center">0.858</td>
<td valign="top" align="center">0.764</td>
<td valign="top" align="center">0.767</td>
<td valign="top" align="center"><bold>0.796</bold></td>
<td valign="top" align="center"><bold>0.054</bold></td>
</tr>
<tr>
<td valign="top" align="center">Specificity</td>
<td valign="top" align="center">0.898</td>
<td valign="top" align="center">0.875</td>
<td valign="top" align="center">0.854</td>
<td valign="top" align="center"><bold>0.876</bold></td>
<td valign="top" align="center"><bold>0.022</bold></td>
</tr>
<tr>
<td valign="top" align="center"><italic>F</italic><sub>1</sub> score</td>
<td valign="top" align="center">0.840</td>
<td valign="top" align="center">0.762</td>
<td valign="top" align="center">0.765</td>
<td valign="top" align="center"><bold>0.789</bold></td>
<td valign="top" align="center"><bold>0.044</bold></td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>The figures in bold indicate the highest values obtained by our proposed method for the evaluation parameters (Average and Standard deviation).</p>
</table-wrap-foot>
</table-wrap>
<p>As seen in <xref ref-type="fig" rid="F3">Figure 3</xref>, images A are the source CT images and images B are the source MRI images, which are to be fused. In total, we used 28 source images to conduct our experiments; 14 from each modality. The images have 256 &#x000D7; 256 pixel resolutions. All the images in the dataset are correctly preregistered. The simulations for the search of the optimal values of the test functions are conducted in Matlab R2018a (version 9.4.0.813654, 64-bit), and the hardware platform is a 3.20 GHz processor with 4 GB RAM running Windows 7.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Source images for multimodal medical image fusion categorized from group 1 (G1) to group 14 (G14). In each group, <bold>(A)</bold> represents the source computed tomography (CT) images and <bold>(B)</bold> represents the source magnetic resonance imaging (MRI) images used as inputs for the fusion process.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-05-1752625-g0003.tif">
<alt-text content-type="machine-generated">Fourteen grouped panels labeled G1 to G14, each containing two grayscale medical brain scan images labeled A and B, show axial views with varying contrasts and anatomical details, illustrating differences in neuroimaging techniques or patient conditions.</alt-text>
</graphic>
</fig>
<p>The common parameters of the FOMODPSO, the population size and maximum iteration, are set to 20 and 200, respectively; the stagnancy threshold is set to 15; we set the &#x003B1; parameter to 0.6 as it has been proven to give better results when applied (<xref ref-type="bibr" rid="B2">Chakraborty and Verma, 2021</xref>).</p>
<p>To evaluate the performance of the proposed method denoted as the FOMODPSO &#x0002B; PCNN MMIF method, some comparative experiments are conducted with eight image fusion algorithms on five multi-objective problems (test instances) <italic>f</italic><sub>1</sub> &#x02212; <italic>f</italic><sub>5</sub> (<xref ref-type="bibr" rid="B8">Gao et al., 2014</xref>; <xref ref-type="bibr" rid="B4">Couceiro et al., 2012</xref>). The eight image fusion algorithms are QPSO&#x0002B;PCNN (<xref ref-type="bibr" rid="B34">Tian et al., 2016</xref>), MOPSO&#x0002B;PCNN (<xref ref-type="bibr" rid="B36">Wang et al., 2016</xref>), PSO&#x0002B;PCNN (<xref ref-type="bibr" rid="B14">Kumar et al., 2021</xref>), MFOA&#x0002B;PCNN (<xref ref-type="bibr" rid="B33">Tang et al., 2019</xref>), WOA &#x0002B; PCNN (<xref ref-type="bibr" rid="B27">Raha et al., 2020</xref>), PSO-TV&#x0002B;PCNN (<xref ref-type="bibr" rid="B25">Nie et al., 2021</xref>), PSO-DE&#x0002B;PCNN (<xref ref-type="bibr" rid="B22">Liu R. et al., 2022</xref>), and DE&#x0002B;PCNN (<xref ref-type="bibr" rid="B5">Das et al., 2022</xref>). QPSO&#x0002B;PCNN is an MMIF method that uses QPSO for the optimization of the PCNN to obtain good-performance fused images. MOPSO&#x0002B;PCNN is an MMIF method that applies MOPSO in the optimization of the PCNN to obtain fused images of very good visual quality. PSO&#x0002B;PCNN is an MMIF method that applies hybrid PSO for the optimization of the PCNN to obtain high-performance fused image results. MFOA&#x0002B;PCNN is an MMIF method that applies an MFOA for the optimization of the PCNN to obtain fused image results of high quality. WOA&#x0002B;PCNN is an MMIF method that optimizes PCNN using the WOA for fused image results of well-preserved image details. PSO-TV&#x0002B;PCNN is an MMIF approach that applies PSO and total variation (TV) for the optimization of the PCNN to obtain fused image results of high visual quality. 
PSO-DE&#x0002B;PCNN is an MMIF method that optimizes PCNN using PSO and standard DE algorithm to obtain fused images of very good fusion effect. DE&#x0002B;PCNN is an MMIF method that applies the DE for the optimization of the PCNN to obtain fused images of both good visual quality and superior quantitative evaluation results.</p>
<p>A comparison of multi-objective optimization with single-objective optimization shows how much greater the complexity of multi-objective optimization is. Another difference is in the non-uniqueness of the solution of multi-objective optimization, which is a solution set. <xref ref-type="table" rid="T5">Table 5</xref> shows the optimal solution set. The solution corresponding to the most suitable visual effect of the image fusion results is selected, yielding the final optimal PCNN parameters: the connection weight <italic>W</italic> &#x0003D; [0.5, 1.0, 0.5;1.0, 0, 1.0;0.5, 1.0, 0.5], <italic>N</italic> &#x0003D; 100; &#x003B2; &#x0003D; 0.9950; &#x003B8; &#x0003D; 0.1233; <italic>V</italic><sub>&#x003B8;</sub> &#x0003D; 1.8119; &#x003B1;<sub>&#x003B8;</sub> &#x0003D; 0.1482.</p>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Set of optimal solutions.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Optimal PCNN parameters</bold></th>
<th valign="top" align="center"><bold>&#x003B2;</bold></th>
<th valign="top" align="center"><bold>&#x003B8;</bold></th>
<th valign="top" align="center"><bold>V<sub>&#x003B8;</sub></bold></th>
<th valign="top" align="center"><bold>&#x003B1;<sub>&#x003B8;</sub></bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">1</td>
<td valign="top" align="center">0.9812</td>
<td valign="top" align="center">0.0212</td>
<td valign="top" align="center">0.7656</td>
<td valign="top" align="center">0.0249</td>
</tr>
<tr>
<td valign="top" align="left">2</td>
<td valign="top" align="center">0.7111</td>
<td valign="top" align="center">0.3819</td>
<td valign="top" align="center">0.7513</td>
<td valign="top" align="center">0.0488</td>
</tr>
<tr>
<td valign="top" align="left">3</td>
<td valign="top" align="center">0.9950</td>
<td valign="top" align="center">0.1233</td>
<td valign="top" align="center">1.8119</td>
<td valign="top" align="center">0.1482</td>
</tr>
<tr>
<td valign="top" align="left">4</td>
<td valign="top" align="center">0.1655</td>
<td valign="top" align="center">0.2144</td>
<td valign="top" align="center">0.5383</td>
<td valign="top" align="center">0.1794</td>
</tr>
<tr>
<td valign="top" align="left">5</td>
<td valign="top" align="center">0.2441</td>
<td valign="top" align="center">0.3971</td>
<td valign="top" align="center">4.6767</td>
<td valign="top" align="center">0.3689</td>
</tr>
<tr>
<td valign="top" align="left">6</td>
<td valign="top" align="center">0.5100</td>
<td valign="top" align="center">1.2321</td>
<td valign="top" align="center">8.9920</td>
<td valign="top" align="center">0.5000</td>
</tr></tbody>
</table>
</table-wrap>
<p>The optimization parameters can be summarized as</p>
<list list-type="bullet">
<list-item><p>FOMODPSO: population size = 20; max. iterations = 200; &#x003B1; (fractional order) = 0.6.</p></list-item>
<list-item><p>Final optimized PCNN parameters: <italic>W</italic> = [0.5, 1.0, 0.5; 1.0, 0, 1.0; 0.5, 1.0, 0.5]; &#x003B2; = 0.9950; &#x003B8; = 0.1233; <italic>V</italic><sub>&#x003B8;</sub> = 1.8119; &#x003B1;<sub>&#x003B8;</sub> = 0.1482.</p></list-item>
</list>
</sec>
<sec>
<label>5.2</label>
<title>Performance metrics</title>
<sec>
<label>5.2.1</label>
<title>Quantitative metrics</title>
<p>We apply two metrics, IGD and hypervolume (HV; <xref ref-type="bibr" rid="B17">Li et al., 2019</xref>), in this work as the characteristic factors to increase the search performance of our FOMODPSO algorithm. The Pareto set is the set of all Pareto optimal solutions, while the Pareto front (PF) is the set of objective values of Pareto optimal solutions. The approximation set is the set of the non-dominated solutions that are obtained from searching, while the approximation front (AF) is the set of objective values of the non-dominated solutions. When tackling multi-objective optimization problems, it is beneficial when AF converges to PF as closely as possible (convergence) to ensure its accuracy and also to distribute along the entire PF as uniformly as possible (diversity) to generate numerous representative solutions. IGD can verify the convergence and diversity of multi-objective evolutionary algorithms when multi-objective problems are being solved. HV can also verify convergence and diversity simultaneously by computing the size of the space existing between reference points and individuals in AF. This implies that low IGD and high HV indicate that AF is near to PF and distributes evenly as closely as possible along the entire PF. 
When computing HV in the following experiments, the reference points are set as <inline-formula><mml:math id="M50"><mml:mstyle class="text"><mml:mtext mathvariant="bold">z</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle class="text"><mml:mtext mathvariant="bold">z</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle class="text"><mml:mtext mathvariant="bold">z</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle class="text"><mml:mtext mathvariant="bold">z</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mstyle class="text"><mml:mtext 
mathvariant="bold">z</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mn>3</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mtext>&#x000A0;</mml:mtext><mml:mi>f</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mtext>&#x000A0;</mml:mtext><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mn>5</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>, respectively.</p>
</sec>
<sec>
<label>5.2.2</label>
<title>Objective quality metrics</title>
<p>The performance of the proposed method is measured by taking into consideration some popular evaluation metrics. The chosen evaluation measures are entropy (ENT), mutual information (MI), edge information similarity (<italic>Q</italic><sub><italic>abf</italic></sub>), and structural similarity index [SSIM; for mathematical information see (<xref ref-type="bibr" rid="B15">Li et al., 2020</xref>; <xref ref-type="bibr" rid="B32">Singh and Anand, 2020</xref>; <xref ref-type="bibr" rid="B11">Hermessi et al., 2018</xref>)]. The selected evaluation measures, entropy (ENT), MI, edge information similarity (<italic>Q</italic><sub><italic>abf</italic></sub>), and structural similarity index (SSIM) are chosen to provide a multidimensional assessment of fusion quality. ENT and MI are utilized to measure the richness and volume of information transferred from the source images, while <italic>Q</italic><sub><italic>abf</italic></sub> and SSIM validate the preservation of edges and structural integrity, ensuring the fused images are diagnostically reliable. To guarantee statistical robustness, all multi-objective search quality results are reported as the mean and standard deviation across 31 independent runs. Diagnostic performance is further validated using three-fold cross-validation, ensuring that the superior performance of the FOMODPSO &#x0002B; PCNN method is consistent across the 14 clinical image groups and various test instances.</p>
</sec>
</sec>
<sec>
<label>5.3</label>
<title>Experimental results</title>
<sec>
<label>5.3.1</label>
<title>Quantitative evaluation results and analysis</title>
<p>According to the experimental results obtained 31 times running, IGD and HV (mean and standard deviation) values of fused images on test instances <italic>f</italic><sub>1</sub> &#x02212; <italic>f</italic><sub>5</sub> are computed; these are tabulated in <xref ref-type="table" rid="T6">Table 6</xref> to demonstrate the basis of comparison between FOMODPSO &#x0002B; PCNN and other image fusion methods. The first quality metric value noted in <xref ref-type="table" rid="T6">Table 6</xref> is the mean, and on the right side of the mean is the standard deviation for each of the IGD and HV experimental results. The rankings are indicated in square brackets on the right side of the quality metric values. To show that the performance of FOMODPSO &#x0002B; PCNN is better than, worse than, or similar to that of the compared method, we use &#x0201C;&#x022A2;,&#x0201D; &#x0201C;&#x0226C;,&#x0201D; and &#x0201C;&#x0007E;,&#x0201D; respectively. Finally, mean ranking (MR) is obtained by each method in all test instances and the number of &#x0201C;&#x022A2;,&#x0201D; &#x0201C;&#x0226C;,&#x0201D; and &#x0201C;&#x0007E;&#x0201D; indicated for each compared method are computed to evaluate the average performance of the image fusion methods.</p>
<table-wrap-group position="float" id="T6">
<label>Table 6</label>
<caption><p>Obtained results (mean (std. dev.)[rank]) of IGD and HV metric values of the proposed FOMODPSO &#x0002B; PCNN and eight state-of-the-art image fusion algorithms.</p></caption>
<table-wrap>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Instance</bold></th>
<th valign="top" align="center"><bold>QPSO &#x0002B; PCNN</bold></th>
<th valign="top" align="center"><bold>MOPSO &#x0002B; PCNN</bold></th>
<th valign="top" align="center"><bold>PSO &#x0002B; PCNN</bold></th>
<th valign="top" align="center"><bold>MFOA &#x0002B; PCNN</bold></th>
<th valign="top" align="center"><bold>WOA &#x0002B; PCNN</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td/>
<td valign="top" align="center">IGD</td>
<td/>
<td/>
<td/>
<td/>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>1</sub></td>
<td valign="top" align="center">7.531e-02<sup>&#x022A2;</sup>5.65e-04[9]</td>
<td valign="top" align="center">6.943e-02<sup>&#x022A2;</sup>4.66e-04[7]</td>
<td valign="top" align="center">5.720e-02<sup>&#x022A2;</sup>6.22e-03[6]</td>
<td valign="top" align="center">7.995e-03<sup>&#x022A2;</sup>5.84e-04[4]</td>
<td valign="top" align="center">7.239e-02<sup>&#x022A2;</sup>4.67e-03[8]</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>2</sub></td>
<td valign="top" align="center">8.754e-02<sup>&#x022A2;</sup>6.65e-02[9]</td>
<td valign="top" align="center">5.966e-02<sup>&#x022A2;</sup>6.66e-04[6]</td>
<td valign="top" align="center">7.723e-02<sup>&#x022A2;</sup>6.43e-03[7]</td>
<td valign="top" align="center">9.745e-03<sup>&#x022A2;</sup>7.86e-03[5]</td>
<td valign="top" align="center">8.455e-02<sup>&#x022A2;</sup>5.65e-03[8]</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>3</sub></td>
<td valign="top" align="center">8.351e-02<sup>&#x022A2;</sup>3.57e-03[9]</td>
<td valign="top" align="center">5.945e-02<sup>&#x022A2;</sup>7.63e-03[7]</td>
<td valign="top" align="center">8.981e-03<sup>&#x022A2;</sup>4.10e-03[3]</td>
<td valign="top" align="center">7.722e-02<sup>&#x022A2;</sup>2.82e-05[8]</td>
<td valign="top" align="center">9.566e-03<sup>&#x022A2;</sup>1.56e-04[5]</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>4</sub></td>
<td valign="top" align="center">6.897e-02<sup>&#x022A2;</sup>1.67e-04[7]</td>
<td valign="top" align="center">6.976e-02<sup>&#x022A2;</sup>3.69e-03[8]</td>
<td valign="top" align="center">7.741e-02<sup>&#x022A2;</sup>9.59e-05[9]</td>
<td valign="top" align="center">3.113e-04<sup>&#x0226C;</sup>3.87e-03[1]</td>
<td valign="top" align="center">9.272e-03<sup>&#x022A2;</sup>5.65e-05[6]</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>5</sub></td>
<td valign="top" align="center">3.540e-02<sup>&#x022A2;</sup>5.67e-03[9]</td>
<td valign="top" align="center">9.965e-03<sup>&#x022A2;</sup>2.68e-03[8]</td>
<td valign="top" align="center">9.740e-03<sup>&#x022A2;</sup>6.28e-04[6]</td>
<td valign="top" align="center">9.780e-03<sup>&#x022A2;</sup>5.86e-03[7]</td>
<td valign="top" align="center">9.551e-03<sup>&#x022A2;</sup>4.66e-03[5]</td>
</tr>
<tr>
<td/>
<td valign="top" align="center">HV</td>
<td/>
<td/>
<td/>
<td/>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>1</sub></td>
<td valign="top" align="center">1.682e&#x0002B;01<sup>&#x022A2;</sup>3.65e-03[5]</td>
<td valign="top" align="center">1.652e&#x0002B;01<sup>&#x022A2;</sup>5.23e-03[8]</td>
<td valign="top" align="center">1.676e&#x0002B;01<sup>&#x022A2;</sup>3.87e-03[7]</td>
<td valign="top" align="center">1.686e&#x0002B;01<sup>&#x022A2;</sup>1.67e-03[4]</td>
<td valign="top" align="center">1.634e&#x0002B;01<sup>&#x022A2;</sup>5.67e-03[9]</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>2</sub></td>
<td valign="top" align="center">2.165e&#x0002B;00<sup>&#x022A2;</sup>3.66e-04[7]</td>
<td valign="top" align="center">2.076e&#x0002B;00<sup>&#x022A2;</sup>5.45e-03[9]</td>
<td valign="top" align="center">2.177e&#x0002B;00<sup>&#x022A2;</sup>5.87e-03[6]</td>
<td valign="top" align="center">2.285e&#x0002B;00<sup>&#x022A2;</sup>2.66e-03[5]</td>
<td valign="top" align="center">2.487e&#x0002B;00<sup>&#x022A2;</sup>4.67e-03[3]</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>3</sub></td>
<td valign="top" align="center">3.296e&#x0002B;00<sup>&#x022A2;</sup>7.63e-04[7]</td>
<td valign="top" align="center">3.571e&#x0002B;00<sup>&#x022A2;</sup>6.21e-04[3]</td>
<td valign="top" align="center">3.334e&#x0002B;00<sup>&#x022A2;</sup>5.87e-04[6]</td>
<td valign="top" align="center">3.187e&#x0002B;00<sup>&#x022A2;</sup>6.57e-03[8]</td>
<td valign="top" align="center">3.662e&#x0002B;00<sup>&#x022A2;</sup>1.64e-03[2]</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>4</sub></td>
<td valign="top" align="center">4.044e&#x0002B;00<sup>&#x022A2;</sup>5.68e-03[9]</td>
<td valign="top" align="center">4.176e&#x0002B;00<sup>&#x022A2;</sup>2.70e-03[6]</td>
<td valign="top" align="center">4.206e&#x0002B;00<sup>&#x022A2;</sup>7.85e-03[5]</td>
<td valign="top" align="center">4.298e&#x0002B;00<sup>&#x022A2;</sup>5.67e-03[2]</td>
<td valign="top" align="center">4.142e&#x0002B;00<sup>&#x022A2;</sup>7.68e-04[7]</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>5</sub></td>
<td valign="top" align="center">5.116e&#x0002B;00<sup>&#x022A2;</sup>2.69e-03[7]</td>
<td valign="top" align="center">5.197e&#x0002B;00<sup>&#x022A2;</sup>3.32e-03[6]</td>
<td valign="top" align="center">5.368e&#x0002B;00<sup>&#x022A2;</sup>3.86e-03[3]</td>
<td valign="top" align="center">5.352e&#x0002B;00<sup>&#x022A2;</sup>7.67e-03[4]</td>
<td valign="top" align="center">5.075e&#x0002B;00<sup>&#x022A2;</sup>6.62e-04[9]</td>
</tr>
<tr>
<td valign="top" align="left">Mean rank</td>
<td valign="top" align="center">7.800</td>
<td valign="top" align="center">6.800</td>
<td valign="top" align="center">5.500</td>
<td valign="top" align="center">5.800</td>
<td valign="top" align="center">6.200</td>
</tr>
<tr>
<td valign="top" align="left">&#x022A2;/&#x0226C;/&#x0007E;</td>
<td valign="top" align="center">10/0/0</td>
<td valign="top" align="center">10/0/0</td>
<td valign="top" align="center">10/0/0</td>
<td valign="top" align="center">9/1/0</td>
<td valign="top" align="center">10/0/0</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Instance</bold></th>
<th valign="top" align="center"><bold>PSO-TV</bold> &#x0002B; <bold>PCNN</bold></th>
<th valign="top" align="center"><bold>PSO-DE</bold> &#x0002B; <bold>PCNN</bold></th>
<th valign="top" align="center"><bold>DE</bold> &#x0002B; <bold>PCNN</bold></th>
<th valign="top" align="center"><bold>FOMODPSO</bold>&#x0002B;<bold>PCNN</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td/>
<td valign="top" align="center">IGD</td>
<td/>
<td/>
<td/>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>1</sub></td>
<td valign="top" align="center">8.312e-03<sup>&#x022A2;</sup>8.21e-03[5]</td>
<td valign="top" align="center">7.974e-03<sup>&#x022A2;</sup>7.33e-03[3]</td>
<td valign="top" align="center">7.885e-03<sup>&#x022A2;</sup>8.74e-05[2]</td>
<td valign="top" align="center"><bold>7.584e-03 2.87e-04[1]</bold></td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>2</sub></td>
<td valign="top" align="center">9.455e-03<sup>&#x022A2;</sup>4.44e-03[4]</td>
<td valign="top" align="center">8.835e-03<sup>&#x022A2;</sup>7.36e-04[2]</td>
<td valign="top" align="center">9.177e-03<sup>&#x022A2;</sup>3.87e-04[3]</td>
<td valign="top" align="center"><bold>8.675e-03 5.87e-03[1]</bold></td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>3</sub></td>
<td valign="top" align="center">8.654e-03<sup>&#x022A2;</sup>3.35e-04[2]</td>
<td valign="top" align="center">9.862e-03<sup>&#x022A2;</sup>6.87e-03[6]</td>
<td valign="top" align="center">9.252e-03<sup>&#x022A2;</sup>8.12e-04[4]</td>
<td valign="top" align="center"><bold>8.247e-03 6.78e-04[1]</bold></td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>4</sub></td>
<td valign="top" align="center">8.998e-03<sup>&#x022A2;</sup>6.89e-04[5]</td>
<td valign="top" align="center">8.770e-03<sup>&#x022A2;</sup>3.93e-04[4]</td>
<td valign="top" align="center">8.676e-03<sup>&#x022A2;</sup>6.31e-03[3]</td>
<td valign="top" align="center">3.713e-04 2.84e-04[2]</td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>5</sub></td>
<td valign="top" align="center">9.227e-03<sup>&#x022A2;</sup>7.50e-03[4]</td>
<td valign="top" align="center">8.874e-03<sup>&#x022A2;</sup>5.47e-03[3]</td>
<td valign="top" align="center">7.736e-03<sup>&#x0007E;</sup>5.65e-04[2]</td>
<td valign="top" align="center"><bold>7.735e-03 5.63e-05[1]</bold></td>
</tr>
<tr>
<td/>
<td valign="top" align="center">HV</td>
<td/>
<td/>
<td/>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>1</sub></td>
<td valign="top" align="center">1.679e&#x0002B;01<sup>&#x022A2;</sup>2.72e-03[6]</td>
<td valign="top" align="center">1.689e&#x0002B;01<sup>&#x022A2;</sup>2.67e-03[3]</td>
<td valign="top" align="center">1.695e&#x0002B;01<sup>&#x022A2;</sup>1.84e-04[2]</td>
<td valign="top" align="center"><bold>1.697e&#x0002B;01 7.65e-04[1]</bold></td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>2</sub></td>
<td valign="top" align="center">2.145e&#x0002B;00<sup>&#x022A2;</sup>3.73e-03[8]</td>
<td valign="top" align="center">2.662e&#x0002B;00<sup>&#x022A2;</sup>3.69e-03[2]</td>
<td valign="top" align="center">2.467e&#x0002B;00<sup>&#x022A2;</sup>5.72e-04[4]</td>
<td valign="top" align="center"><bold>2.973e&#x0002B;00 8.85e-03[1]</bold></td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>3</sub></td>
<td valign="top" align="center">3.492e&#x0002B;00<sup>&#x022A2;</sup>1.64e-03[5]</td>
<td valign="top" align="center">3.031e&#x0002B;00<sup>&#x022A2;</sup>7.72e-04[9]</td>
<td valign="top" align="center">3.499e&#x0002B;00<sup>&#x022A2;</sup>6.43e-03[4]</td>
<td valign="top" align="center"><bold>3.755e&#x0002B;00 4.58e-04[1]</bold></td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>4</sub></td>
<td valign="top" align="center">4.074e&#x0002B;00<sup>&#x022A2;</sup>6.87e-04[8]</td>
<td valign="top" align="center">4.287e&#x0002B;00<sup>&#x022A2;</sup>4.77e-04[4]</td>
<td valign="top" align="center">4.287e&#x0002B;00<sup>&#x022A2;</sup>4.82e-03[3]</td>
<td valign="top" align="center"><bold>4.998e&#x0002B;00 6.87e-05[1]</bold></td>
</tr>
<tr>
<td valign="top" align="left"><italic>f</italic><sub>5</sub></td>
<td valign="top" align="center">5.347e&#x0002B;00<sup>&#x022A2;</sup>9.78e-04[5]</td>
<td valign="top" align="center">5.104e&#x0002B;00<sup>&#x022A2;</sup>6.75e-04[8]</td>
<td valign="top" align="center">5.450e&#x0002B;00<sup>&#x0007E;</sup>3.88e-04[2]</td>
<td valign="top" align="center"><bold>5.450e&#x0002B;00 7.67e-05[1]</bold></td>
</tr>
<tr>
<td valign="top" align="left">Mean rank</td>
<td valign="top" align="center">5.200</td>
<td valign="top" align="center">4.400</td>
<td valign="top" align="center">2.900</td>
<td valign="top" align="center"><bold>1.100</bold></td>
</tr>
<tr>
<td valign="top" align="left">&#x022A2;/&#x0226C;/&#x0007E;</td>
<td valign="top" align="center">10/0/0</td>
<td valign="top" align="center">10/0/0</td>
<td valign="top" align="center">8/0/2</td>
<td/>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The figures in bold indicate that, amongst all the image fusion methods, our proposed method, FOMODPSO &#x0002B; PCNN, has the best performance on the test instances concerning convergence and diversity (search quality).</p>
</table-wrap-foot>
</table-wrap>
</table-wrap-group>
<p>From <xref ref-type="table" rid="T6">Table 6</xref>, it can be observed that FOMODPSO &#x0002B; PCNN has 77 better mean metric values as compared to the other methods, and FOMODPSO &#x0002B; PCNN has nine best mean metric values and is the only method with an MR below two for both quality metrics. MFOA &#x0002B; PCNN performed better than FOMODPSO &#x0002B; PCNN on <italic>f</italic><sub>4</sub> concerning IGD values, while DE &#x0002B; PCNN has a similar performance to FOMODPSO &#x0002B; PCNN on <italic>f</italic><sub>5</sub> concerning IGD and HV values. All the image fusion methods are ranked according to MR: FOMODPSO &#x0002B; PCNN, DE &#x0002B; PCNN, PSO-DE &#x0002B; PCNN, PSO-TV &#x0002B; PCNN, PSO &#x0002B; PCNN, MFOA &#x0002B; PCNN, WOA &#x0002B; PCNN, MOPSO &#x0002B; PCNN, and QPSO &#x0002B; PCNN. Of the image fusion methods, FOMODPSO &#x0002B; PCNN has the best performance on test instances concerning convergence and diversity (search quality).</p>
</sec>
<sec>
<label>5.3.2</label>
<title>Visual quality evaluation results and analysis</title>
<p>From <xref ref-type="fig" rid="F4">Figure 4</xref>, which shows various medical image fusion results, it is clear that our method richly preserves the subtle essential image details, enhances spatial information, and has higher visual quality than the other comparative methods. The QPSO &#x0002B; PCNN, MOPSO &#x0002B; PCNN, and PSO &#x0002B; PCNN gave fairly good image details retention ability, but the general image contrast is not good. MFOA &#x0002B; PCNN, WOA &#x0002B; PCNN, and PSO-TV &#x0002B; PCNN, in comparison, gave results of good fused image quality, but the edge preservation ability was low. The PSO-DE &#x0002B; PCNN shows a good fusion effect but may contain some noise in the fused image results.</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Medical image fusion results for <bold>(a)</bold> QPSO&#x0002B;PCNN, <bold>(b)</bold> MOPSO&#x0002B;PCNN, <bold>(c)</bold> PSO&#x0002B;PCNN, <bold>(d)</bold> MFOA&#x0002B;PCNN, <bold>(e)</bold> WOA &#x0002B; PCNN, <bold>(f)</bold> PSO-TV&#x0002B;PCNN, <bold>(g)</bold> PSO-DE&#x0002B;PCNN, <bold>(h)</bold> DE&#x0002B;PCNN, and <bold>(i)</bold> FOMODPSO &#x0002B; PCNN.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-05-1752625-g0004.tif">
<alt-text content-type="machine-generated">Grid of brain MRI scans organized in fourteen rows labeled G1 to G14 and nine columns labeled (a) to (i), showing various axial cross-sectional images with differences in structure and contrast for each group and column.</alt-text>
</graphic>
</fig>
<p>From this observation, it can be easily summarized that our proposed method gave fused images of better visual quality, had more edge-details preservation capability, and had less chance of showing artifacts.</p>
</sec>
<sec>
<label>5.3.3</label>
<title>Quantitative evaluation results and analysis</title>
<p>To evaluate the effectiveness of the proposed method, the accuracy, sensitivity, specificity, and F<sub>1</sub> score are chosen as evaluating parameters. Our proposed method has proved to have an average accuracy of 90.7%, sensitivity of 0.796, specificity of 0.876, and F<sub>1</sub> score of 0.789.</p>
<p>The superior diagnostic performance of the proposed method, characterized by a 90.7% average accuracy and 0.789 F<sub>1</sub> score, is fundamentally driven by the non-local memory features introduced by the FC operator (&#x003B1; = 0.6). Standard integer-order optimization methods often exhibit &#x0201C;myopic&#x0201D; behavior, accounting only for the current state and increasing the risk of premature convergence to local minima. In contrast, the fractional-order &#x0201C;memory effect&#x0201D; allows FOMODPSO to leverage historical search trajectories, ensuring the algorithm identifies the global optimum within the complex PCNN parameter space. This robust global search identifies the precise settings for &#x003B2;, <italic>V</italic><sub>&#x003B8;</sub>, and &#x003B1;<sub>&#x003B8;</sub> required to maximize the Tsallis cross-entropy, thereby ensuring that the most salient pathological features from both CT and MRI modalities are preserved. Because MMIF serves as a critical preprocessing step for computer-aided diagnosis systems, the provision of these high-fidelity, information-rich composite images directly enhances the reliability of subsequent diagnostic classifications, accounting for the significant performance gains observed in this work.</p>
<p>The quantitative evaluation task focuses on assessing the quality of information transfer from source images to the fused output. This is achieved by measuring the fused images against four objective metrics: peak signal-to-noise ratio, MI, structural similarity index (SSIM), and spatial frequency.</p>
<p>We extracted a small dataset from that described in Section 4.1 in order to carry out a three-fold cross-validation. This is for clarity of assessment and ease of operation. In the presented evaluation, we have undertaken an analysis of each comparative method and our proposed method, so that the method with the best values can be selected based on the performance measures. An in-depth analysis reveals that our proposed method gives the best fusion results. Experimental results for each of the cross-validation stages are shown in <xref ref-type="table" rid="T4">Table 4</xref>, while detailed experimental results of all the other stages can be found at:</p>
<list list-type="bullet">
<list-item><p><ext-link ext-link-type="uri" xlink:href="https://github.com/GoddessChysomme/FOMODPSO/files/13933461/G1-G14.pdf">https://github.com/GoddessChysomme/FOMODPSO/files/13933461/G1-G14.pdf</ext-link></p></list-item>
<list-item><p><ext-link ext-link-type="uri" xlink:href="https://github.com/GoddessChysomme/FOMODPSO/issues/1">https://github.com/GoddessChysomme/FOMODPSO/issues/1</ext-link></p></list-item>
</list>
<p>The codes used in this work are open-sourced and can be found at:</p>
<list list-type="bullet">
<list-item><p><ext-link ext-link-type="uri" xlink:href="https://github.com/GoddessChysomme/FOMODPSO/blob/main/fusion">https://github.com/GoddessChysomme/FOMODPSO/blob/main/fusion</ext-link></p></list-item>
<list-item><p><ext-link ext-link-type="uri" xlink:href="https://github.com/GoddessChysomme/FOMODPSO/blob/main/xfusmaxmin">https://github.com/GoddessChysomme/FOMODPSO/blob/main/xfusmaxmin</ext-link></p></list-item>
<list-item><p><ext-link ext-link-type="uri" xlink:href="https://github.com/GoddessChysomme/FOMODPSO/blob/main/xfusmean">https://github.com/GoddessChysomme/FOMODPSO/blob/main/xfusmean</ext-link></p></list-item>
</list>
<p>The inclusion of diagnostic classification metrics (accuracy, sensitivity, specificity, and F<sub>1</sub> score) goes beyond conventional image processing metrics (such as entropy and SSIM) to provide crucial clinical and diagnostic validation for the fused images. The purpose of MMIF is to aid in accurate patient diagnosis, and these metrics evaluate the practical utility of the fused images by measuring their ability to reliably retain or enhance diagnostically relevant features, treating the fusion result as input to a simulated diagnostic process. The highest achieved average accuracy (90.7%) confirms that the FOMODPSO-optimized PCNN parameters yield fused images that significantly improve potential downstream diagnostic classification reliability compared to all comparative methods (<xref ref-type="table" rid="T4">Table 4</xref>).</p>
<p>From <xref ref-type="table" rid="T4">Table 4</xref>, we can see that the performance of our proposed method is the highest for all evaluation parameters.</p>
<p>While the quantitative and qualitative metrics demonstrate the technical superiority of the FOMODPSO method in terms of fusion quality, these results should be interpreted as indicative of algorithmic performance rather than a comprehensive clinical validation. Given the limited dataset of 14 two-dimensional images, the current study serves as a robust proof-of-concept. Future clinical studies involving larger, multi-institutional datasets are required to validate the method&#x00027;s diagnostic efficacy in real-world medical practice.</p>
</sec>
</sec>
<sec>
<label>5.4</label>
<title>Computational efficiency</title>
<p>To evaluate the computational efficiency, we compute and list the average time consumption on all image groups for different methods, as shown in <xref ref-type="table" rid="T1">Table 1</xref>. Our method takes the shortest time, while MFOA &#x0002B; PCNN has a high time consumption. The reason our method has low time consumption is an increased rate of PCNN parameter optimization: increasing the convergence rate of MODPSO using the FC operator, applying the improved optimization algorithm to Tsallis cross-entropy maximization, and using the optimized entropy as input to the PCNN network together reduce the average time consumption of our method in comparison to the other comparative methods.</p>
<p>This computational efficiency confirms that our method can be used to carry out MMIF within a very short time, and this is the main criterion for effective real-time image fusion applications (<xref ref-type="bibr" rid="B7">Fang et al., 2023</xref>).</p>
</sec>
</sec>
<sec id="s6">
<label>6</label>
<title>Conclusions</title>
<p>In this paper, a novel method of MMIF is presented. The method combines improved MODPSO and Tsallis cross-entropy to obtain the best fusion potential for the input of PCNN, for effective MMIF. The MODPSO is improved by applying a fractional calculus operator, a mathematical tool capable of increasing the performance of MODPSO with a higher convergence speed. The main goal of the proposed algorithm is to develop an MMIF algorithm that provides fused images of high visual quality without being time-consuming. To demonstrate the performance of the FOMODPSO after using it to optimize the Tsallis cross-entropy for the input of the m-PCNN for MMIF, we compare the proposed MMIF algorithm with some other image fusion algorithms on several test instances. The experimental results demonstrate that the proposed FOMODPSO &#x0002B; PCNN image fusion method excels in merging CT and MRI data. However, it is important to frame these findings as evidence of enhanced fusion performance. Further validation with larger and more diverse datasets is necessary to establish its clinical utility in various diagnostic environments. In the future, we would like to improve our FOMODPSO algorithm to guarantee the finding of an optimal solution by incorporating a wave function as the factor that indicates the state of the particle.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s7">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>CO: Validation, Conceptualization, Supervision, Funding acquisition, Formal analysis, Software, Writing &#x02013; review &#x00026; editing, Project administration, Methodology, Writing &#x02013; original draft, Data curation, Resources, Visualization, Investigation.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s10">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ahilan</surname> <given-names>A.</given-names></name> <name><surname>Manogaran</surname> <given-names>G.</given-names></name> <name><surname>Raja</surname> <given-names>C.</given-names></name> <name><surname>Kadry</surname> <given-names>S.</given-names></name> <name><surname>Kumar</surname> <given-names>S. N.</given-names></name> <name><surname>Agees Kumar</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Segmentation by fractional order Darwinian particle swarm optimization based multilevel thresholding and improved lossless prediction based compression algorithm for medical images</article-title>. <source>IEEE Access</source> <volume>7</volume>, <fpage>89570</fpage>&#x02013;<lpage>89580</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2891632</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chakraborty</surname> <given-names>R.</given-names></name> <name><surname>Verma</surname> <given-names>G.</given-names></name></person-group> (<year>2021</year>). <article-title>IFODPSO-based multi-level image segmentation scheme aided with Masi entropy</article-title>. <source>J. Ambient Intell. Humaniz. Comput</source>. <volume>12</volume>, <fpage>7793</fpage>&#x02013;<lpage>7811</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12652-020-02506-w</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Challa</surname> <given-names>U. K.</given-names></name> <name><surname>Yellamraju</surname> <given-names>P.</given-names></name> <name><surname>Bhatt</surname> <given-names>J. S.</given-names></name></person-group> (<year>2019</year>). <article-title>A multi-class deep all-CNN for detection of diabetic retinopathy using retinal fundus images</article-title>. <source>Pattern Recog. Machine Intell.</source> <volume>11941</volume>, <fpage>191</fpage>&#x02013;<lpage>199</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-34869-4_21</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Couceiro</surname> <given-names>M. S.</given-names></name> <name><surname>Rocha</surname> <given-names>R. P.</given-names></name> <name><surname>Ferreira</surname> <given-names>N. M. F.</given-names></name> <name><surname>Machado</surname> <given-names>J. A. T.</given-names></name></person-group> (<year>2012</year>). <article-title>Introducing the fractional-order Darwinian PSO</article-title>. <source>Signal Image Video Process</source>. <volume>6</volume>, <fpage>343</fpage>&#x02013;<lpage>350</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11760-012-0316-2</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Das</surname> <given-names>M. K.</given-names></name> <name><surname>Gupta</surname> <given-names>D.</given-names></name> <name><surname>Radeva</surname> <given-names>P.</given-names></name> <name><surname>Bakde</surname> <given-names>A. M.</given-names></name></person-group> (<year>2022</year>). <article-title>Multimodal image sensor fusion in a cascaded framework using optimized dual channel pulse coupled neural network</article-title>. <source>J. Ambient Intell. Human. Comput</source>. <volume>14</volume>, <fpage>11985</fpage>&#x02013;<lpage>12004</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12652-022-03749-5</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Deshpande</surname> <given-names>V. S.</given-names></name> <name><surname>Bhatt</surname> <given-names>J. S.</given-names></name></person-group> (<year>2019</year>). <article-title>Bayesian deep learning for deformable medical image registration</article-title>. <source>Pattern Recog. Machine Intell.</source> <volume>11942</volume>, <fpage>41</fpage>&#x02013;<lpage>49</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-34872-4_5</pub-id></mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fang</surname> <given-names>Y.</given-names></name> <name><surname>Bakian-Dogaheh</surname> <given-names>K.</given-names></name> <name><surname>Moghaddam</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Real-time 3D microwave medical imaging with enhanced variational born iterative method</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>42</volume>, <fpage>268</fpage>&#x02013;<lpage>280</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2022.3210494</pub-id><pub-id pub-id-type="pmid">36166569</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Gao</surname> <given-names>Z.</given-names></name> <name><surname>Wei</surname> <given-names>J.</given-names></name> <name><surname>Liang</surname> <given-names>C.</given-names></name> <name><surname>Yan</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>&#x0201C;Fractional-order particle swarm optimization,&#x0201D;</article-title> in <source>The 26th Chinese Control and Decision Conference (2014 CCDC)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>1284</fpage>&#x02013;<lpage>1288</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CCDC.2014.6852364</pub-id></mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ghorai</surname> <given-names>C.</given-names></name> <name><surname>Shakhari</surname> <given-names>S.</given-names></name> <name><surname>Banerjee</surname> <given-names>I.</given-names></name></person-group> (<year>2021</year>). <article-title>A SPEA-based multimetric routing protocol for intelligent transportation systems</article-title>. <source>IEEE Trans. Intell. Transport. Syst.</source> <volume>22</volume>, <fpage>6737</fpage>&#x02013;<lpage>6747</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TITS.2020.2994362</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Han</surname> <given-names>F.</given-names></name> <name><surname>Zheng</surname> <given-names>M.</given-names></name> <name><surname>Ling</surname> <given-names>Q.</given-names></name></person-group> (<year>2021</year>). <article-title>An improved multiobjective particle swarm optimization algorithm based on tripartite competition mechanism</article-title>. <source>Appl. Intell</source>. <volume>52</volume>, <fpage>5784</fpage>&#x02013;<lpage>5816</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10489-021-02665-z</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hermessi</surname> <given-names>H.</given-names></name> <name><surname>Mourali</surname> <given-names>O.</given-names></name> <name><surname>Zagrouba</surname> <given-names>E.</given-names></name></person-group> (<year>2018</year>). <article-title>Convolutional neural network-based multimodal image fusion via similarity learning in the shearlet domain</article-title>. <source>Neural Comput. Appl.</source> <volume>30</volume>, <fpage>2029</fpage>&#x02013;<lpage>2045</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00521-018-3441-1</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>S.</given-names></name> <name><surname>Hu</surname> <given-names>P.</given-names></name> <name><surname>Ma</surname> <given-names>X.</given-names></name></person-group> (<year>2018</year>). <article-title>&#x0201C;A novel medical image fusion method based on MCA and MOPSO,&#x0201D;</article-title> in <source>Proceedings of the 2018 14th IEEE International Conference on Signal Processing (ICSP 2018)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>1163</fpage>&#x02013;<lpage>1167</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICSP.2018.8652380</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Johnson</surname> <given-names>K. A.</given-names></name> <name><surname>Becker</surname> <given-names>J. A.</given-names></name></person-group> (<year>1999</year>). <source>The Whole Brain Atlas</source>. Harvard Medical School. Available online at: <ext-link ext-link-type="uri" xlink:href="http://www.med.harvard.edu/aanlib/">http://www.med.harvard.edu/aanlib/</ext-link> (Accessed May 15, 2017).</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Kumar</surname> <given-names>M.</given-names></name> <name><surname>Ranjan</surname> <given-names>N.</given-names></name> <name><surname>Chourasia</surname> <given-names>B.</given-names></name></person-group> (<year>2021</year>). <article-title>&#x0201C;Hybrid methods of contourlet transform and particle swarm optimization for multimodal medical image fusion,&#x0201D;</article-title> in <source>2021 International Conference on Artificial Intelligence and Smart Systems (ICAIS)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>945</fpage>&#x02013;<lpage>951</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICAIS50930.2021.9396021</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>J.</given-names></name> <name><surname>Guo</surname> <given-names>X.</given-names></name> <name><surname>Lu</surname> <given-names>G.</given-names></name> <name><surname>Zhang</surname> <given-names>B.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>Wu</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>DRPL: deep regression pair learning for multi-focus image fusion</article-title>. <source>IEEE Trans. Image Process.</source> <volume>29</volume>, <fpage>4816</fpage>&#x02013;<lpage>4831</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TIP.2020.2976190</pub-id><pub-id pub-id-type="pmid">32142440</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Chang</surname> <given-names>L.</given-names></name> <name><surname>Gu</surname> <given-names>T.</given-names></name> <name><surname>Sheng</surname> <given-names>W.</given-names></name> <name><surname>Wang</surname> <given-names>W.</given-names></name></person-group> (<year>2021</year>). <article-title>On the norm of dominant difference for many-objective particle swarm optimization</article-title>. <source>IEEE Trans. Cybernet.</source> <volume>51</volume>, <fpage>2055</fpage>&#x02013;<lpage>2067</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TCYB.2019.2922287</pub-id><pub-id pub-id-type="pmid">31380777</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Song</surname> <given-names>S.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name></person-group> (<year>2019</year>). <article-title>Evolutionary multiobjective optimization with clustering-based self-adaptive mating restriction strategy</article-title>. <source>Soft Comput.</source> <volume>23</volume>, <fpage>3303</fpage>&#x02013;<lpage>3325</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00500-017-2990-z</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lin</surname> <given-names>Q.</given-names></name> <name><surname>Zhang</surname> <given-names>L.</given-names></name> <name><surname>Wu</surname> <given-names>T.</given-names></name> <name><surname>Mean</surname> <given-names>T.</given-names></name> <name><surname>Tseng</surname> <given-names>H.</given-names></name></person-group> (<year>2020</year>). <article-title>Application of Tsallis cross-entropy in image thresholding segmentation</article-title>. <source>Sens. Mater.</source> <volume>32</volume>:<fpage>2771</fpage>. doi: <pub-id pub-id-type="doi">10.18494/SAM.2020.2798</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lin</surname> <given-names>Y.</given-names></name> <name><surname>Jiang</surname> <given-names>Y. S.</given-names></name> <name><surname>Gong</surname> <given-names>Y. J.</given-names></name> <name><surname>Zhan</surname> <given-names>Z. H.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>A discrete multiobjective particle swarm optimizer for automated assembly of parallel cognitive diagnosis tests</article-title>. <source>IEEE Trans. Cybernet</source>. <volume>49</volume>, <fpage>2792</fpage>&#x02013;<lpage>2805</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TCYB.2018.2836388</pub-id><pub-id pub-id-type="pmid">29994281</pub-id></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>C.</given-names></name> <name><surname>Wen</surname> <given-names>J.</given-names></name> <name><surname>Wu</surname> <given-names>Z.</given-names></name> <name><surname>Luo</surname> <given-names>X.</given-names></name> <name><surname>Huang</surname> <given-names>C.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Information recovery-driven deep incomplete multi-view clustering network</article-title>. <source>IEEE Trans. Neural Netw. Learn. Syst</source>. <volume>35</volume>, <fpage>15442</fpage>&#x02013;<lpage>15452</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TNNLS.2023.3286918</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>C.</given-names></name> <name><surname>Wu</surname> <given-names>Z.</given-names></name> <name><surname>Wen</surname> <given-names>J.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>Huang</surname> <given-names>C.</given-names></name></person-group> (<year>2022</year>). <article-title>Localized sparse incomplete multi-view clustering</article-title>. <source>IEEE Trans. Multimedia</source> <volume>25</volume>, <fpage>5539</fpage>&#x02013;<lpage>5551</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMM.2022.3194332</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>R.</given-names></name> <name><surname>Du</surname> <given-names>L.</given-names></name> <name><surname>Liu</surname> <given-names>C.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;Medical image fusion based on NSST and PCNN optimized by PSO-DE,&#x0201D;</article-title> in <source>International Conference on Electronic Information Engineering, Big Data, and Computer Technology (EIBDCT 2022), Vol. 12256</source> (<publisher-loc>SPIE</publisher-loc>), <fpage>55</fpage>&#x02013;<lpage>60</lpage>. doi: <pub-id pub-id-type="doi">10.1117/12.2635361</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Yen</surname> <given-names>G. G.</given-names></name> <name><surname>Gong</surname> <given-names>D.</given-names></name></person-group> (<year>2019</year>). <article-title>A multimodal multiobjective evolutionary algorithm using two-archive and recombination strategies</article-title>. <source>IEEE Trans. Evol. Comput.</source> <volume>23</volume>, <fpage>660</fpage>&#x02013;<lpage>674</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TEVC.2018.2879406</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Mishra</surname> <given-names>N. S.</given-names></name> <name><surname>Dhabal</surname> <given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>&#x0201C;Medical image fusion using local IFS-entropy in NSST domain by stimulating PCNN,&#x0201D;</article-title> in <source>2020 IEEE 1st International Conference for Convergence in Engineering (ICCE)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>389</fpage>&#x02013;<lpage>392</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICCE50343.2020.9290666</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nie</surname> <given-names>R.</given-names></name> <name><surname>Cao</surname> <given-names>J.</given-names></name> <name><surname>Zhou</surname> <given-names>D.</given-names></name> <name><surname>Qian</surname> <given-names>W.</given-names></name></person-group> (<year>2021</year>). <article-title>Multi-source information exchange encoding with PCNN for medical image fusion</article-title>. <source>IEEE Trans. Circuits Syst. Video Technol.</source> <volume>31</volume>, <fpage>986</fpage>&#x02013;<lpage>1000</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TCSVT.2020.2998696</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pires</surname> <given-names>E. J. S.</given-names></name> <name><surname>Machado</surname> <given-names>J. A. T.</given-names></name> <name><surname>Oliveira</surname> <given-names>P. B. D.</given-names></name> <name><surname>Cunha</surname> <given-names>J. B.</given-names></name> <name><surname>Mendes</surname> <given-names>L.</given-names></name></person-group> (<year>2010</year>). <article-title>Particle swarm optimization with fractional-order velocity</article-title>. <source>Nonlinear Dyn</source>. <volume>61</volume>, <fpage>295</fpage>&#x02013;<lpage>301</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11071-009-9649-y</pub-id></mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Raha</surname> <given-names>R.</given-names></name> <name><surname>Sengupta</surname> <given-names>A.</given-names></name> <name><surname>Dhabal</surname> <given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>&#x0201C;Medical image fusion using PCNN optimized by whale optimization algorithm,&#x0201D;</article-title> in <source>2020 IEEE 1st International Conference for Convergence in Engineering (ICCE)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>374</fpage>&#x02013;<lpage>378</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICCE50343.2020.9290504</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rai</surname> <given-names>S.</given-names></name> <name><surname>Bhatt</surname> <given-names>J. S.</given-names></name> <name><surname>Kumar Patra</surname> <given-names>S.</given-names></name></person-group> (<year>2023</year>). <article-title>A strictly bounded deep network for unpaired cyclic translation of medical images</article-title>. <source>IEEE Statistical Signal Processing Workshop (SSP), Hanoi, Vietnam</source> (IEEE), <fpage>61</fpage>&#x02013;<lpage>65</lpage>. doi: <pub-id pub-id-type="doi">10.1109/SSP53291.2023.10207960</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rai</surname> <given-names>S.</given-names></name> <name><surname>Bhatt</surname> <given-names>J. S.</given-names></name> <name><surname>Patra</surname> <given-names>S. K.</given-names></name></person-group> (<year>2021</year>). <article-title>Augmented noise learning framework for enhancing medical image denoising</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>117153</fpage>&#x02013;<lpage>117168</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2021.3106707</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Rai</surname> <given-names>S.</given-names></name> <name><surname>Bhatt</surname> <given-names>J. S.</given-names></name> <name><surname>Patra</surname> <given-names>S. K.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;Accessible, affordable and low-risk lungs health monitoring in Covid-19: deep cascade reconstruction from degraded LR-ULDCT,&#x0201D;</article-title> in <source>2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI), Kolkata, India</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ISBI52829.2022.9761566</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Sabatier</surname> <given-names>J.</given-names></name> <name><surname>Agrawal</surname> <given-names>O.</given-names></name> <name><surname>Machado</surname> <given-names>J.</given-names></name></person-group> (<year>2007</year>). <source>Advances in Fractional Calculus: Theoretical Developments and Applications in Physics and Engineering</source>. Dordrecht: Springer. doi: <pub-id pub-id-type="doi">10.1007/978-1-4020-6042-7</pub-id></mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>S.</given-names></name> <name><surname>Anand</surname> <given-names>R. S.</given-names></name></person-group> (<year>2020</year>). <article-title>Multimodal medical image fusion using hybrid decomposition with CNN-based feature mapping and structural clustering</article-title>. <source>IEEE Trans. Instrument. Measure.</source> <volume>69</volume>, <fpage>3855</fpage>&#x02013;<lpage>3865</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TIM.2019.2933341</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tang</surname> <given-names>L.</given-names></name> <name><surname>Tian</surname> <given-names>C.</given-names></name> <name><surname>Xu</surname> <given-names>K.</given-names></name></person-group> (<year>2019</year>). <article-title>Exploiting quality-guided adaptive optimization for fusing multimodal medical images</article-title>. <source>IEEE Access</source> <volume>7</volume>, <fpage>96048</fpage>&#x02013;<lpage>96059</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2926833</pub-id></mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Tian</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Ye</surname> <given-names>F.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x0201C;Multimodal medical image fusion based on nonsubsampled contourlet transform using improved PCNN,&#x0201D;</article-title> in <source>2016 IEEE 13th International Conference on Signal Processing (ICSP)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>799</fpage>&#x02013;<lpage>804</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICSP.2016.7877941</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vigelis</surname> <given-names>R. F.</given-names></name> <name><surname>Andrade</surname> <given-names>L. H. F. D.</given-names></name> <name><surname>Cavalcante</surname> <given-names>C. C.</given-names></name></person-group> (<year>2020</year>). <article-title>Properties of a generalized divergence related to Tsallis generalized divergence</article-title>. <source>IEEE Trans. Information Theory</source> <volume>66</volume>, <fpage>2891</fpage>&#x02013;<lpage>2897</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TIT.2019.2953029</pub-id></mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Q.</given-names></name> <name><surname>Zhou</surname> <given-names>D.</given-names></name> <name><surname>Nie</surname> <given-names>R.</given-names></name> <name><surname>Jin</surname> <given-names>X.</given-names></name> <name><surname>He</surname> <given-names>K.</given-names></name> <name><surname>Dou</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Medical image fusion using pulse coupled neural network and multi-objective particle swarm optimization</article-title>. <source>International Conference on Digital Image Processing</source> (SPIE). doi: <pub-id pub-id-type="doi">10.1117/12.2245043</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Waseem</surname> <given-names>W.</given-names></name> <name><surname>Sulaiman</surname> <given-names>M.</given-names></name> <name><surname>Alhindi</surname> <given-names>A.</given-names></name> <name><surname>Alhakami</surname> <given-names>H.</given-names></name></person-group> (<year>2020</year>). <article-title>A soft computing approach based on fractional order DPSO algorithm designed to solve the corneal model for eye surgery</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>61576</fpage>&#x02013;<lpage>61592</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2020.2983823</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wen</surname> <given-names>J.</given-names></name> <name><surname>Zhang</surname> <given-names>Z.</given-names></name> <name><surname>Fei</surname> <given-names>L.</given-names></name> <name><surname>Zhang</surname> <given-names>B.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>Z.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>A survey on incomplete multiview clustering</article-title>. <source>IEEE Trans. Syst. Man Cybernet. Syst.</source> <volume>53</volume>, <fpage>1136</fpage>&#x02013;<lpage>1149</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TSMC.2022.3192635</pub-id></mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>B.</given-names></name> <name><surname>Hu</surname> <given-names>W.</given-names></name> <name><surname>Hu</surname> <given-names>J.</given-names></name> <name><surname>Yen</surname> <given-names>G. G.</given-names></name></person-group> (<year>2021</year>). <article-title>Adaptive multiobjective particle swarm optimization based on evolutionary state estimation</article-title>. <source>IEEE Trans. Cybernet.</source> <volume>51</volume>, <fpage>3738</fpage>&#x02013;<lpage>3751</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TCYB.2019.2949204</pub-id><pub-id pub-id-type="pmid">31725406</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>X.</given-names></name> <name><surname>Shan</surname> <given-names>D.</given-names></name> <name><surname>Wang</surname> <given-names>G.</given-names></name> <name><surname>Jiang</surname> <given-names>X.</given-names></name></person-group> (<year>2016</year>). <article-title>Multimodal medical image fusion using PCNN optimized by the QPSO algorithm</article-title>. <source>Appl. Soft Comput.</source> <volume>46</volume>, <fpage>588</fpage>&#x02013;<lpage>595</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.asoc.2016.03.028</pub-id></mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>B.</given-names></name> <name><surname>Jiang</surname> <given-names>C.</given-names></name> <name><surname>Hu</surname> <given-names>Y.</given-names></name> <name><surname>Chen</surname> <given-names>Z.</given-names></name></person-group> (<year>2021</year>). <article-title>&#x0201C;Medical image fusion based a Densely connected convolutional networks,&#x0201D;</article-title> in <source>IEEE 5th Advanced Information Technology, Electronic and Automation Control Conference (IAEAC), Chongqing, China</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>2164</fpage>&#x02013;<lpage>2170</lpage>. doi: <pub-id pub-id-type="doi">10.1109/IAEAC50856.2021.9390712</pub-id></mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>Y.</given-names></name> <name><surname>Zhao</surname> <given-names>Q.</given-names></name> <name><surname>Hao</surname> <given-names>A.</given-names></name></person-group> (<year>2014</year>). <article-title>Multi-modal medical image fusion using improved multi-channel PCNN</article-title>. <source>Bio Med. Mater. Eng.</source> <volume>24</volume>, <fpage>221</fpage>&#x02013;<lpage>228</lpage>. doi: <pub-id pub-id-type="doi">10.3233/BME-130802</pub-id></mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhu</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Huang</surname> <given-names>H.</given-names></name> <name><surname>Cheng</surname> <given-names>S.</given-names></name> <name><surname>Wu</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>A NSGA-II algorithm for task scheduling in UAV-enabled MEC system</article-title>. <source>IEEE Trans. Intell. Transport. Syst.</source> <volume>23</volume>, <fpage>9414</fpage>&#x02013;<lpage>9429</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TITS.2021.3120019</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Zhu</surname> <given-names>M.</given-names></name> <name><surname>Han</surname> <given-names>F.</given-names></name></person-group> (<year>2021</year>). <article-title>&#x0201C;Multi-objective particle swarm optimization based on space decomposition for feature selection,&#x0201D;</article-title> in <source>17th International Conference on Computational Intelligence and Security (CIS)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>387</fpage>&#x02013;<lpage>391</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CIS54983.2021.00087</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1410131/overview">Moulay Akhloufi</ext-link>, Universit&#x000E9; de Moncton, Canada</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1839488/overview">Jie Wu</ext-link>, Shaanxi Normal University, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3167545/overview">Rakesh Choudhary</ext-link>, SUNY Upstate Medical University, United States</p>
</fn>
</fn-group>
</back>
</article>