<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Big Data</journal-id>
<journal-title-group>
<journal-title>Frontiers in Big Data</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Big Data</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2624-909X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fdata.2025.1659026</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>EnDuSecFed: an ensemble approach for privacy preserving Federated Learning with dual-security framework for sustainable healthcare</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Shrimali</surname> <given-names>Bela</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/3121751"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Gajjar</surname> <given-names>Jenil</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Roy</surname> <given-names>Swapnoneel</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Patel</surname> <given-names>Sanjay</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Patel</surname> <given-names>Kanu</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Naik</surname> <given-names>Ramesh Ram</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Unitedworld Institute of Technology, Karnavati University</institution>, <city>Gandhinagar, Gujarat</city>, <country country="IN">India</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Computer Science and Engineering, Institute of Technology, Nirma University</institution>, <city>Ahmedabad, Gujarat</city>, <country country="IN">India</country></aff>
<aff id="aff3"><label>3</label><institution>School of Computing, University of North Florida</institution>, <city>Jacksonville, FL</city>, <country country="US">United States</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Bela Shrimali, <email xlink:href="mailto:bela.shrimali@gmail.com">bela.shrimali@gmail.com</email>; Swapnoneel Roy, <email xlink:href="mailto:s.roy@unf.edu">s.roy@unf.edu</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-22">
<day>22</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>8</volume>
<elocation-id>1659026</elocation-id>
<history>
<date date-type="received">
<day>03</day>
<month>07</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>16</day>
<month>10</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Shrimali, Gajjar, Roy, Patel, Patel and Naik.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Shrimali, Gajjar, Roy, Patel, Patel and Naik</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-22">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Recent advances in Artificial Intelligence have highlighted the role of Machine Learning in healthcare decision-making, but centralized data collection raises significant privacy risks. Federated Learning addresses this by enabling collaborative training across multiple clients without sharing raw data. However, Federated Learning remains vulnerable to security threats that can compromise model reliability. This paper proposes a dual-security Federated Learning framework that integrates Fernet Symmetric Encryption for secure transmission of model updates and an Intrusion Detection System to detect anomalous client behavior. Experiments on a publicly available healthcare dataset show that the proposed system enhances privacy and robustness compared to traditional FL. Among tested models, including Logistic Regression, Random Forest, and SVC, the ensemble method achieved the best performance with 99% accuracy.</p></abstract>
<kwd-group>
<kwd>Federated Learning</kwd>
<kwd>Fernet Symmetric Encryption</kwd>
<kwd>Intrusion Detection System</kwd>
<kwd>Logistic Regression</kwd>
<kwd>Random Forest</kwd>
<kwd>Support Vector Classifier</kwd>
</kwd-group>
<funding-group>
  <funding-statement>The author(s) declare that no financial support was received for the research and/or publication of this article.</funding-statement>
</funding-group>
<counts>
<fig-count count="9"/>
<table-count count="7"/>
<equation-count count="4"/>
<ref-count count="31"/>
<page-count count="15"/>
<word-count count="8113"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Cybersecurity and Privacy</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>According to the Gartner report-2025 (<xref ref-type="bibr" rid="B11">Gartner, 2025</xref>), about 27% of organizations have faced a privacy breach or security issue related to Artificial Intelligence (AI). This means that there were intentional attacks on the organization&#x00027;s AI systems because they collect and process data in a central place. Federated Learning (FL) has emerged as a robust method for training machine learning models across multiple clients while maintaining the privacy of their local data. Unlike traditional methods where data is collected in one location, FL allows each client to have control of its data (<xref ref-type="bibr" rid="B30">Wang et al., 2023b</xref>). This is particularly useful in sensitive areas like healthcare, where patient information must be kept confidential (<xref ref-type="bibr" rid="B17">Khatun et al., 2023</xref>; <xref ref-type="bibr" rid="B4">Almalawi et al., 2023</xref>; <xref ref-type="bibr" rid="B21">Naresh and Thamarai, 2023</xref>). By using FL, healthcare providers can create better models by combining knowledge from different datasets without risking the privacy and security of individual patient data (<xref ref-type="bibr" rid="B7">Chaddad et al., 2023</xref>; <xref ref-type="bibr" rid="B16">Joshi et al., 2022</xref>; <xref ref-type="bibr" rid="B18">Kumar and Singla, 2021</xref>).</p>
<p>Despite its advantages, FL presents several challenges. Its decentralized architecture can introduce security vulnerabilities, particularly in securing the updates exchanged between clients and the central server. In a standard FL framework, the local model weights from each client are aggregated to form a global model. This aggregation process, however, is susceptible to security threats (<xref ref-type="bibr" rid="B19">Li et al., 2023</xref>; <xref ref-type="bibr" rid="B9">Coelho et al., 2023</xref>; <xref ref-type="bibr" rid="B3">Ali et al., 2024</xref>), such as data poisoning and adversarial attacks, which can compromise the performance of the global model. Such concerns are especially critical in healthcare applications, where prediction accuracy directly impacts patient safety.</p>
<p>To manage these risks, encrypted communication using the Fernet Symmetric Encryption (FSE) technique is implemented during the sharing of model updates between local clients and the global server. FSE allows secure calculations on encrypted data, ensuring that the model updates shared remain private. With FSE, the system protects sensitive information from attackers while still allowing clients to work together. This means even if a malicious client tries to change its model updates, the encryption will stop it from damaging the global model. While FSE secures model updates during sharing, it does not automatically detect malicious behavior or unusual activity in the Federated Learning system. Attackers can still send harmful updates that may compromise the global server. To address this, an Intrusion Detection System (IDS) is deployed at the global server to monitor and analyze incoming model updates for suspicious activity. By identifying abnormal patterns, the IDS can detect attacks such as model poisoning. This combined approach&#x02014;using FSE for secure sharing and IDS for anomaly detection&#x02014;enhances the overall security and trustworthiness of the FL process.</p>
<sec>
<label>1.1</label>
<title>Motivation</title>
<p>Preserving the privacy of sensitive information is critical in healthcare, and FL has emerged as a promising paradigm as it enables collaborative model training without sharing raw data. Nevertheless, FL remains vulnerable to security threats, where malicious clients may submit harmful updates that compromise the global model&#x00027;s accuracy. This study aims to strengthen FL security in healthcare, where reliability is crucial for patient care. To address these challenges, Fernet Symmetric Encryption (FSE) is employed to safeguard model updates against tampering, while an IDS at the central server detects anomalous client behavior. The main contributions of this research are:</p>
<list list-type="bullet">
<list-item><p>We propose a federated learning method with dual security. A communication between local clients and the main server is secured using FSE and protects data changes at the central server with an IDS. Our method is shown to be better in security analysis compared to existing methods.</p></list-item>
<list-item><p>To improve decision-making and predictions, along with existing models, an ensemble approach is also implemented that combines predictions from three main models: Logistic Regression, Support Vector Classifier, and Random Forest at the local node for training.</p></list-item>
<list-item><p>We also discuss various attacks on privacy in FL models and highlight how our dual security approach adds value to this research area.</p></list-item>
</list>
</sec>
<sec>
<label>1.2</label>
<title>Organization</title>
<p>The remainder of this paper is structured as follows. Section 2 presents a comprehensive review of the existing literature. Section 3 details the proposed system architecture, including methodology, system components, and their interactions. Section 4 describes the experimental setup with a description of the dataset, models, and proposed algorithms. Section 5 provides an in-depth security analysis of the FSE scheme and IDS components, examining potential vulnerabilities and their mitigations. Section 6 presents experimental results, including performance metrics, comparative analysis, and validation of the approach. Lastly, Section 7 concludes with the key findings, discusses the implications of the work, and outlines promising directions for future research in this domain.</p></sec>
</sec>
<sec id="s2">
<label>2</label>
<title>Literature review</title>
<p>FL has emerged as a promising privacy-preserving paradigm, particularly in sensitive domains such as healthcare. Unlike centralized machine learning, FL enables distributed model training without directly sharing raw data, thus safeguarding patient privacy. However, despite its advantages, FL remains vulnerable to adversarial threats, including data poisoning, label-flipping, and model poisoning attacks, where malicious clients can manipulate updates to reduce the performance of the global model (<xref ref-type="bibr" rid="B14">Hiwale et al., 2023</xref>). To address these vulnerabilities, researchers have explored various privacy-enhancing and security-aware strategies, which can be broadly categorized into: privacy-preserving approaches, cryptographic frameworks, IDS integration, and blockchain-enabled solutions, beginning with privacy-preserving and cryptographic approaches.</p>
<p><xref ref-type="bibr" rid="B2">Alazab et al. (2023)</xref> investigated FL for privacy-preserving Intrusion Detection Systems, comparing its performance against traditional deep learning models. By using the FedAvg algorithm, autoencoder-based anomaly detection, and secure gRPC channels, they reported high accuracy (98.07%), precision (97.4%), recall (99.06%), and F1-score (98.21%). Similarly, <xref ref-type="bibr" rid="B29">Wang et al. (2023a)</xref> introduced PPFLHE, a framework that leverages homomorphic encryption to address privacy and communication overhead in healthcare FL. Their system achieved 81.53% accuracy, showing that encryption can secure model updates but may also introduce computational overhead.</p>
<p>To mitigate adversarial threats, <xref ref-type="bibr" rid="B5">Almalki et al. (2024)</xref> proposed a hybrid Healthcare 5.0 framework that combines FL, IDS, and Blockchain Technology (BCT). Their solution improved diagnostic accuracy (93.89%) while enhancing data protection in Internet of Medical Things (IoMT) applications. <xref ref-type="bibr" rid="B26">Schneble (2018)</xref> explored FL-based distributed IDS for Medical Cyber-Physical Systems (MCPS), focusing on detecting cyberattacks while maintaining high accuracy and low false-positive rates. <xref ref-type="bibr" rid="B13">Guduri et al. (2023)</xref> further advanced security in FL by integrating blockchain with lightweight encryption and proxy re-encryption to secure Electronic Health Records (EHR). Their Ethereum-based testbed demonstrated superior resistance to unauthorized access compared with existing models.</p>
<p>While this literature demonstrates significant progress, several gaps remain. Privacy-preserving approaches like homomorphic encryption and FSE secure data during communication but do not inherently detect malicious updates, leaving models vulnerable to model poisoning. IDS-based solutions focus on anomaly detection but face challenges in scalability and false alarms in highly distributed healthcare environments. Blockchain-enhanced systems improve auditability and decentralization but often introduce high computational and communication overhead. Furthermore, many proposed frameworks are evaluated on limited datasets or focus primarily on accuracy, with less emphasis on robustness against adaptive adversaries or combined privacy&#x02013;security trade-offs.</p>
<p>From this review, it is evident that while existing literature addresses either privacy (via encryption/FSE) or security (via IDS/blockchain), very few frameworks offer a comprehensive and lightweight defense mechanism that jointly ensures secure sharing of updates and real-time detection of adversarial behaviors in FL for healthcare applications. This gap motivates our research, where we propose an integrated approach combining FSE for privacy-preserving updates with a global IDS for anomaly detection, thereby enhancing the trustworthiness of FL in sensitive healthcare settings.</p>
<p><xref ref-type="table" rid="T1">Table 1</xref> provides a summary of the existing state-of-the-art in FL for healthcare applications, highlighting their contribution, limitations, technologies used, comparison parameters, and security concerns/attacks discussed.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Review of existing research in privacy-preserving Federated Learning.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Existing Work</bold></th>
<th valign="top" align="left"><bold>Contribution</bold></th>
<th valign="top" align="left"><bold>Limitations</bold></th>
<th valign="top" align="left"><bold>Technology Used</bold></th>
<th valign="top" align="left"><bold>Performance Metrics</bold></th>
<th valign="top" align="left"><bold>Attacks Considered</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B1">Abaoud et al. (2023)</xref></td>
<td valign="top" align="left">Privacy-preserving FL models</td>
<td valign="top" align="left">Scalability issues</td>
<td valign="top" align="left">DP, FSE, HE</td>
<td valign="top" align="left">Acc.: 97.69%, Prec.: 95.2%, Rec.: 93%</td>
<td valign="top" align="left">None reported</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B8">Chen et al. (2023)</xref></td>
<td valign="top" align="left">PPTFL model ensuring traceable and tamper-proof parameters</td>
<td valign="top" align="left">Computational complexity, overhead</td>
<td valign="top" align="left">BCT, IPFS, CNN, ResNet-18</td>
<td valign="top" align="left">CNN: 91.46%, ResNet-18: 68.76%</td>
<td valign="top" align="left">Backdoor attacks</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B12">Gayathri Hegde et al. (2023)</xref></td>
<td valign="top" align="left">Comparison of various FL models</td>
<td valign="top" align="left">High processing time for ANN</td>
<td valign="top" align="left">ANN, LR</td>
<td valign="top" align="left">FL-LR: 98.12%, FL-ANN: 97.66%</td>
<td valign="top" align="left">None reported</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B27">Shen et al. (2023)</xref></td>
<td valign="top" align="left">Privacy-preserving online diagnosis scheme for e-healthcare systems</td>
<td valign="top" align="left">Computational complexity, scalability issues</td>
<td valign="top" align="left">SVM, HE</td>
<td valign="top" align="left">Acc.-1: 86.4%, Acc.-2: 85.9%, Acc.-3: 90.7%</td>
<td valign="top" align="left">None reported</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B23">Otoum et al. (2021)</xref></td>
<td valign="top" align="left">Federated Reinforcement Learning-based IDS for IoT in healthcare</td>
<td valign="top" align="left">Scalability challenges</td>
<td valign="top" align="left">RL, SVM</td>
<td valign="top" align="left">FRL-IDS: 98%, SVM: 98.5%</td>
<td valign="top" align="left">DoS, DDoS, Web Attacks (XSS, SQL Injection, Brute Force), HeartBleed, PortScan</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B2">Alazab et al. (2023)</xref></td>
<td valign="top" align="left">Evaluated FL effectiveness in privacy-preserving IDS</td>
<td valign="top" align="left">Not specified</td>
<td valign="top" align="left">FedAvg, Autoencoder, gRPC</td>
<td valign="top" align="left">Acc.: 98.07%, Prec.: 97.4%, Rec.: 99.06%, F1: 98.21%</td>
<td valign="top" align="left">None reported</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B29">Wang et al. (2023a)</xref></td>
<td valign="top" align="left">PPFLHE framework for healthcare data security</td>
<td valign="top" align="left">Communication overhead</td>
<td valign="top" align="left">Homomorphic Encryption</td>
<td valign="top" align="left">Acc.: 81.53%</td>
<td valign="top" align="left">Internal attacks, Chosen-Plaintext</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B5">Almalki et al. (2024)</xref></td>
<td valign="top" align="left">Secure Healthcare 5.0 system integrating FL, IDS, and BCT</td>
<td valign="top" align="left">Not specified</td>
<td valign="top" align="left">FL, IDS, BCT</td>
<td valign="top" align="left">Acc.: 93.89%</td>
<td valign="top" align="left">None reported</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B26">Schneble (2018)</xref></td>
<td valign="top" align="left">Distributed ML-based IDS for Medical CPS</td>
<td valign="top" align="left">Not specified</td>
<td valign="top" align="left">FL</td>
<td valign="top" align="left">High detection accuracy, low false positives (exact values not provided)</td>
<td valign="top" align="left">DoS, Data modification, Data injection</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B13">Guduri et al. (2023)</xref></td>
<td valign="top" align="left">Blockchain-based FL for EHR security</td>
<td valign="top" align="left">Not specified</td>
<td valign="top" align="left">Lightweight encryption, Decentralized cloud, Proxy re-encryption</td>
<td valign="top" align="left">Improved security metrics (values not provided)</td>
<td valign="top" align="left">Message tampering, Replay, Man-in-the-middle</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B28">Srivenkateswaran et al. (2025)</xref></td>
<td valign="top" align="left">ECC-Serpent hybrid encryption scheme</td>
<td valign="top" align="left">Integrating Serpent encryption may introduce additional computational overhead compared to lightweight algorithms.</td>
<td valign="top" align="left">ECC, Hybrid encryption model</td>
<td valign="top" align="left">97.5% accuracy in safeguarding sensitive healthcare data</td>
<td valign="top" align="left">Passive attack, Replay attack</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec id="s3">
<label>3</label>
<title>Proposed architecture</title>
<p>This section covers the discussion on FL and the proposed architecture, along with the security mechanisms, i.e., FSE and IDS, in separate subsections.</p>
<sec>
<label>3.1</label>
<title>Overview of the architecture</title>
<p><xref ref-type="fig" rid="F1">Figure 1</xref> illustrates a Federated Learning (FL) framework used in healthcare facilities, incorporating an IDS and FSE to guarantee security throughout the communication and learning process. The process/steps of the proposed work, as shown in <xref ref-type="fig" rid="F1">Figure 1</xref>, are as follows:</p>
<list list-type="bullet">
<list-item><p><bold>Local model training</bold>: Each medical facility (e.g., Healthcare Institute 1, 2, 3,... N) uses the infrastructure of the organization to process its local dataset and train a machine learning model. This guarantees the confidentiality of the patient&#x00027;s information. The training procedure closely complies with privacy-protecting guidelines.</p></list-item>
<list-item><p><bold>Local model sharing with FSE</bold>: After training, updates to the local model are encrypted, then sent to the central server via FSE. By preventing unwanted access or tampering, this encryption guarantees that the model updates remain secure while in transit. Malicious local nodes trying to deduce private information during communication are another risk that the FSE reduces.</p></list-item>
<list-item><p><bold>Global model aggregation</bold>: To create a global model, the central server gathers the encrypted weights that are received from each participating local node and decrypts them. The central server accurately aggregates the contributions of local nodes without introducing any malicious activity because it is presumed to be non-malicious.</p></list-item>
<list-item><p><bold>Global IDS monitoring</bold>: Although the central server is considered reliable, an IDS deployed at the server monitors the decrypted model updates for irregularities. To ensure they do not have a detrimental effect on the global model, the IDS detects and flags suspicious updates coming from potentially malicious local nodes, such as those with extreme model parameter deviations.</p></list-item>
<list-item><p><bold>Global Model Distribution</bold>: Following aggregation, each local healthcare facility receives a copy of the global model. To increase the precision of its forecasts, every institution makes use of the recent global model.</p></list-item>
</list>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Federated Learning architecture. (i) Local Nodes train models on their own data. (ii) Secure Multi-Party Computation encrypts the model updates before transmitting them to the central server. (iii) Local Nodes send their encrypted model updates to the central server. (iv) The central server decrypts and aggregates the updates to update the global model. (v) The central server uses its IDS to monitor for any abnormalities in the decrypted updates, ensuring integrity against malicious contributions.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdata-08-1659026-g0001.tif">
<alt-text content-type="machine-generated">Diagram illustrating a centralized server receiving encrypted model updates from multiple healthcare institutes. Each institute shares encrypted weights with a central server. Secure Multi-Party Computation (SMPC) is involved in encrypting updates. The central server decrypts, aggregates, and monitors updates for anomalies, ensuring data integrity. Icons represent local data, training data, and hospitals. Annotations clarify the steps in the data-sharing process.</alt-text>
</graphic>
</fig>
<p>The proposed architecture ensures that malicious activity coming from local nodes is identified and stopped before it can compromise the integrity of the global model by combining FSE for secure communication with an IDS for anomaly detection.</p>
</sec>
<sec>
<label>3.2</label>
<title>Working of federated learning framework</title>
<p>A decentralized machine learning technique called federated learning allows several devices or organizations (<xref ref-type="bibr" rid="B22">Oh and Nadkarni, 2023</xref>) to work together to train a model without exchanging raw data. Multiple local nodes and a global node make up an FL&#x00027;s two ends, with the client servers keeping their local data and the central server maintaining the global model (<xref ref-type="bibr" rid="B15">Islam et al., 2023</xref>). Each client uses its data in the paradigm to train the model locally; only the central server receives the model weights for aggregation. To enhance the global model, which makes use of insights from all participating clients, the central server gathers these weights. Particularly useful in healthcare applications where patient data must stay within the borders of each institution, this decentralized approach guarantees privacy preservation by storing sensitive data on client devices and lowering data transfer risks.</p>
<p>The aggregated global model weights are calculated using <xref ref-type="disp-formula" rid="EQ1">Equation 1</xref>, which represents the federated averaging mechanism:</p>
<disp-formula id="EQ1"><mml:math id="M1"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">global</mml:mtext></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">local</mml:mtext><mml:mo>,</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(1)</label></disp-formula>
<p>where <italic>W</italic><sub>global</sub> is the global model weight, <italic>W</italic><sub>local, <italic>i</italic></sub> represents the local weights of client <italic>i</italic>, and <italic>N</italic> is the total number of clients. This equation ensures that each client&#x00027;s contribution is equally weighted in the global model, providing a democratic aggregation approach where no single client dominates the learning process.</p>
</sec>
<sec>
<label>3.3</label>
<title>Secure transmission of model updates using symmetric encryption</title>
<p>FSE is a cryptographic technique that enables a node to authenticate and encrypt messages between parties (<xref ref-type="bibr" rid="B25">Sadu, 2024</xref>). In the context of Federated Learning, FSE is used to protect local model weights during transmission from clients to the central server. Instead of sending raw weight updates, which may leak sensitive information about patient data, each client encrypts its model parameters before sharing them.</p>
<p>Mathematically, the process can be described as follows. For a given client <italic>i</italic>, the local model weights <italic>W</italic><sub>local, <italic>i</italic></sub> are encrypted before transmission:</p>
<disp-formula id="EQ2"><mml:math id="M2"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">encrypted</mml:mtext></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">Encrypt</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">local</mml:mtext></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>K</mml:mi></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">FSE</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(2)</label></disp-formula>
<p>where <italic>K</italic><sub>FSE</sub> is the secret encryption key (or a set of keys, in the case of threshold cryptography). This transformation ensures that even if an adversary intercepts the communication channel, the transmitted weights are unintelligible.</p>
<p>At the server side, decryption is performed to recover the original updates:</p>
<disp-formula id="EQ3"><mml:math id="M3"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">decrypted</mml:mtext></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">Decrypt</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>W</mml:mi></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">encrypted</mml:mtext></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>K</mml:mi></mml:mrow><mml:mrow><mml:mtext class="textrm" mathvariant="normal">FSE</mml:mtext></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(3)</label></disp-formula>
<p>This allows the central server to aggregate weights securely while ensuring that no raw data is ever exposed.</p>
<sec>
<label>3.3.1</label>
<title>Key properties and guarantees</title>
<p>The use of FSE in our framework provides several important guarantees:</p>
<list list-type="bullet">
<list-item><p><bold>Confidentiality:</bold> Local model updates remain private during transmission, preventing leakage of patient-level data.</p></list-item>
<list-item><p><bold>Collusion resistance:</bold> Even if multiple clients collude, they cannot recover another client&#x00027;s raw data, as only encrypted updates are visible in transit.</p></list-item>
<list-item><p><bold>Integrity of transmission:</bold> By coupling encryption with authentication tags (e.g., Fernet symmetric encryption), tampering with updates can be detected.</p></list-item>
</list></sec>
<sec>
<label>3.3.2</label>
<title>Implementation considerations</title>
<p>In our implementation, the FSE scheme was employed, which provides both confidentiality and authentication. Symmetric encryption is chosen due to its computational efficiency compared to homomorphic encryption, which, although more powerful, can introduce significant communication and processing overhead. The global server generates and securely distributes the shared encryption key <italic>K</italic><sub>FSE</sub> to each participating client during initialisation, ensuring that all parties can participate in secure encryption and decryption.</p>
<p>While FSE secures communication channels, it does not by itself detect malicious updates (e.g., model poisoning). This limitation justifies the complementary inclusion of the IDS at the global server, which inspects decrypted weights for anomalous behavior. Together, FSE and IDS provide both confidentiality and integrity for secure federated learning in healthcare.</p>
</sec>
</sec>
<sec>
<label>3.4</label>
<title>Intrusion detection system</title>
<p>An IDS is a security tool that monitors and analyzes system activity to detect suspicious behavior, unauthorized access, or cyberattacks (<xref ref-type="bibr" rid="B20">Mosaiyebzadeh et al., 2023</xref>). Acting as an alarm system, it alerts system administrators to anomalies or malicious activity within the system. In the context of FL, the IDS safeguards the training process by detecting malicious or unusual behavior. The global server employs an IDS to monitor incoming client model updates. Using anomaly detection techniques, it identifies inconsistencies&#x02014;such as significant deviations in model parameters&#x02014;that may indicate malicious activity. To prevent compromised models from being incorporated into the global model, the server rejects any updates flagged as anomalous.</p>
<p>The anomaly detection technique used here checks for unusual changes in the model&#x00027;s weights as defined in <xref ref-type="disp-formula" rid="EQ4">Equation 4</xref>:</p>
<disp-formula id="EQ4"><mml:math id="M4"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mtext class="textrm" mathvariant="normal">Anomaly Detected&#x02003;if&#x02003;</mml:mtext><mml:mo>||</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>w</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold"><mml:mtext>w</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>||&#x0003E;</mml:mo><mml:mi>&#x003B4;</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(4)</label></disp-formula>
<p>In <xref ref-type="disp-formula" rid="EQ4">Equation 4</xref>, <bold>w</bold><sub><italic>i</italic></sub> represents the weight updates from client <italic>i</italic>, <bold>w</bold><sub><italic>t</italic></sub> is the current global model weights, and &#x003B4; is the predefined threshold. When the Euclidean norm of the difference exceeds this threshold, the system flags the update as potentially malicious.</p>
<sec>
<label>3.4.1</label>
<title>Threshold selection</title>
<p>The threshold value &#x003B4; plays a critical role in balancing sensitivity and false alarms. In our experiments, &#x003B4; was set empirically based on the distribution of update magnitudes across clients, with values chosen around the 95th percentile of observed deviations during benign training. This ensures that natural update variations are tolerated, while extreme deviations are flagged as anomalous. In practical deployments, &#x003B4; can be dynamically adapted using validation rounds or statistical confidence intervals, making the IDS adaptable to different datasets and model architectures.</p></sec>
<sec>
<label>3.4.2</label>
<title>Need for IDS alongside FSE</title>
<p>Although FSE encrypts the data during transmission to guarantee the privacy and confidentiality of the model weights, it lacks a way to ensure the data&#x00027;s integrity. Malicious updates that adhere to the encryption scheme but are intended to undermine the global model can still be attempted by adversaries. IDS is responsible for identifying such malicious activity by examining the decrypted model updates for patterns. By examining system behavior and contrasting it with a baseline of typical activity, an anomaly-based IDS can detect possible threats (<xref ref-type="bibr" rid="B26">Schneble, 2018</xref>). By concentrating on departures from the standard, it can identify zero-day or previously unidentified attacks. This method, in contrast to signature-based IDS, is not restricted to known threats and can adjust to changing security issues. However, if normal activity patterns are not precisely defined, it might produce false positives.</p></sec></sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Implementation</title>
<sec>
<label>4.1</label>
<title>Dataset overview</title>
<p>The Lung Cancer Risk Detection (<xref ref-type="bibr" rid="B6">Biswas and Nath, 2024</xref>) dataset is used for proposed work. It provides a comprehensive collection of data for examining various risk factors associated with lung cancer. It consists of 3,000 rows and 16 columns, capturing multiple patient attributes. Key features include <bold>GENDER, AGE, SMOKING, ANXIETY, SHORTNESS_OF_BREATH, YELLOW_FINGERS, ALLERGY, ALCOHOL_CONSUMING, COUGHING, CHEST_PAIN</bold>. A summary of the dataset is presented in <xref ref-type="table" rid="T2">Table 2</xref>.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Summary of the lung cancer risk detection dataset.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Property</bold></th>
<th valign="top" align="left"><bold>Description</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Dataset name</td>
<td valign="top" align="left">Lung cancer risk detection dataset (<xref ref-type="bibr" rid="B6">Biswas and Nath, 2024</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Number of instances</td>
<td valign="top" align="left">3,000 patient records</td>
</tr>
<tr>
<td valign="top" align="left">Number of features</td>
<td valign="top" align="left">16 attributes (demographic, behavioral, psychological, and clinical)</td>
</tr>
<tr>
<td valign="top" align="left">Feature types</td>
<td valign="top" align="left">Categorical (e.g., gender, smoking, alcohol consuming), Numerical (e.g., Age), Binary/symptom indicators (e.g., shortness_of_breath, chest_pain, yellow_fingers, coughing, allergy)</td>
</tr>
<tr>
<td valign="top" align="left">Target variable</td>
<td valign="top" align="left">Presence or absence of lung cancer</td>
</tr>
<tr>
<td valign="top" align="left">Unique characteristics</td>
<td valign="top" align="left">Includes lifestyle habits, clinical symptoms, and psychological attributes (e.g., anxiety)</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec>
<label>4.2</label>
<title>Local model description</title>
<p>In the FL environment, each participating client&#x02014;such as hospitals or diagnostic facilities&#x02014;trains a local model on its private dataset without disclosing sensitive patient information. The proposed work employs Machine Learning (ML) models as local models to predict lung cancer risk, ensuring both data privacy and predictive accuracy. ML is preferred over Deep Learning (DL) since the dataset is relatively small (3,000 records with 16 features), where DL models are prone to overfitting, require higher computational resources, and offer limited performance improvements. In contrast, ML is better suited for structured tabular data, computationally efficient, and provides interpretable results, which is essential in healthcare. Each client independently trains its model on local data, and the learned parameters are aggregated at the central server to build a robust global model. Specifically, Random Forest (RF), Support Vector Classifier (SVC), and Logistic Regression (LR) are used in the local training phase, with an ensemble approach to combine their predictions, thereby improving accuracy, generalizability, and robustness against non-Independent and Identically Distributed (IID) data distributions.</p>
<p>All ML models were trained with model-specific hyperparameters. For RF, the number of estimators was set to 100 with Gini impurity as the split criterion. For SVC, an RBF kernel was used with <italic>C</italic> &#x0003D; 1.0 and &#x003B3; &#x0003D; scale. For LR, the solver was set to &#x0201C;liblinear&#x0201D; with L2 regularization and a maximum of 1,000 iterations. These hyperparameters were selected through preliminary tuning to balance training efficiency and predictive accuracy across clients.</p>
<p>The following subsections describe these classifiers in detail and their role in the federated setup.</p>
<sec>
<label>4.2.1</label>
<title>Random forest</title>
<p>An ensemble learning technique called Random Forest builds several decision trees during training and produces the class that is the mode (majority vote) of the classes of the individual trees. The Random Forest model will use 16 features in the dataset to produce a strong predictive model for lung cancer risk detection as shown in <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Random forest architecture.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdata-08-1659026-g0002.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a Random Forest model process. Training data is divided into bootstrap samples, creating multiple decision trees. Outputs from these trees undergo majority voting to produce the final prediction.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>4.2.2</label>
<title>Support vector classifier</title>
<p>The goal of the Support Vector Classifier (SVC) is to identify the best hyperplane in the feature space for dividing the various classes. The SVC will attempt to maximize the margin between the classes by mapping the 16 input features into a high-dimensional space in the context of lung cancer risk detection as shown in <xref ref-type="fig" rid="F3">Figure 3</xref>.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Support vector classifier architecture.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdata-08-1659026-g0003.tif">
<alt-text content-type="machine-generated">Flowchart illustrating the SVM process: Training data undergoes kernel transformation, followed by optimization. This leads to the identification of support vectors, creation of a decision boundary, and results in prediction.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>4.2.3</label>
<title>Logistic regression</title>
<p>A statistical model called logistic regression models a binary dependent variable using a logistic function. Based on the input features, Logistic Regression will calculate the likelihood of lung cancer in the context of lung cancer risk detection. Maximum likelihood estimation is used to train the model, and the result is a probability score that can be thresholded to classify patients as either low-risk or high-risk. The architecture of logistic regression is depicted in <xref ref-type="fig" rid="F4">Figure 4</xref>.</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Logistic regression architecture.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdata-08-1659026-g0004.tif">
<alt-text content-type="machine-generated">Flowchart depicting a machine learning process. Training data undergoes feature processing, followed by weight initialization, application of the sigmoid function, optimization, and concludes with a probability output. Arrows indicate the sequence.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>4.2.4</label>
<title>Ensemble approach</title>
<p>To enhance overall predictive performance, the ensemble approach integrates predictions from several models, such as SVC, RF, and LR. The architecture of the ensemble approach is shown in <xref ref-type="fig" rid="F5">Figure 5</xref>.</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>Ensemble model.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdata-08-1659026-g0005.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a machine learning workflow. Training data undergoes processing, feeding into three models: Random Forest, SVC, and Logistic Regression. Outputs are evaluated by performance metrics, guiding best model selection, leading to final prediction.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec>
<label>4.3</label>
<title>Algorithms</title>
<p>The local training process follows the mathematical framework established in <xref ref-type="disp-formula" rid="EQ1">Equations 1</xref>, <xref ref-type="disp-formula" rid="EQ2">2</xref>, where encrypted local weights are securely transmitted for global aggregation according to the federated averaging principle.</p>
<sec>
<label>4.3.1</label>
<title>Node <italic>i</italic>: Local training and secure weight sharing</title>
<p>The <xref ref-type="other" rid="algorithm_1">Algorithm 1</xref> describes the role of the local node in federated learning. The objective is to use Fernet Symmetric Encryption (FSE) to ensure secure weight sharing while training the local model with the node&#x00027;s private dataset. The following are the steps:</p>
<list list-type="bullet">
<list-item><p><bold>Initialization</bold>: The node sets up the FSE scheme (<italic>FSE</italic><sub><italic>i</italic></sub>) for encrypting model updates, its local dataset (<italic>D</italic><sub><italic>i</italic></sub>), and its local model (<italic>M</italic><sub><italic>i</italic></sub>).</p></list-item>
<list-item><p><bold>Local training</bold>: The node uses its private dataset (<italic>D</italic><sub><italic>i</italic></sub>) to train its model (<italic>M</italic><sub><italic>i</italic></sub>) per training round.</p></list-item>
<list-item><p><bold>Computation of updates</bold>: The node calculates its local model updates (<italic>W</italic><sub><italic>i</italic></sub>) following training.</p></list-item>
<list-item><p><bold>Secure encryption</bold>: The FSE is used to encrypt the local updates.</p></list-item>
<list-item><p><bold>Weight sharing</bold>: The global server receives the encrypted model updates (<italic>encrypted</italic>_<italic>W</italic><sub><italic>i</italic></sub>) for aggregation.</p></list-item>
<list-item><p><bold>Termination</bold>: Until the local model converges or the maximum number of training rounds is reached, the process repeats.</p></list-item>
</list>
<statement content-type="algorithm" id="algorithm_1">
<label>Algorithm 1</label>
<p>Node <italic>i</italic>: Local training and secure weight sharing.
<preformat>
<monospace> 1: &#x000A0;Initialize Local Node <bold>i:</bold></monospace>
<monospace> 2: &#x000A0;<italic>D</italic><sub><italic>i</italic></sub>&#x02190; Initialize Local Data</monospace> 
<monospace> 3: &#x000A0;<italic>M</italic><sub><italic>i</italic></sub>&#x02190; Initialize Local Model</monospace> 
<monospace> 4: &#x000A0;<italic>FSE</italic><sub><italic>i</italic></sub>&#x02190; Initialize FSE</monospace> 
<monospace> 5: &#x000A0;while not converged and <italic>current</italic>_<italic>round</italic>&#x0003C;<italic>max</italic>_<italic>rounds</italic> <bold>do</bold>:</monospace> 
<monospace> 6: &#x000A0; Train <italic>M</italic><sub><italic>i</italic></sub> on <italic>D</italic><sub><italic>i</italic></sub></monospace> 
<monospace> 7: &#x000A0; <italic>W</italic><sub><italic>i</italic></sub>&#x02190; Local Model Updates</monospace> 
<monospace> 8: &#x000A0; <italic>enc</italic>_<italic>W</italic><sub><italic>i</italic></sub>&#x02190;<italic>FSE</italic><sub><italic>i</italic></sub>.<italic>encrypt</italic>(<italic>W</italic><sub><italic>i</italic></sub>)</monospace> 
<monospace> 9: &#x000A0; Send <italic>enc</italic>_<italic>W</italic><sub><italic>i</italic></sub> to Global Server</monospace> 
<monospace> 10: &#x000A0;end <bold>while</bold></monospace>
</preformat>
</p>
</statement>
</sec>
<sec>
<label>4.3.2</label>
<title>Global server: secure aggregation and anomaly detection</title>
<p>The <xref ref-type="other" rid="algorithm_2">Algorithm 2</xref> describes the actions taken by the global server. The server&#x00027;s functions include coordinating the iterative enhancement of the global model, detecting anomalies at the global level, and aggregating securely encrypted model weights from several nodes. The following are the steps.</p>
<list list-type="bullet">
<list-item><p><bold>Initialization</bold>: Initialization is done for a secure FSE (<italic>FSE</italic><sub><italic>c</italic></sub>) for secure aggregation, a global model (<italic>M</italic><sub><italic>c</italic></sub>), and a global IDS (<italic>IDS</italic><sub><italic>c</italic></sub>) for anomaly detection.</p></list-item>
<list-item><p><bold>Parameter setup</bold>: Important parameters are specified, including the convergence threshold, maximum rounds, and performance metrics.</p></list-item>
<list-item><p><bold>Receiving encrypted updates</bold>: All participating local nodes send encrypted model updates (<italic>encrypted</italic>_<italic>W</italic><sub><italic>i</italic></sub>) to the server.</p></list-item>
<list-item><p><bold>Anomaly detection</bold>: The received encrypted updates are monitored by the global IDS (<italic>IDS</italic><sub><italic>c</italic></sub>) for any possible irregularities. The malicious updates are removed if anomalies are found.</p></list-item>
<list-item><p><bold>Decryption and aggregation</bold>: The server uses the FSE (<italic>FSE</italic><sub><italic>c</italic></sub>) to decrypt the updates and aggregates them to <italic>M</italic><sub><italic>c</italic></sub> if no anomalies are found.</p></list-item>
<list-item><p><bold>Convergence evaluation</bold>: The server compares the global model&#x00027;s performance metrics to a predetermined threshold to assess the convergence of the model.</p></list-item>
<list-item><p><bold>Final model distribution</bold>: All participating local nodes receive access to the final global model after convergence or the maximum number of rounds is reached.</p></list-item>
</list>
<statement content-type="algorithm" id="algorithm_2">
<label>Algorithm 2</label>
<p>Global server: secure aggregation and anomaly detection.
<preformat>
<monospace> 1: &#x000A0;Initialize Global Server:</monospace> 
<monospace> 2: &#x000A0;<italic>M</italic><sub><italic>c</italic></sub>&#x02190; Initialize Global Model</monospace> 
<monospace> 3: &#x000A0;<italic>IDS</italic><sub><italic>c</italic></sub>&#x02190; Initialize IDS for Anomaly Detection</monospace> 
<monospace> 4: &#x000A0;<italic>FSE</italic><sub><italic>c</italic></sub>&#x02190; Initialize FSE</monospace> 
<monospace> 5: &#x000A0;Setting the Parameters:</monospace> 
<monospace> 6: &#x000A0;<italic>con</italic>_<italic>t</italic>&#x02190; Convergence Threshold</monospace> 
<monospace> 7: &#x000A0;<italic>max</italic>_<italic>rounds</italic>&#x02190; Maximum Rounds</monospace> 
<monospace> 8: &#x000A0;while not converged and <italic>current</italic>_<italic>round</italic>&#x0003C;<italic>max</italic>_<italic>rounds</italic> <bold>do</bold>:</monospace> 
<monospace> 9: &#x000A0; Increment <italic>current</italic>_<italic>round</italic></monospace> 
<monospace> 10: &#x000A0; Receive Encrypted Weights:</monospace> 
<monospace> 11: &#x000A0; <italic>enc</italic>_<italic>weights</italic>&#x02190; Gather updates from all local nodes</monospace> 
<monospace> 12: &#x000A0; IDS Monitoring:</monospace> 
<monospace> 13: &#x000A0; <italic>anomalies</italic><sub><italic>c</italic></sub>&#x02190;<italic>IDS</italic><sub><italic>c</italic></sub>.<italic>detect</italic>_<italic>anomalies</italic>(<italic>enc</italic>_<italic>weights</italic>)</monospace> 
<monospace> 14: &#x000A0; if <italic>anomalies</italic><sub><italic>c</italic></sub> is detected <bold>then</bold></monospace> 
<monospace> 15: &#x000A0; Raise an alert and discard malicious updates</monospace> 
<monospace> 16: &#x000A0; Continue to the next round</monospace> 
<monospace> 17: &#x000A0; end <bold>if</bold></monospace> 
<monospace> 18: &#x000A0; Decrypt and Aggregate:</monospace> 
<monospace> 19: &#x000A0; <italic>dec</italic>_<italic>weights</italic>&#x02190;<italic>FSE</italic><sub><italic>c</italic></sub>.<italic>decrypt</italic>(<italic>enc</italic>_<italic>weights</italic>)</monospace> 
<monospace> 20: &#x000A0; <italic>M</italic><sub><italic>c</italic></sub>&#x02190;<italic>Aggregate</italic>(<italic>dec</italic>_<italic>weights</italic>)</monospace> 
<monospace> 21: &#x000A0;end <bold>while</bold></monospace> 
<monospace> 22: &#x000A0;Send Final Global Model:</monospace> 
<monospace> 23: &#x000A0;Distribute <italic>M</italic><sub><italic>c</italic></sub> back to all local nodes</monospace>
</preformat>
</p>
</statement>
</sec></sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Security analysis</title>
<p>The security of our framework is mathematically grounded in the encryption-decryption pair defined by <xref ref-type="disp-formula" rid="EQ2">Equations 2</xref>, <xref ref-type="disp-formula" rid="EQ3">3</xref>, combined with the anomaly detection mechanism specified in <xref ref-type="disp-formula" rid="EQ4">Equation 4</xref>. This mathematical foundation provides formal security guarantees for the federated learning process. Fernet Symmetric Encryption (FSE) and Intrusion Detection System (IDS), the two main security elements included in the Federated Learning framework, are thoroughly examined in this section. Together, these elements form a strong security framework that safeguards the model aggregation procedure as well as the communication channels. The proposed technique is evaluated against existing schemes with respect to security properties and various attacks, as shown in <xref ref-type="table" rid="T3">Tables 3</xref>, <xref ref-type="table" rid="T4">4</xref>.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Comparison of attack detection capabilities.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Approach</bold></th>
<th valign="top" align="center"><bold>Man-in-the-middle</bold></th>
<th valign="top" align="center"><bold>Label-flipping</bold></th>
<th valign="top" align="center"><bold>Anomaly detection</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B13">Guduri et al. (2023)</xref></td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">-</td>
<td valign="top" align="center">-</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B2">Alazab et al. (2023)</xref></td>
<td valign="top" align="center">-</td>
<td valign="top" align="center">-</td>
<td valign="top" align="center">&#x02713;</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B24">Qayyum et al. (2022)</xref></td>
<td valign="top" align="center">-</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">-</td>
</tr>
<tr>
<td valign="top" align="left">Proposed framework</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">&#x02713;</td>
</tr></tbody>
</table>
</table-wrap>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Comparison of security properties.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Approach</bold></th>
<th valign="top" align="center"><bold>Confidentiality</bold></th>
<th valign="top" align="center"><bold>Integrity</bold></th>
<th valign="top" align="center"><bold>Authenticity</bold></th>
<th valign="top" align="center"><bold>Availability</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B2">Alazab et al. (2023)</xref></td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">-</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B8">Chen et al. (2023)</xref></td>
<td valign="top" align="center">-</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">-</td>
<td valign="top" align="center">-</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B31">Yazdinejad et al. (2024)</xref></td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">-</td>
</tr>
<tr>
<td valign="top" align="left">Proposed framework</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">&#x02713;</td>
<td valign="top" align="center">-</td>
<td valign="top" align="center">-</td>
</tr></tbody>
</table>
</table-wrap>
<sec>
<label>5.1</label>
<title>FSE implementation analysis</title>
<p>The implementation utilizes the Fernet symmetric encryption from the cryptography library to secure weight transmission between local nodes and the global server. The implementation centers around a secure key generation process at the global server level, which establishes the foundation for all subsequent encryption operations. During the training process, local weights are carefully serialized and encrypted before transmission, ensuring that sensitive model parameters remain protected during transit. The global server then performs secure decryption before weight aggregation, maintaining the confidentiality of the entire process. The FSE implementation provides significant security benefits in terms of both confidentiality protection and communication security. From a confidentiality perspective, the encryption of weights during transit effectively prevents unauthorized access to model parameters. The Fernet implementation provides strong cryptographic guarantees, ensuring that even if the communication channel is compromised, the encrypted weights remain secure. This protection extends to preventing weight inference attacks, where adversaries might attempt to reconstruct training data from model parameters.</p>
<p>Communication security is enhanced through multiple mechanisms. The implementation effectively mitigates man-in-the-middle attacks by ensuring that all transmitted data is encrypted with keys known only to authorized participants. The secure weights-sharing scheme enables distributed nodes to collaborate safely, while the encryption scheme preserves data privacy throughout the learning process. This comprehensive approach to communication security ensures that the federated learning system can operate effectively even in potentially hostile network environments.</p>
<sec>
<label>5.1.1</label>
<title>Verification results</title>
<p>The verification process focused on critical security properties such as confidentiality, authentication, and liveness between the participating entities, namely the Healthcare Institutions (clients) and the Central Server (aggregator).</p>
<list list-type="bullet">
<list-item><p><bold>Confidentiality (Secret):</bold> The Scyther tool (<xref ref-type="bibr" rid="B10">Cremers, 2008</xref>) confirmed that the <monospace>uniqueTransactionId</monospace> shared between clients and the server remains confidential, ensuring no leakage of sensitive information.</p></list-item>
<list-item><p><bold>Authentication (Nisynch and Alive):</bold>
<list list-type="simple">
<list-item><p><bold>- Nisynch (Non-injective Synchronization):</bold> Verified that if two parties believe they have completed a session, then the session indeed took place.</p></list-item>
<list-item><p><bold>- Alive:</bold> Verified that both communicating parties were active during the communication.</p></list-item>
</list></p></list-item>
</list>
<p>Authentication was successfully verified for both Healthcare Institutions and the Central Server, ensuring mutual agreement and trust in the communication sessions.</p>
<p><xref ref-type="fig" rid="F6">Figure 6</xref> presents the verification results showing that all claims have been successfully verified without any detected attacks.</p>
<fig position="float" id="F6">
<label>Figure 6</label>
<caption><p>Scyther verification results for the SecureFedL.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdata-08-1659026-g0006.tif">
<alt-text content-type="machine-generated">Results screen from Scyther showing various claims and their statuses. Claims include SecureFedL with HealthcareInst and CentralServer connections. All claims are marked &#x0201C;OK&#x0201D; and &#x0201C;Verified,&#x0201D; indicating no attacks detected.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>5.1.2</label>
<title>Characterization results</title>
<p>The characterization analysis performed by Scyther further confirmed the correctness of the scheme&#x00027;s execution flow. It identified exactly three valid trace patterns for interactions between:</p>
<list list-type="bullet">
<list-item><p>SecureFedL and Healthcare Institutions</p></list-item>
<list-item><p>SecureFedL and Central Server</p></list-item>
</list>
<p>This indicates that the SecureFedL adheres to its intended behavior under different communication scenarios, enhancing its reliability.</p>
<p>The characterization results are shown in <xref ref-type="fig" rid="F7">Figure 7</xref>.</p>
<fig position="float" id="F7">
<label>Figure 7</label>
<caption><p>Scyther characterization results for the SecureFedL.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdata-08-1659026-g0007.tif">
<alt-text content-type="machine-generated">Scyther results interface displaying a table with columns for Claim, Status, Comments, and Patterns. Claims include &#x0201C;SecureFedL, HealthcareInst&#x0201D; and &#x0201C;CentralServer.&#x0201D; Both are marked as &#x0201C;Reachable&#x0201D; with &#x0201C;OK&#x0201D; status, verified with comments noting &#x0201C;Exactly 3 trace patterns.&#x0201D; Patterns are indicated with clickable &#x0201C;3 trace patterns&#x0201D; buttons. The process is marked as done.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>5.1.3</label>
<title>Summary</title>
<p>The results from Scyther tool analysis demonstrate that the SecureFedL successfully upholds the required security properties:</p>
<list list-type="bullet">
<list-item><p>Confidentiality of sensitive data</p></list-item>
<list-item><p>Authentication and liveness of participants</p></list-item>
<list-item><p>Correct execution flow through trace characterization</p></list-item>
</list>
<p>Thus, our scheme is formally verified to be secure against standard threat models and provides a reliable foundation for secure federated learning applications in sensitive domains such as healthcare.</p>
</sec>
</sec>
<sec>
<label>5.2</label>
<title>IDS implementation analysis</title>
<p>The IDS implementation uses a sophisticated detect_anomalies() method to detect anomalies in weight updates. By keeping an eye on and evaluating incoming weight updates for possible security threats, this system acts as an essential second line of defense. Potential attacks can be quickly identified thanks to the implementation&#x00027;s use of threshold-based detection mechanisms to find suspicious patterns in the weight updates.</p>
<p>The IDS uses several important mechanisms to offer strong model protection. Throughout the training process, it preserves the integrity of the global model by avoiding the incorporation of poisoned updates. The system&#x00027;s continuous monitoring features greatly lower the chance of successful model poisoning attacks, and its automatic rejection of questionable updates contributes to the stability of the global model. The FL system is protected from numerous types of attacks thanks to this proactive security approach.</p>
</sec>
<sec>
<label>5.3</label>
<title>Dual security architecture analysis</title>
<p>The federated averaging process, as mathematically defined in <xref ref-type="disp-formula" rid="EQ1">Equation 1</xref>, was applied across all three client partitions to generate the global model performance metrics.</p>
<p>A particularly strong security framework that offers thorough protection across several federated learning system layers is produced by the combination of FSE and IDS. By implementing security at both the communication and aggregation layers, this dual approach builds a complementary system of security measures that greatly improves the learning process&#x00027;s overall protection.</p>
<p>The reduction of the attack surface is one of the main advantages of this architecture. The system significantly raises the barrier to entry for potential attackers by putting in place a variety of security checkpoints and defense mechanisms. This multi-layered security approach guarantees that other safeguards will continue to be in place to preserve system security even in the event that one security measure is compromised.</p>
<p><xref ref-type="table" rid="T5">Table 5</xref> highlights that while FSE or IDS alone address only subsets of attack vectors, their combination ensures confidentiality, integrity, and resilience against multiple threats simultaneously. This demonstrates that the dual-security framework provides superior guarantees beyond a straightforward additive benefit.</p>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Comparative analysis of security mechanisms.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Attack type</bold></th>
<th valign="top" align="left"><bold>FSE only</bold></th>
<th valign="top" align="left"><bold>IDS only</bold></th>
<th valign="top" align="left"><bold>Proposed FSE &#x0002B; IDS</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Eavesdropping / Man-in-the-middle</td>
<td valign="top" align="left">Weights protected by encryption</td>
<td valign="top" align="left">Not addressed</td>
<td valign="top" align="left">Encrypted weights &#x0002B; IDS monitors anomalous traffic</td>
</tr>
<tr>
<td valign="top" align="left">Data poisoning (malicious weight updates)</td>
<td valign="top" align="left">Not detected (encrypted malicious updates still valid)</td>
<td valign="top" align="left">Detected using anomaly monitoring</td>
<td valign="top" align="left">Encrypted transfer &#x0002B; anomaly-based rejection at server</td>
</tr>
<tr>
<td valign="top" align="left">Label-flipping attacks</td>
<td valign="top" align="left">Not detected</td>
<td valign="top" align="left">Detected via abnormal update patterns</td>
<td valign="top" align="left">Secured transfer &#x0002B; detection and rejection</td>
</tr>
<tr>
<td valign="top" align="left">Adversarial weight manipulation</td>
<td valign="top" align="left">Confidentiality preserved, but no integrity check</td>
<td valign="top" align="left">IDS can detect deviations, but no confidentiality</td>
<td valign="top" align="left">Combined protection: confidentiality &#x0002B; detection of malicious deviations</td>
</tr></tbody>
</table>
</table-wrap>
</sec></sec>
<sec id="s6">
<label>6</label>
<title>Implementation results and discussion</title>
<p>In this section, evaluation metrics and results that were obtained from the experiment conducted on the Lung Cancer Risk Detection dataset are presented. The models were trained using a federated learning framework, with secure weight sharing and aggregation as described in the previous sections. The model is evaluated based on the performance metrics. As the dataset contains 3,000 rows, it was divided into three parts of 1,000 rows each. <xref ref-type="table" rid="T6">Table 6</xref> shows the results of three clients.</p>
<table-wrap position="float" id="T6">
<label>Table 6</label>
<caption><p>Model performance comparison across clients.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Model</bold></th>
<th valign="top" align="left"><bold>Metrics</bold></th>
<th valign="top" align="center"><bold>Client1</bold></th>
<th valign="top" align="center"><bold>Client2</bold></th>
<th valign="top" align="center"><bold>Client3</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="5">Random forest</td>
<td valign="top" align="left">Accuracy</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">0.91</td>
</tr>
 <tr>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.87</td>
<td valign="top" align="center">0.85</td>
</tr>
 <tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">0.88</td>
<td valign="top" align="center">0.89</td>
</tr>
 <tr>
<td valign="top" align="left">F1 score</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.87</td>
<td valign="top" align="center">0.96</td>
</tr>
 <tr>
<td valign="top" align="left">Log loss</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.87</td>
<td valign="top" align="center">0.89</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="5">Support vector classifier</td>
<td valign="top" align="left">Accuracy</td>
<td valign="top" align="center">0.88</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.97</td>
</tr>
 <tr>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.89</td>
</tr>
 <tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.96</td>
</tr>
 <tr>
<td valign="top" align="left">F1 score</td>
<td valign="top" align="center">0.88</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center">0.93</td>
</tr>
 <tr>
<td valign="top" align="left">Log loss</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">0.91</td>
<td valign="top" align="center">0.95</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="5">Logistic regression</td>
<td valign="top" align="left">Accuracy</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.87</td>
<td valign="top" align="center">0.89</td>
</tr>
 <tr>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.93</td>
</tr>
 <tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.89</td>
</tr>
 <tr>
<td valign="top" align="left">F1 score</td>
<td valign="top" align="center">0.91</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center">0.95</td>
</tr>
 <tr>
<td valign="top" align="left">Log loss</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.94</td>
<td valign="top" align="center">0.91</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="5">Proposed ensemble approach</td>
<td valign="top" align="left">Accuracy</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.967</td>
<td valign="top" align="center">0.892</td>
</tr>
 <tr>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.94</td>
</tr>
 <tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.98</td>
</tr>
 <tr>
<td valign="top" align="left">F1 score</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.96</td>
<td valign="top" align="center">0.98</td>
</tr>
 <tr>
<td valign="top" align="left">Log loss</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.98</td>
</tr></tbody>
</table>
</table-wrap>
<p><xref ref-type="fig" rid="F8">Figures 8</xref>, <xref ref-type="fig" rid="F9">9</xref> show model comparison and evaluation metrics, respectively.</p>
<fig position="float" id="F8">
<label>Figure 8</label>
<caption><p>Comparison of model values.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdata-08-1659026-g0008.tif">
<alt-text content-type="machine-generated">Bar chart comparing model values for Random Forest, Support Vector Classifier, and Logistic Regression. Metrics include Accuracy, Precision, Recall, F1 Score, AUC, and Log Loss. Each model has similar high scores across most metrics, with Log Loss slightly higher for Random Forest and Support Vector Classifier.</alt-text>
</graphic>
</fig>
<fig position="float" id="F9">
<label>Figure 9</label>
<caption><p>Evaluation metrics comparison for Lung Cancer dataset.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdata-08-1659026-g0009.tif">
<alt-text content-type="machine-generated">Six line graphs compare Random Forest, Support Vector Classifier, and Logistic Regression across three clients. Metrics include Accuracy, Precision, Recall, F1 Score, AUC, and Log Loss, each showing varying trends for different models and clients.</alt-text>
</graphic>
</fig>
<p>The results demonstrate that our proposed dual-security federated learning framework consistently achieves high predictive performance while ensuring privacy and robustness. The ensemble approach achieved an accuracy of 99%, which is higher than most reported results in related works, such as <xref ref-type="bibr" rid="B2">Alazab et al. (2023)</xref> (98.07%) and <xref ref-type="bibr" rid="B5">Almalki et al. (2024)</xref> (93.89%). This indicates that our approach is competitive with, and in some cases outperforms, state-of-the-art FL models in healthcare.</p>
<p>Compared to existing literature that employed either FSE or IDS in isolation, our dual approach shows stronger resilience against poisoning and adversarial attacks. The ablation study (<xref ref-type="table" rid="T7">Table 7</xref>) confirms that IDS alone improves anomaly detection, and FSE alone ensures confidentiality, but the combined framework provides the most robust security without sacrificing model accuracy.</p>
<table-wrap position="float" id="T7">
<label>Table 7</label>
<caption><p>Ablation study: impact of FSE and IDS on model performance.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Configuration</bold></th>
<th valign="top" align="center"><bold>Accuracy</bold></th>
<th valign="top" align="center"><bold>Precision</bold></th>
<th valign="top" align="center"><bold>Recall</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">FL without FSE/IDS</td>
<td valign="top" align="center">0.91</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">0.88</td>
</tr>
<tr>
<td valign="top" align="left">FL &#x0002B; FSE only</td>
<td valign="top" align="center">0.93</td>
<td valign="top" align="center">0.91</td>
<td valign="top" align="center">0.90</td>
</tr>
<tr>
<td valign="top" align="left">FL &#x0002B; IDS only</td>
<td valign="top" align="center">0.94</td>
<td valign="top" align="center">0.92</td>
<td valign="top" align="center">0.91</td>
</tr>
<tr>
<td valign="top" align="left">Proposed FL &#x0002B; FSE &#x0002B; IDS</td>
<td valign="top" align="center"><bold>0.99</bold></td>
<td valign="top" align="center"><bold>0.98</bold></td>
<td valign="top" align="center"><bold>0.98</bold></td>
</tr></tbody>
</table>
</table-wrap>
<sec>
<label>6.1</label>
<title>Validation with confidence intervals</title>
<p>To validate the robustness of the results, we calculated 95% confidence intervals (CI) for the key metrics across clients. The ensemble model maintained narrow confidence intervals around its mean accuracy and F1-score, confirming that its performance was consistently better than individual models (Random Forest, Logistic Regression, and SVC). This suggests that improvements are not dataset-split specific, but rather generalizable across clients.</p>
</sec>
<sec>
<label>6.2</label>
<title>Healthcare-specific adaptation</title>
<p>While FSE and IDS have been applied in other domains, our adaptation explicitly targets healthcare risks. Patient data is highly sensitive and often stored in fragmented silos across institutions. Our dual framework ensures that data confidentiality (through FSE) and integrity of model updates (through IDS) are simultaneously preserved, addressing specific threats such as data poisoning of Electronic Health Records (EHR) and adversarial manipulation of diagnostic predictions.</p>
</sec>
<sec>
<label>6.3</label>
<title>Justification of model choice</title>
<p>Although deep neural networks could potentially yield higher accuracy, they are computationally expensive and less interpretable. For healthcare, interpretability and efficiency are critical. Logistic Regression and Random Forest provide explainability for clinical decision-making, while SVC captures nonlinear relations. The ensemble leverages their complementary strengths, making it suitable for real-world healthcare deployments.</p>
</sec>
<sec>
<label>6.4</label>
<title>Limitations of proposed work</title>
<p>Despite promising results, this implementation has several limitations. First, the experiments were conducted on a single healthcare dataset of 3,000 records, distributed across three clients. Such a small-scale setup does not adequately represent the complexity and heterogeneity of real-world healthcare data, thereby limiting the generalizability of the findings. Moreover, the simple division of 3,000 samples into three equal parts does not reflect realistic federated learning scenarios, where data is typically non-IID (non-independently and identically distributed) across clients. The current federated configuration, restricted to three clients with approximately 1,000 samples each, is acknowledged as a simplification and does not fully comply with practical deployment conditions. Future work will extend the evaluation to more realistic environments with increased client participation, heterogeneous data distributions, and real-world constraints. Second, the intrusion detection mechanism relies on a fixed thresholding approach, which may lead to false positives. The evaluation also did not report detailed performance metrics such as true positives, false positives, detection latency, or precision&#x02013;recall trade-offs, all of which are critical for assessing practical feasibility in healthcare environments. In addition, the anomaly detection strategy is based on a simple Euclidean norm threshold (||<italic>w</italic><sub><italic>i</italic></sub>&#x02212;<italic>w</italic><sub><italic>t</italic></sub>||&#x0003E;&#x003B4;), which, while effective against extreme deviations, may generate false positives in federated settings where model updates naturally vary due to non-IID data distributions. Moreover, sophisticated adversarial threats such as gradient inversion, membership inference, and Byzantine behaviors are not explicitly addressed in the current implementation. 
These remain important open challenges, and extending the framework to incorporate adaptive thresholds, advanced defense mechanisms, and evaluations on larger, more diverse datasets is an essential direction for future work.</p>
</sec>
<sec>
<label>6.5</label>
<title>Computational overhead and scalability</title>
<p>A critical concern in federated healthcare applications is whether the proposed dual-security framework can scale across multiple institutions without excessive computational or communication costs.</p>
<p>In our implementation, the FSE employed lightweight symmetric encryption (Fernet). The encryption and decryption of weight vectors added less than 5% overhead to each training round, demonstrating practical feasibility even on modest client devices. IDS monitoring, which consists of anomaly checks based on Euclidean norms, introduced an additional overhead of less than 3%. Together, these operations contribute marginal latency while providing substantial security guarantees.</p>
<p>Regarding scalability, experiments with increasing numbers of simulated clients confirmed that overhead grows linearly with the number of participants. However, communication costs remain manageable, since only encrypted model weights&#x02014;not raw data&#x02014;are transmitted. The framework, therefore, supports deployment across large healthcare systems and can be further optimized using secure aggregation or a communication-efficient scheme in future work.</p></sec>
</sec>
<sec id="s7">
<label>7</label>
<title>Conclusion and future work</title>
<p>A secure and privacy-preserving FL framework designed for healthcare applications is presented in this work, addressing the growing concerns of system robustness and data confidentiality. The suggested method fortifies the security of federated learning against both passive and active threats by incorporating Fernet Symmetric Encryption (FSE) for the encrypted exchange of model updates and setting up an Intrusion Detection System (IDS) at the central server.</p>
<p>The Lung Cancer Risk Detection dataset, which comprises a variety of characteristics like age, smoking habits, anxiety levels, and more, was subjected to experimental assessments. The findings show that the suggested framework protects data privacy while maintaining excellent model performance. The proposed ensemble approach consistently outperformed the individual models&#x02014;Logistic Regression, Random Forest, and Support Vector Classifier&#x02014;achieving a peak accuracy of 99% across clients.</p>
<p>Additionally, by verifying crucial security attributes such as confidentiality, authentication, and appropriate synchronization, formal security verification using the Scyther tool confirmed the framework&#x00027;s resilience. The accuracy of FSE-scheme executions was also demonstrated by the characterization results, confirming the system&#x00027;s dependability in practical applications. Future work will focus on implementing deep learning models across multiple datasets, integrating them for analyzing their impact on results, and enhancing IDS through adaptive anomaly detection techniques.</p></sec>
</body>
<back>
<sec sec-type="data-availability" id="s8">
<title>Data availability statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found here: <ext-link ext-link-type="uri" xlink:href="https://www.kaggle.com/dsv/8795028">https://www.kaggle.com/dsv/8795028</ext-link>.</p>
</sec>
<sec sec-type="author-contributions" id="s9">
<title>Author contributions</title>
<p>BS: Conceptualization, Methodology, Software, Investigation, Formal analysis, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. JG: Software, Data curation, Investigation, Visualization, Writing &#x02013; review &#x00026; editing. SR: Conceptualization, Supervision, Validation, Writing &#x02013; review &#x00026; editing. SP: Methodology, Formal analysis, Resources, Writing &#x02013; review &#x00026; editing. KP: Project administration, Investigation, Data curation, Writing &#x02013; review &#x00026; editing. RN: Supervision, Validation, Resources, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declare that no Gen AI was used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec><sec sec-type="supplementary-material" id="s13">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fdata.2025.1659026/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fdata.2025.1659026/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Abaoud</surname> <given-names>M.</given-names></name> <name><surname>Almuqrin</surname> <given-names>M. A.</given-names></name> <name><surname>Khan</surname> <given-names>M. F.</given-names></name></person-group> (<year>2023</year>). <article-title>Advancing federated learning through novel mechanism for privacy preservation in healthcare applications</article-title>. <source>IEEE Access</source> <volume>11</volume>, <fpage>83562</fpage>&#x02013;<lpage>83579</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2023.3301162</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alazab</surname> <given-names>A.</given-names></name> <name><surname>Khraisat</surname> <given-names>A.</given-names></name> <name><surname>Singh</surname> <given-names>S.</given-names></name> <name><surname>Jan</surname> <given-names>T.</given-names></name></person-group> (<year>2023</year>). <article-title>Enhancing privacy-preserving intrusion detection through federated learning</article-title>. <source>Electronics</source> <volume>12</volume>:<fpage>3382</fpage>. doi: <pub-id pub-id-type="doi">10.3390/electronics12163382</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ali</surname> <given-names>M. S.</given-names></name> <name><surname>Ahsan</surname> <given-names>M. M.</given-names></name> <name><surname>Tasnim</surname> <given-names>L.</given-names></name> <name><surname>Afrin</surname> <given-names>S.</given-names></name> <name><surname>Biswas</surname> <given-names>K.</given-names></name> <name><surname>Hossain</surname> <given-names>M. M.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Federated learning in healthcare: model misconducts, security, challenges, applications, and future research directions-a systematic review</article-title>. <source>arXiv preprint arXiv:2405.13832</source>. doi: <pub-id pub-id-type="doi">10.48550/arXiv.2405.13832</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Almalawi</surname> <given-names>A.</given-names></name> <name><surname>Khan</surname> <given-names>A. I.</given-names></name> <name><surname>Alsolami</surname> <given-names>F.</given-names></name> <name><surname>Abushark</surname> <given-names>Y. B.</given-names></name> <name><surname>Alfakeeh</surname> <given-names>A. S.</given-names></name></person-group> (<year>2023</year>). <article-title>Managing security of healthcare data for a modern healthcare system</article-title>. <source>Sensors</source> <volume>23</volume>:<fpage>3612</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s23073612</pub-id><pub-id pub-id-type="pmid">37050672</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Almalki</surname> <given-names>J.</given-names></name> <name><surname>Alshahrani</surname> <given-names>S. M.</given-names></name> <name><surname>Khan</surname> <given-names>N. A.</given-names></name></person-group> (<year>2024</year>). <article-title>A comprehensive secure system enabling healthcare 5.0 using federated learning, intrusion detection and blockchain</article-title>. <source>Peer J. Comput. Sci</source>. <volume>10</volume>:<fpage>e1778</fpage>. doi: <pub-id pub-id-type="doi">10.7717/peerj-cs.1778</pub-id><pub-id pub-id-type="pmid">38259900</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Biswas</surname> <given-names>M. A.</given-names></name> <name><surname>Nath</surname> <given-names>M. A.</given-names></name></person-group> (<year>2024</year>). <source>Lung Cancer Dataset</source>.</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chaddad</surname> <given-names>A.</given-names></name> <name><surname>Wu</surname> <given-names>Y.</given-names></name> <name><surname>Desrosiers</surname> <given-names>C.</given-names></name></person-group> (<year>2023</year>). <article-title>Federated learning for healthcare applications</article-title>. <source>IEEE Internet Things J</source>. <volume>11</volume>, <fpage>7339</fpage>&#x02013;<lpage>7358</lpage>. doi: <pub-id pub-id-type="doi">10.1109/JIOT.2023.3325822</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>J.</given-names></name> <name><surname>Xue</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Huang</surname> <given-names>L.</given-names></name> <name><surname>Baker</surname> <given-names>T.</given-names></name> <name><surname>Zhou</surname> <given-names>Z.</given-names></name></person-group> (<year>2023</year>). <article-title>Privacy-preserving and traceable federated learning for data sharing in industrial iot applications</article-title>. <source>Expert Syst. Applic</source>. <volume>213</volume>:<fpage>119036</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2022.119036</pub-id></mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Coelho</surname> <given-names>K. K.</given-names></name> <name><surname>Nogueira</surname> <given-names>M.</given-names></name> <name><surname>Vieira</surname> <given-names>A. B.</given-names></name> <name><surname>Silva</surname> <given-names>E. F.</given-names></name> <name><surname>Nacif</surname> <given-names>J. A. M.</given-names></name></person-group> (<year>2023</year>). <article-title>A survey on federated learning for security and privacy in healthcare applications</article-title>. <source>Comput. Communic</source>. <volume>207</volume>, <fpage>113</fpage>&#x02013;<lpage>127</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.comcom.2023.05.012</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Cremers</surname> <given-names>C. J.</given-names></name></person-group> (<year>2008</year>). <article-title>&#x0201C;The scyther tool: verification, falsification, and analysis of security protocols: Tool paper,&#x0201D;</article-title> in <source>International Conference on Computer Aided Verification</source> (<publisher-loc>Springer</publisher-loc>), <fpage>414</fpage>&#x02013;<lpage>418</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-540-70545-1_38</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="web"><collab>Gartner</collab> (<year>2025</year>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.gartner.com/en/documents/4333599">https://www.gartner.com/en/documents/4333599</ext-link> (Accessed January 31, 2025).</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gayathri Hegde</surname> <given-names>M.</given-names></name> <name><surname>Shenoy</surname> <given-names>P. D.</given-names></name> <name><surname>Venugopal</surname> <given-names>K. R.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;A comparative study of neural network and machine learning on privacy preserving federated learning for healthcare applications,&#x0201D;</article-title> in <source>2023 IEEE Technology &#x00026; Engineering Management Conference</source> - <italic>Asia Pacific (TEMSCON-ASPAC)</italic>, 1&#x02013;6. doi: <pub-id pub-id-type="doi">10.1109/TEMSCON-ASPAC59527.2023.10531360</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Guduri</surname> <given-names>M.</given-names></name> <name><surname>Chakraborty</surname> <given-names>C.</given-names></name> <name><surname>Margala</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Blockchain-based federated learning technique for privacy preservation and security of smart electronic health records</article-title>. <source>IEEE Trans. Cons. Electr</source>. <volume>70</volume>, <fpage>2608</fpage>&#x02013;<lpage>2617</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TCE.2023.3315415</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hiwale</surname> <given-names>M.</given-names></name> <name><surname>Walambe</surname> <given-names>R.</given-names></name> <name><surname>Potdar</surname> <given-names>V.</given-names></name> <name><surname>Kotecha</surname> <given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>A systematic review of privacy-preserving methods deployed with blockchain and federated learning for the telemedicine</article-title>. <source>Healthc. Anal</source>. <volume>3</volume>:<fpage>100192</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.health.2023.100192</pub-id><pub-id pub-id-type="pmid">37223223</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Islam</surname> <given-names>M.</given-names></name> <name><surname>Reza</surname> <given-names>M. T.</given-names></name> <name><surname>Kaosar</surname> <given-names>M.</given-names></name> <name><surname>Parvez</surname> <given-names>M. Z.</given-names></name></person-group> (<year>2023</year>). <article-title>Effectiveness of federated learning and cnn ensemble architectures for identifying brain tumors using mri images</article-title>. <source>Neural Process. Lett</source>. <volume>55</volume>, <fpage>3779</fpage>&#x02013;<lpage>3809</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11063-022-11014-1</pub-id><pub-id pub-id-type="pmid">36062060</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Joshi</surname> <given-names>M.</given-names></name> <name><surname>Pal</surname> <given-names>A.</given-names></name> <name><surname>Sankarasubbu</surname> <given-names>M.</given-names></name></person-group> (<year>2022</year>). <article-title>Federated learning for healthcare domain-pipeline, applications and challenges</article-title>. <source>ACM Trans. Comput. Healthc</source>. <volume>3</volume>, <fpage>1</fpage>&#x02013;<lpage>36</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3533708</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khatun</surname> <given-names>M. A.</given-names></name> <name><surname>Memon</surname> <given-names>S. F.</given-names></name> <name><surname>Eising</surname> <given-names>C.</given-names></name> <name><surname>Dhirani</surname> <given-names>L. L.</given-names></name></person-group> (<year>2023</year>). <article-title>Machine learning for healthcare-iot security: a review and risk mitigation</article-title>. <source>IEEE Access</source> <volume>11</volume>, <fpage>145869</fpage>&#x02013;<lpage>145896</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2023.3346320</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Kumar</surname> <given-names>Y.</given-names></name> <name><surname>Singla</surname> <given-names>R.</given-names></name></person-group> (<year>2021</year>). <article-title>&#x0201C;Federated learning systems for healthcare: perspective and recent progress,&#x0201D;</article-title> in <source>Federated Learning Systems: Towards Next-Generation AI</source>, 141&#x02013;156. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-70604-3_6</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>C.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Yang</surname> <given-names>A.</given-names></name> <name><surname>Ma</surname> <given-names>Z.</given-names></name> <name><surname>Zhang</surname> <given-names>Z.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Review on security of federated learning and its application in healthcare</article-title>. <source>Fut. Gen. Comput. Syst</source>. <volume>144</volume>, <fpage>271</fpage>&#x02013;<lpage>290</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.future.2023.02.021</pub-id></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mosaiyebzadeh</surname> <given-names>F.</given-names></name> <name><surname>Pouriyeh</surname> <given-names>S.</given-names></name> <name><surname>Parizi</surname> <given-names>R. M.</given-names></name> <name><surname>Han</surname> <given-names>M.</given-names></name> <name><surname>Batista</surname> <given-names>D. M.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Intrusion detection system for ioht devices using federated learning,&#x0201D;</article-title> in <source>IEEE INFOCOM 2023</source> - <italic>IEEE Conference on Computer Communications Workshops (INFOCOM WKSHPS)</italic>, 1&#x02013;6. doi: <pub-id pub-id-type="doi">10.1109/INFOCOMWKSHPS57453.2023.10225932</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Naresh</surname> <given-names>V. S.</given-names></name> <name><surname>Thamarai</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Privacy-preserving data mining and machine learning in healthcare: applications, challenges, and solutions</article-title>. <source>Wiley Interdiscipl. Rev.: Data Min. Knowl. Discov</source>. <volume>13</volume>:<fpage>e1490</fpage>. doi: <pub-id pub-id-type="doi">10.1002/widm.1490</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oh</surname> <given-names>W.</given-names></name> <name><surname>Nadkarni</surname> <given-names>G. N.</given-names></name></person-group> (<year>2023</year>). <article-title>Federated learning in health care using structured medical data</article-title>. <source>Adv. Kidney Disease Health</source> <volume>30</volume>, <fpage>4</fpage>&#x02013;<lpage>16</lpage>. doi: <pub-id pub-id-type="doi">10.1053/j.akdh.2022.11.007</pub-id><pub-id pub-id-type="pmid">36723280</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Otoum</surname> <given-names>S.</given-names></name> <name><surname>Guizani</surname> <given-names>N.</given-names></name> <name><surname>Mouftah</surname> <given-names>H.</given-names></name></person-group> (<year>2021</year>). <article-title>&#x0201C;Federated reinforcement learning-supported ids for iot-steered healthcare systems,&#x0201D;</article-title> in <source>ICC 2021-IEEE International Conference on Communications</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICC42927.2021.9500698</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Qayyum</surname> <given-names>A.</given-names></name> <name><surname>Janjua</surname> <given-names>M. U.</given-names></name> <name><surname>Qadir</surname> <given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>Making federated learning robust to adversarial attacks by learning data and model association</article-title>. <source>Comput. Sec</source>. <volume>121</volume>:<fpage>102827</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cose.2022.102827</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sadu</surname> <given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>Hybrid encryption of fernet and initialisation vector with attribute-based encryption: a secure and flexible approach for data protection</article-title>. <source>Int. J. Big Data Intell</source>. <volume>8</volume>, <fpage>137</fpage>&#x02013;<lpage>149</lpage>. doi: <pub-id pub-id-type="doi">10.1504/IJBDI.2024.138940</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="thesis"><person-group person-group-type="author"><name><surname>Schneble</surname> <given-names>W.</given-names></name></person-group> (<year>2018</year>). <source>Federated learning for intrusion detection systems in medical cyber-physical systems</source>. PhD thesis.</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shen</surname> <given-names>G.</given-names></name> <name><surname>Fu</surname> <given-names>Z.</given-names></name> <name><surname>Gui</surname> <given-names>Y.</given-names></name> <name><surname>Susilo</surname> <given-names>W.</given-names></name> <name><surname>Zhang</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Efficient and privacy-preserving online diagnosis scheme based on federated learning in e-healthcare system</article-title>. <source>Inform. Sci</source>. <volume>647</volume>:<fpage>119261</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ins.2023.119261</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Srivenkateswaran</surname> <given-names>C.</given-names></name> <name><surname>Jaya Mabel Rani</surname> <given-names>A.</given-names></name> <name><surname>Senthil Kumaran</surname> <given-names>R.</given-names></name> <name><surname>Vinston Raja</surname> <given-names>R.</given-names></name></person-group> (<year>2025</year>). <article-title>Securing healthcare data: a federated learning framework with hybrid encryption in cluster environments</article-title>. <source>Technol. Health Care</source> <volume>33</volume>, <fpage>1232</fpage>&#x02013;<lpage>1257</lpage>. doi: <pub-id pub-id-type="doi">10.1177/09287329241291397</pub-id><pub-id pub-id-type="pmid">40331546</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>B.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Guo</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name></person-group> (<year>2023a</year>). <article-title>Ppflhe: a privacy-preserving federated learning scheme with homomorphic encryption for healthcare data</article-title>. <source>Appl. Soft Comput</source>. <volume>146</volume>:<fpage>110677</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.asoc.2023.110677</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>W.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Qiu</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Brusic</surname> <given-names>V.</given-names></name> <name><surname>Zhao</surname> <given-names>J.</given-names></name></person-group> (<year>2023b</year>). <article-title>A privacy-preserving framework for federated learning in smart healthcare systems</article-title>. <source>Inform. Process. Manag</source>. <volume>60</volume>:<fpage>103167</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ipm.2022.103167</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yazdinejad</surname> <given-names>A.</given-names></name> <name><surname>Dehghantanha</surname> <given-names>A.</given-names></name> <name><surname>Srivastava</surname> <given-names>G.</given-names></name> <name><surname>Karimipour</surname> <given-names>H.</given-names></name> <name><surname>Parizi</surname> <given-names>R. M.</given-names></name></person-group> (<year>2024</year>). <article-title>Hybrid privacy preserving federated learning against irregular users in next-generation internet of things</article-title>. <source>J. Syst. Architect</source>. <volume>148</volume>:<fpage>103088</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.sysarc.2024.103088</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1480821/overview">Riaz Ullah Khan</ext-link>, University of Electronic Science and Technology of China, China</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/937186/overview">Rajesh Kumar</ext-link>, University of Electronic Science and Technology of China, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2116069/overview">Amin Ul Haq</ext-link>, University of Electronic Science and Technology of China, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2996793/overview">Waqas Amin</ext-link>, Southwest University of Science and Technology, China</p>
</fn>
</fn-group>
<fn-group>
<fn fn-type="abbr" id="abbr1"><label>Abbreviations:</label><p><italic>D</italic><sub><italic>i</italic></sub>, Private dataset of node <italic>i</italic>; <italic>M</italic><sub><italic>i</italic></sub>, Local model of node <italic>i</italic>; <italic>FSE</italic><sub><italic>i</italic></sub>, FSE instance of node <italic>i</italic>; <italic>W</italic><sub><italic>i</italic></sub>, Local model weights of node <italic>i</italic>; <italic>enc</italic>_<italic>W</italic><sub><italic>i</italic></sub>, Encrypted weights of node <italic>i</italic>; <italic>M</italic><sub><italic>c</italic></sub>, Global model of central server; <italic>IDS</italic><sub><italic>c</italic></sub>, IDS instance of central server; <italic>FSE</italic><sub><italic>c</italic></sub>, FSE instance of central server; <italic>con</italic>_<italic>t</italic>, Convergence threshold; <italic>max</italic>_<italic>rounds</italic>, Maximum training rounds; <italic>current</italic>_<italic>round</italic>, Current round number; <italic>enc</italic>_<italic>weights</italic>, Collection of encrypted weights; <italic>anomalies</italic><sub><italic>c</italic></sub>, Detected anomalies at server; <italic>dec</italic>_<italic>weights</italic>, Decrypted weights at server.</p></fn></fn-group>
</back>
</article>