<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Mech. Eng.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Mechanical Engineering</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Mech. Eng.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2297-3079</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1744710</article-id>
<article-id pub-id-type="doi">10.3389/fmech.2026.1744710</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Research on intelligent diagnosis of mechanical rolling bearing faults through transfer learning</article-title>
<alt-title alt-title-type="left-running-head">Zhang</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fmech.2026.1744710">10.3389/fmech.2026.1744710</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Zhang</surname>
<given-names>Yougang</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3277522"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<institution>Chongqing Vocational Institute of Safety Technology</institution>, <city>Chongqing</city>, <country country="CN">China</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Yougang Zhang, <email xlink:href="mailto:zhangygyg@hotmail.com">zhangygyg@hotmail.com</email>
</corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-02">
<day>02</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>12</volume>
<elocation-id>1744710</elocation-id>
<history>
<date date-type="received">
<day>12</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>16</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Zhang.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Zhang</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-02">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>This article proposes a fault diagnosis algorithm for mechanical rolling bearings based on transfer learning.</p>
</sec>
<sec>
<title>Methods</title>
<p>The proposed algorithm enhances the conventional convolutional neural network (CNN) algorithm by introducing a domain category judgment module and an inter-domain conditional probability distribution difference module, thereby achieving transfer learning between source domain samples and target domain samples. Simulation experiments were performed. On a PT100 bearing fault simulation test platform, vibration signals of bearings were collected in cases of normal operation, inner race faults, outer race faults, and ball faults at motor speeds of 1,000, 1,500, and 2,000 r/min. The diagnostic performance of support vector machine (SVM), back-propagation neural network (BPNN), and the proposed algorithm was evaluated in operating condition transfer tasks. Moreover, ablation experiments were conducted.</p>
</sec>
<sec>
<title>Results</title>
<p>It was found that the proposed algorithm could effectively and accurately identify bearing faults in the face of changes in operating conditions.</p>
</sec>
<sec>
<title>Discussion</title>
<p>Both the domain category judgment module and the inter-domain conditional probability distribution difference could effectively achieve transfer learning of the diagnostic model.</p>
</sec>
</abstract>
<kwd-group>
<kwd>convolutional neural network</kwd>
<kwd>electrical control design</kwd>
<kwd>fault diagnosis</kwd>
<kwd>rolling bearing</kwd>
<kwd>transfer learning</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="4"/>
<table-count count="2"/>
<equation-count count="5"/>
<ref-count count="15"/>
<page-count count="7"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Mechatronics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>In modern industrial systems, mechanical rolling bearings, as the core elements of rotating mechanical equipment, play an important role in supporting rotors, reducing friction, and transmitting loads (<xref ref-type="bibr" rid="B8">Liu and Yu, 2021</xref>). Therefore, realizing early fault diagnosis and health condition monitoring of rolling bearings is an urgent need to guarantee the safe, efficient, and continuous operation of industrial systems (<xref ref-type="bibr" rid="B6">Li et al., 2019</xref>). Traditional bearing fault diagnosis methods usually require a large amount of fault data with clear labels collected from specific equipment, as well as complex feature engineering to extract fault-sensitive features. However, in actual application scenarios, they face challenges such as scarce labeled data, variable operating conditions, equipment differences, and difficulties in cold start. To address these difficulties, transfer learning, as a training method, has been applied to intelligent algorithms (<xref ref-type="bibr" rid="B10">Shang et al., 2024</xref>). The core principle of transfer learning is to transfer the algorithm parameters learned in the &#x201c;source domain&#x201d; to another related but different &#x201c;target domain&#x201d; to solve the difficulties brought about by the limited training data in the target domain. <xref ref-type="bibr" rid="B4">Hu et al. (2024)</xref> combined a deep neural network with SKNet and Inception-ResNet-v2 for bearing fault diagnosis. <xref ref-type="bibr" rid="B7">Li et al. (2025)</xref> designed an adaptive feature mode decomposition method for the same purpose. <xref ref-type="bibr" rid="B12">Wang et al. (2025)</xref> developed a model for diagnosing faults in aviation bearings based on a long short-term memory and an optimized deep residual shrinkage network. This article introduces a transfer learning-based fault diagnosis algorithm for rolling bearings.
The proposed algorithm enhances a conventional convolutional neural network (CNN) by introducing a domain category judgment module and an inter-domain conditional probability distribution difference module to realize transfer learning between source and target domain samples. Simulation tests were subsequently conducted. The innovation of this paper lies in realizing the transfer learning of the intelligent fault diagnosis algorithm by using the domain category judgment module and the inter-domain conditional probability distribution difference module to make the algorithm quickly learn the feature distribution rules even when there are not enough training labels in the face of a new fault diagnosis environment, thereby achieving rapid diagnosis of bearing faults in the new environment and improving the training efficiency of the algorithm.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Transfer learning-based bearing fault diagnosis</title>
<p>When a bearing malfunctions, its inherent fault characteristic frequencies will be excited during operation. Different types of faults have different fault characteristic frequencies (<xref ref-type="bibr" rid="B9">Liu et al., 2025</xref>). Intelligent algorithms obtain the patterns of bearing fault characteristics through sample training and then identify the types of bearing faults. Usually, a large number of samples are required for training. However, mechanical rolling bearings with faults are difficult to use for a long time or in large quantities. Moreover, even for the same type of fault, variations in equipment and operating conditions can lead to shifts in fault characteristics, making it difficult for trained intelligent algorithms to adapt to new equipment or operating environments (<xref ref-type="bibr" rid="B2">Guo et al., 2025</xref>). Consequently, when attempts are made to apply pre-trained or existing intelligent algorithm models to brand-new bearing equipment, the absence of historical failure data from the target equipment often prevents their direct application.</p>
<p>The core components of transfer learning include the source domain, the target domain, and the transfer methodology. The source domain generally refers to a dataset with abundant labeled samples&#x2014;in this paper, it denotes the dataset used to train the generic fault diagnosis model. The target domain typically consists of sparsely labeled or unlabeled data; here, it refers to the dataset of new equipment or operating conditions for transfer learning. The transfer methodology comprises the techniques used to transfer diagnostic knowledge from the source domain to the target domain, which may include model-based, instance-based, and feature-based methods (<xref ref-type="bibr" rid="B13">Xie et al., 2025</xref>).</p>
<p>The model-based transfer method does not require identical label spaces between the source and target domains. However, if there are significant differences in the distribution of samples in the source domain and the target domain, and the quality of labeled samples in the target domain is not high, it will affect the fine-tuning effect of the model (<xref ref-type="bibr" rid="B15">Zhang et al., 2024</xref>). The instance-based transfer method is easy to operate, but when the difference in the sample distribution between the source domain and the target domain is too large, it will lead to a decrease in the number of similar samples in the two domains, affecting the training effect (<xref ref-type="bibr" rid="B1">Chen et al., 2024</xref>). The feature-based method adjusts the model parameters from the level of the sample feature space, reducing the difference in the sample feature distribution between the two domains and trying to maintain the classification effect of the model on the source domain samples. The above parameter adjustment process does not involve the classification labels of the target domain samples, so it is applicable to fault diagnosis in the unsupervised domain.</p>
<p>Finally, this paper adopts a feature-based transfer approach to train the fault diagnosis model. The overall training architecture is depicted in <xref ref-type="fig" rid="F1">Figure 1</xref>.<list list-type="order">
<list-item>
<p>Fault samples, i.e., vibration signals, from rolling bearings are collected to form the source and target domains. The source domain dataset must contain complete fault category labels (<xref ref-type="bibr" rid="B3">He et al., 2024</xref>), whereas the target domain dataset has no strict labeling requirements.</p>
</list-item>
<list-item>
<p>After the Fast Fourier Transform (FFT) on samples from both domains, the resulting time-frequency graphs are fed into the feature extraction module. In the proposed fault diagnosis model, this module is composed of multiple convolutional base units (<xref ref-type="bibr" rid="B11">Wang et al., 2024</xref>). Each unit contains a one-dimensional convolutional layer, a normalization layer, an activation function, and a pooling layer. Each unit performs convolutional operations on the input data and applies pooling for dimensionality reduction, thereby effectively extracting features from the time-frequency graphs.</p>
</list-item>
<list-item>
<p>The extracted convolutional features are fed into three modules: the fault classification label module, the domain category judgment module, and the inter-domain conditional probability distribution difference module. The fault classification label module, which comprises three fully connected layers (FCLs) followed by a softmax function, identifies the fault type of the input features. The domain category judgment module, consisting of two FCLs and a softmax function, distinguishes whether the input features originate from the source or target domain. The inter-domain conditional probability distribution difference module calculates the difference between the features of the source and target domain samples.</p>
</list-item>
<list-item>
<p>The fault classification error of the source domain samples, the feature domain category judgment error, and the inter-domain conditional probability distribution deviation are calculated to obtain the training error of the fault diagnosis model (<xref ref-type="bibr" rid="B14">Yuan et al., 2025</xref>). First, the calculation formula of the fault classification error of the source domain samples is:</p>
</list-item>
</list>
<disp-formula id="e1">
<mml:math id="m1">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>y</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>C</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>log</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>y</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>where <inline-formula id="inf1">
<mml:math id="m2">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>y</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> denotes the fault classification error of the source domain samples, <inline-formula id="inf2">
<mml:math id="m3">
<mml:mrow>
<mml:mi>c</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf3">
<mml:math id="m4">
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represent fault category and the total number of fault categories, respectively, <inline-formula id="inf4">
<mml:math id="m5">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mtext>&#xa0;</mml:mtext>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is a feature extractor, <inline-formula id="inf5">
<mml:math id="m6">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>y</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mtext>&#xa0;</mml:mtext>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is a fault label classifier, <inline-formula id="inf6">
<mml:math id="m7">
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> represents the sample from the source domain, and <inline-formula id="inf7">
<mml:math id="m8">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the real probability of samples belonging to category <inline-formula id="inf8">
<mml:math id="m9">
<mml:mrow>
<mml:mi>c</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> fault in the source domain. Then, the calculation formula of the feature domain category judgment error is:<disp-formula id="e2">
<mml:math id="m10">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>d</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>log</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>d</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi>log</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>d</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>where <inline-formula id="inf9">
<mml:math id="m11">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>d</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> stands for the feature domain category judgment error, <inline-formula id="inf10">
<mml:math id="m12">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the real probability of samples belonging to the source domain among all the samples, <inline-formula id="inf11">
<mml:math id="m13">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the input sample, and <inline-formula id="inf12">
<mml:math id="m14">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>d</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mtext>&#xa0;</mml:mtext>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the feature domain category classifier. This error is used to measure the performance of the algorithm model in determining whether the input samples belong to the source domain or the target domain during the transfer training process. The calculation formula of the inter-domain conditional probability distribution deviation is:<disp-formula id="e3">
<mml:math id="m15">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>M</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>C</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msubsup>
<mml:mi>n</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>c</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
</mml:msubsup>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
</mml:mrow>
</mml:munder>
</mml:mstyle>
<mml:mrow>
<mml:mi>&#x3d5;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>s</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msubsup>
<mml:mi>n</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>c</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msubsup>
<mml:mo>&#x2208;</mml:mo>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:munder>
</mml:mstyle>
<mml:mrow>
<mml:mi>&#x3d5;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>G</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>where <inline-formula id="inf13">
<mml:math id="m16">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>M</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the inter-domain conditional probability distribution deviation, <inline-formula id="inf14">
<mml:math id="m17">
<mml:mrow>
<mml:msubsup>
<mml:mi>n</mml:mi>
<mml:mi>s</mml:mi>
<mml:mi>c</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf15">
<mml:math id="m18">
<mml:mrow>
<mml:msubsup>
<mml:mi>n</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>c</mml:mi>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula> are the number of samples belonging to category <inline-formula id="inf16">
<mml:math id="m19">
<mml:mrow>
<mml:mi>c</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> fault in the source and target domains, <inline-formula id="inf17">
<mml:math id="m20">
<mml:mrow>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf18">
<mml:math id="m21">
<mml:mrow>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> represent the source and target domains. This error is used to measure the difference of the sample feature distribution in the two domains. The formula of the overall error of the algorithm model during transfer training is:<disp-formula id="e4">
<mml:math id="m22">
<mml:mrow>
<mml:mrow>
<mml:mfenced open="{" close="" separators="|">
<mml:mrow>
<mml:mtable columnalign="left">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>U</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>y</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>&#x3bb;</mml:mi>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>d</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>&#x3bc;</mml:mi>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>M</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>&#x3bb;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mn>2</mml:mn>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>exp</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>10</mml:mn>
<mml:mi>e</mml:mi>
<mml:mi>p</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>M</mml:mi>
</mml:msub>
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>M</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>C</mml:mi>
</mml:msubsup>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>M</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>2</mml:mn>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>2</mml:mn>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>2</mml:mn>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>2</mml:mn>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mi>c</mml:mi>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>where <inline-formula id="inf19">
<mml:math id="m23">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>U</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the overall training loss of the diagnostic model, <inline-formula id="inf20">
<mml:math id="m24">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>M</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the difference of sample marginal distribution in the two domains, <inline-formula id="inf21">
<mml:math id="m25">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the difference of inter-domain sample conditional distribution, <inline-formula id="inf22">
<mml:math id="m26">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the binary classification error of determining whether a sample belongs to the source domain or the target domain, <inline-formula id="inf23">
<mml:math id="m27">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi>c</mml:mi>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> is the classification error of determining whether a sample has fault category <inline-formula id="inf24">
<mml:math id="m28">
<mml:mrow>
<mml:mi>c</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, and <inline-formula id="inf25">
<mml:math id="m29">
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the weight of <inline-formula id="inf26">
<mml:math id="m30">
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mi>M</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>.<list list-type="simple">
<list-item>
<p>5. A termination check is performed to determine whether to end the training process. If terminated, the domain category judgment module and the inter-domain conditional probability distribution difference module are deactivated during the testing of the fault diagnosis model. If training continues, the computed errors are used to iteratively adjust parameters in all model modules, and the process returns to step &#x2461;. Training terminates when either the predefined number of iterations is reached or the training error converges to a stable value.</p>
</list-item>
</list>
</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Framework flow for transfer learning.</p>
</caption>
<graphic xlink:href="fmech-12-1744710-g001.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a fault diagnosis method for rolling bearings, starting with source and target domain samples, applying FFT, extracting features, evaluating classification and judgment errors, checking inter-domain distribution deviation, and looping adjustments until stopping criteria are met, at which point training stops.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3">
<label>3</label>
<title>Simulation experiment</title>
<sec id="s3-1">
<label>3.1</label>
<title>Experimental data</title>
<p>The experimental data were collected from actual mechanical rolling bearings. The PT100 bearing fault simulation test platform, used for acquiring bearing fault data, is shown in <xref ref-type="fig" rid="F2">Figure 2</xref>. The test platform includes a drive motor, frequency converter, double-supported bearing, rotating shaft, foundation base, control unit, and other components. The relevant parameters of the platform are as follows: the drive motor has a rated power of 370&#xa0;W and a rated voltage of 220&#xa0;V; the chrome-plated shaft has a cross-sectional diameter of 25&#xa0;mm; the rolling bearing is a PH205 deep groove ball bearing with an inner diameter of 25&#xa0;mm; and the speed adjustment range is 0&#x2013;3,500&#xa0;r/min. The platform can simulate inner race, outer race, and rolling element faults.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>PT100 bearing fault simulation test platform.</p>
</caption>
<graphic xlink:href="fmech-12-1744710-g002.tif">
<alt-text content-type="machine-generated">Laboratory equipment featuring an electric motor connected to a horizontal metal shaft supported by two mounting brackets, with control switches, display, and a power socket on the front panel of the metal base.</alt-text>
</graphic>
</fig>
<p>When collecting sample data using the test platform, operating conditions were set at 1,000, 1,500, and 2,000&#xa0;r/min. Vibration signal samples were collected under normal operation and three fault types. Finally, 1,000 source domain samples and 500 target domain samples were obtained from each operating condition and bearing state. Both domains were further divided into training and testing sets at an 8:2 ratio.</p>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>Experimental setup</title>
<p>
<xref ref-type="table" rid="T1">Table 1</xref> shows the parameter configuration of the improved CNN algorithm for bearing fault diagnosis. The proposed approach was evaluated against support vector machine (SVM) and back-propagation neural network (BPNN) algorithms. The SVM algorithm adopted a linear kernel function and a penalty parameter of 1. The BPNN algorithm employed an input layer with 128 nodes, a hidden layer with 256 nodes, a sigmoid activation function, and an output layer with 4 nodes.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Relevant parameters of the bearing failure diagnosis model.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Structure</th>
<th align="left">Parameter setting</th>
<th align="left">Structure</th>
<th align="left">Parameter setting</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Feature extractor convolution layer 1</td>
<td align="left">1 &#xd7; 64 convolution kernel, stride 16, ReLU activation function (<xref ref-type="bibr" rid="B5">Jia et al., 2024</xref>)</td>
<td align="left">Feature extractor pooling layer 1</td>
<td align="left">Max pooling box size 2 &#xd7; 2</td>
</tr>
<tr>
<td align="left">Feature extractor convolution layer 2</td>
<td align="left">1 &#xd7; 3 convolution kernel, 1 stride, ReLU activation function</td>
<td align="left">Feature extractor pooling layer 2</td>
<td align="left">Max pooling box size 2 &#xd7; 2</td>
</tr>
<tr>
<td align="left">Feature extractor convolutional layer 3</td>
<td align="left">1 &#xd7; 3 convolution kernel, stride 1, ReLU activation function</td>
<td align="left">Feature extractor pooling layer 3</td>
<td align="left">Max pooling box size 2 &#xd7; 2</td>
</tr>
<tr>
<td align="left">Feature extractor convolution layer 4</td>
<td align="left">1 &#xd7; 3 convolution kernel, stride 1, ReLU activation function</td>
<td align="left">Feature extractor pooling layer 4</td>
<td align="left">Max pooling box size 2 &#xd7; 2</td>
</tr>
<tr>
<td align="left">Label classifier FCL 1</td>
<td align="left">512 nodes, ReLU activation function</td>
<td align="left">Label classifier FCL 2</td>
<td align="left">512 nodes, ReLU activation function</td>
</tr>
<tr>
<td align="left">Label classifier FCL 3</td>
<td align="left">256 nodes, ReLU activation function</td>
<td align="left">Domain category discriminator FCL 1</td>
<td align="left">512 nodes, ReLU activation function</td>
</tr>
<tr>
<td align="left">Domain category discriminator FCL 2</td>
<td align="left">512 nodes, ReLU activation function</td>
<td align="left">Number of training iterations</td>
<td align="left">500</td>
</tr>
<tr>
<td align="left">Learning rate</td>
<td align="left">0.02</td>
<td align="left">Learner</td>
<td align="left">Adam</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>FCL: fully connected layer; ReLU: rectified linear unit.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>As the proposed bearing fault diagnosis algorithm was based on transfer learning, the diagnostic model was evaluated using transfer tasks. Specifically, motor speeds of 1,000, 1,500, and 2,000&#xa0;r/min were defined as operating conditions 1, 2, and 3, respectively. The transfer tasks included: &#x201c;operating condition 1 &#x2192; 2,&#x201d; &#x201c;operating condition 1 &#x2192; 3,&#x201d; &#x201c;operating condition 2 &#x2192; 1,&#x201d; &#x201c;operating condition 2 &#x2192; 3,&#x201d; &#x201c;operating condition 3 &#x2192; 1,&#x201d; and &#x201c;operating condition 3 &#x2192; 2.&#x201d;</p>
<p>The ablation experiments were carried out by removing the domain category judgment module, removing the inter-domain conditional probability distribution difference module, and removing the above two modules.</p>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Evaluation indicators</title>
<p>The performance evaluation indicators for the fault diagnosis algorithm employed commonly used precision, recall rate, and F-score, calculated as:<disp-formula id="e5">
<mml:math id="m31">
<mml:mrow>
<mml:mrow>
<mml:mfenced open="{" close="" separators="|">
<mml:mrow>
<mml:mtable columnalign="left">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>&#xb7;</mml:mo>
<mml:mi>P</mml:mi>
<mml:mo>&#xb7;</mml:mo>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>where <inline-formula id="inf27">
<mml:math id="m32">
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is precision, <inline-formula id="inf28">
<mml:math id="m33">
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is recall rate, <inline-formula id="inf29">
<mml:math id="m34">
<mml:mrow>
<mml:mi>F</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the combined measure of precision and recall rate, <inline-formula id="inf30">
<mml:math id="m35">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the number of true positives, <inline-formula id="inf31">
<mml:math id="m36">
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the number of false positives, <inline-formula id="inf32">
<mml:math id="m37">
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> denotes the number of false negatives, and <inline-formula id="inf33">
<mml:math id="m38">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the number of true negatives.</p>
</sec>
<sec id="s3-4">
<label>3.4</label>
<title>Experimental results</title>
<p>The recognition accuracies of the three fault diagnosis algorithms for different types of faults in different transfer tasks are displayed in <xref ref-type="table" rid="T2">Table 2</xref>, and the comprehensive diagnosis accuracy is shown in <xref ref-type="fig" rid="F3">Figure 3</xref>. In the same transfer task, the diagnostic accuracy of the same diagnostic algorithm for different fault types did not vary much. Among different diagnostic algorithms, the improved CNN algorithm had the highest accuracy, followed by the BPNN algorithm, and the SVM algorithm had the lowest accuracy. A reduction in accuracy for both the SVM and BPNN algorithms was observed in the &#x201c;operating condition 1&#x2192;3&#x201d; and &#x201c;operating condition 3&#x2192;1&#x201d; tasks; in contrast, the accuracy of the improved CNN algorithm remained largely unaffected.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Recognition accuracy of three fault diagnosis algorithms for different fault types in various transfer tasks.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Fault type</th>
<th align="left">Diagnostic algorithm</th>
<th align="left">Operating condition 1&#x2192;2</th>
<th align="left">Operating condition 1&#x2192;3</th>
<th align="left">Operating condition 2 &#x2192; 1</th>
<th align="left">Operating condition 2 &#x2192; 3</th>
<th align="left">Operating condition 3 &#x2192; 1</th>
<th align="left">Operating condition 3 &#x2192; 2</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="3" align="left">Normal</td>
<td align="left">SVM</td>
<td align="left">0.543</td>
<td align="left">0.387</td>
<td align="left">0.552</td>
<td align="left">0.539</td>
<td align="left">0.387</td>
<td align="left">0.537</td>
</tr>
<tr>
<td align="left">BPNN</td>
<td align="left">0.723</td>
<td align="left">0.601</td>
<td align="left">0.729</td>
<td align="left">0.732</td>
<td align="left">0.603</td>
<td align="left">0.741</td>
</tr>
<tr>
<td align="left">The improved CNN</td>
<td align="left">0.987</td>
<td align="left">0.945</td>
<td align="left">0.989</td>
<td align="left">0.986</td>
<td align="left">0.944</td>
<td align="left">0.984</td>
</tr>
<tr>
<td rowspan="3" align="left">Inner race fault</td>
<td align="left">SVM</td>
<td align="left">0.536</td>
<td align="left">0.388</td>
<td align="left">0.534</td>
<td align="left">0.533</td>
<td align="left">0.389</td>
<td align="left">0.536</td>
</tr>
<tr>
<td align="left">BPNN</td>
<td align="left">0.728</td>
<td align="left">0.602</td>
<td align="left">0.728</td>
<td align="left">0.729</td>
<td align="left">0.600</td>
<td align="left">0.726</td>
</tr>
<tr>
<td align="left">The improved CNN</td>
<td align="left">0.986</td>
<td align="left">0.948</td>
<td align="left">0.991</td>
<td align="left">0.987</td>
<td align="left">0.949</td>
<td align="left">0.984</td>
</tr>
<tr>
<td rowspan="3" align="left">Outer race fault</td>
<td align="left">SVM</td>
<td align="left">0.539</td>
<td align="left">0.389</td>
<td align="left">0.542</td>
<td align="left">0.538</td>
<td align="left">0.386</td>
<td align="left">0.539</td>
</tr>
<tr>
<td align="left">BPNN</td>
<td align="left">0.736</td>
<td align="left">0.604</td>
<td align="left">0.736</td>
<td align="left">0.738</td>
<td align="left">0.601</td>
<td align="left">0.735</td>
</tr>
<tr>
<td align="left">The improved CNN</td>
<td align="left">0.978</td>
<td align="left">0.947</td>
<td align="left">0.975</td>
<td align="left">0.977</td>
<td align="left">0.948</td>
<td align="left">0.979</td>
</tr>
<tr>
<td rowspan="3" align="left">Ball fault</td>
<td align="left">SVM</td>
<td align="left">0.569</td>
<td align="left">0.392</td>
<td align="left">0.567</td>
<td align="left">0.568</td>
<td align="left">0.385</td>
<td align="left">0.564</td>
</tr>
<tr>
<td align="left">BPNN</td>
<td align="left">0.741</td>
<td align="left">0.603</td>
<td align="left">0.742</td>
<td align="left">0.746</td>
<td align="left">0.605</td>
<td align="left">0.743</td>
</tr>
<tr>
<td align="left">The improved CNN</td>
<td align="left">0.989</td>
<td align="left">0.946</td>
<td align="left">0.989</td>
<td align="left">0.987</td>
<td align="left">0.949</td>
<td align="left">0.988</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Diagnostic accuracy of three fault diagnosis algorithms in different transfer tasks.</p>
</caption>
<graphic xlink:href="fmech-12-1744710-g003.tif">
<alt-text content-type="machine-generated">Bar chart comparing comprehensive accuracy across five transfer tasks for three methods: SVM, BPNN, and Improved CNN. Improved CNN consistently yields the highest accuracy, surpassing both SVM and BPNN in all tasks.</alt-text>
</graphic>
</fig>
<p>The results of the ablative experiments are presented in <xref ref-type="fig" rid="F4">Figure 4</xref>. It can be seen that the removal of either module led to a considerable decline in the fault diagnosis accuracy. Moreover, the simultaneous removal of both modules resulted in a further performance degradation.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Ablation experiment results.</p>
</caption>
<graphic xlink:href="fmech-12-1744710-g004.tif">
<alt-text content-type="machine-generated">Bar chart comparing accuracy for six transfer tasks using four approaches: removing the domain category judgment module (blue), removing the probability distribution difference module (orange), removing both modules (yellow), and the improved CNN algorithm (green). The improved CNN algorithm consistently achieves the highest accuracy, close to one point zero, across all tasks.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>Rolling bearings are the core components in rotating mechanical systems, and their operating states are directly related to the safety, reliability, and service life of the equipment. Once a fault occurs, if it fails to be identified and intervened in a timely manner, it is very likely to trigger a chain of mechanical failures, resulting in significant economic losses and even safety accidents. Traditional fault diagnosis methods rely on expert experience and signal processing techniques, which are effective under specific working conditions. However, they have limitations such as weak generalization ability, strong dependence on feature engineering, and high sensitivity to noise. Deep learning technology has shown great potential in bearing fault diagnosis. It can extract features from the original vibration signals and use them to determine the fault type. However, such data-driven methods highly depend on a large number of high-quality and fully labeled fault samples. In actual industrial scenarios, it is difficult to obtain such fault samples. On the one hand, it is difficult to accumulate enough samples for new equipment or rare faults. On the other hand, manual annotation requires the participation of professional engineers, which is costly and error-prone. The fault diagnosis algorithm for rolling bearings is faced with the dilemma of abundant source domain data but scarce target domain data. To address this dilemma, transfer learning has been proposed. Transfer learning alleviates the distribution shift problem caused by speed changes, load fluctuations, sensor differences, or different equipment models through knowledge transfer mechanisms, such as feature alignment, parameter fine-tuning, and adversarial training, thereby achieving high-precision diagnosis with limited target data. In this paper, the CNN algorithm was adopted to identify rolling bearing faults. 
During the training process, a domain category judgment module and an inter-domain conditional probability distribution difference module were introduced to implement transfer learning between source domain samples and target domain samples. Then, a simulation experiment was carried out. The experimental results showed that in the same transfer task, the diagnostic performance of the improved CNN algorithm was superior to that of the SVM and BPNN algorithms. In different transfer tasks, the diagnostic performance of the proposed algorithm did not change significantly, while that of the SVM and BPNN algorithms declined. The reason is that in different transfer tasks, the samples in the target domain change, and there are differences in the sample distributions of different fault types between the two domains. The SVM and BPNN algorithms directly apply the model trained with source-domain samples to the target domain. Due to the difference in feature distribution between the two domains, the recognition results will deviate, ultimately leading to a decrease in diagnostic performance. The CNN algorithm uses a domain category judgment module and an inter-domain conditional probability distribution difference module during training to balance the feature distribution between the two domains as much as possible during the model parameter adjustment process, so as to keep the diagnostic performance unchanged.</p>
<p>The shortcoming of this paper is that only a single experimental platform was used when validating the algorithm. Meanwhile, the comparison was only made with the SVM and BPNN algorithms, which made the validation results lack universality. Therefore, the future research direction is to expand the experimental platforms and the diagnostic algorithms for comparison to make the experimental results have sufficient universality.</p>
</sec>
<sec sec-type="conclusion" id="s5">
<label>5</label>
<title>Conclusion</title>
<p>This paper presents a transfer learning-based algorithm for diagnosing faults in mechanical rolling bearings. A conventional CNN algorithm integrates a domain category judgment module and an inter-domain conditional probability distribution difference module, thereby enabling effective knowledge transfer between source and target domain samples. Simulation tests were carried out on the PT100 bearing fault simulation test platform, where vibration signals were collected from bearings under normal conditions and with inner ring, outer ring, and ball faults at motor speeds of 1,000, 1,500, and 2,000&#xa0;r/min. The fault diagnosis performance of the improved CNN algorithm in operating condition transfer tasks was compared with the SVM and BPNN algorithms. Moreover, ablation experiments were performed. It was found that the diagnostic accuracy of each algorithm remained consistent for different fault types in the same transfer task. Among the compared methods, the improved CNN algorithm achieved the highest accuracy, followed by BPNN, and the SVM algorithm had the lowest accuracy. In different transfer tasks, both SVM and BPNN algorithms exhibited a significant decline in diagnostic accuracy for transfers in &#x201c;operating condition 1 &#x2192; 3&#x201d; and &#x201c;operating condition 3 &#x2192; 1,&#x201d; whereas the accuracy of the proposed algorithm remained largely unchanged. The ablation study confirmed that removing either the domain category judgment module or the inter-domain conditional probability distribution difference module resulted in a significant reduction in the diagnostic accuracy, and the simultaneous removal of both modules resulted in a further performance drop.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="s7">
<title>Author contributions</title>
<p>YZ: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s10">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Xue</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Multi-modal self-supervised learning for cross-domain one-shot bearing fault diagnosis</article-title>. <source>IFAC-PapersOnLine</source> <volume>58</volume> (<issue>4</issue>), <fpage>746</fpage>&#x2013;<lpage>751</lpage>. <pub-id pub-id-type="doi">10.1016/j.ifacol.2024.07.309</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Guo</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Potekhin</surname>
<given-names>V. V.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Kovalchuk</surname>
<given-names>E. A.</given-names>
</name>
<name>
<surname>Lian</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>MDFT-GAN: a multi-domain feature transformer GAN for bearing fault diagnosis under limited and imbalanced data conditions</article-title>. <source>Appl. Sci.</source> <volume>15</volume> (<issue>11</issue>), <fpage>6225</fpage>. <pub-id pub-id-type="doi">10.3390/app15116225</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>He</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Feng</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>A new method for bearing fault diagnosis based on adaptive SVMD and RCMDSE-IDHT</article-title>. <source>IEEE Access</source> <volume>12</volume>, <fpage>169467</fpage>&#x2013;<lpage>169486</lpage>. <pub-id pub-id-type="doi">10.1109/access.2024.3486363</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hu</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Huo</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>An integrated bearing fault diagnosis method based on multibranch SKNet and enhanced Inception-ResNet-v2</article-title>. <source>Shock and Vib.</source> <volume>2024</volume>, <fpage>1</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1155/2024/9071328</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jia</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Mei</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Bearing fault diagnosis under transient conditions: using variational mode decomposition and the symmetrized dot pattern-based convolutional neural network model</article-title>. <source>Shock and Vib.</source> <volume>2024</volume> (<issue>1</issue>), <fpage>9263724</fpage>. <pub-id pub-id-type="doi">10.1155/2024/9263724</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>J. Q.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Multi-layer domain adaptation method for rolling bearing fault diagnosis</article-title>. <source>Signal Process.</source> <volume>157</volume> (<issue>4</issue>), <fpage>180</fpage>&#x2013;<lpage>197</lpage>. <pub-id pub-id-type="doi">10.1016/j.sigpro.2018.12.005</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Adaptive feature mode decomposition method for bearing fault diagnosis under strong noise</article-title>. <source>Proc. Institution Mech. Eng. Part C J. Mech. Eng. Sci.</source> <volume>239</volume> (<issue>2</issue>), <fpage>508</fpage>&#x2013;<lpage>519</lpage>. <pub-id pub-id-type="doi">10.1177/09544062241281840</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Rolling element bearing fault diagnosis for complex equipment based on MFMD and BP neural network</article-title>. <source>J. Phys. Conf. Ser.</source> <volume>1948</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1088/1742-6596/1948/1/012113</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Lai</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Dai</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>A multi-channel fault information bearing fault diagnosis method based on improved vision transformer</article-title>. <source>IET Conf. Proc.</source> <volume>2024</volume> (<issue>12</issue>), <fpage>1067</fpage>&#x2013;<lpage>1071</lpage>. <pub-id pub-id-type="doi">10.1049/icp.2024.3585</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Pan</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>A hybrid semantic attribute-based zero-shot learning model for bearing fault diagnosis under unknown working conditions</article-title>. <source>Eng. Appl. Artif. Intell.</source> <volume>136</volume> (<issue>PartB</issue>), <fpage>109020</fpage>. <pub-id pub-id-type="doi">10.1016/j.engappai.2024.109020</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhuang</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Bearing fault diagnosis method based on multiple-level feature tensor fusion</article-title>. <source>IEEE Sensors J.</source> <volume>24</volume> (<issue>14</issue>), <fpage>23108</fpage>&#x2013;<lpage>23116</lpage>. <pub-id pub-id-type="doi">10.1109/jsen.2024.3399166</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Aircraft bearing fault diagnosis method based on LSTM-IDRSN</article-title>. <source>IEEE Access</source> <volume>13</volume>, <fpage>19248</fpage>&#x2013;<lpage>19256</lpage>. <pub-id pub-id-type="doi">10.1109/access.2025.3533551</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xie</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cao</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Pan</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>A pruning-aware dynamic slimmable network using meta-gradients for high-speed train bogie bearing fault diagnosis</article-title>. <source>ISA Trans.</source> <volume>160</volume>, <fpage>196</fpage>&#x2013;<lpage>204</lpage>. <pub-id pub-id-type="doi">10.1016/j.isatra.2025.02.031</pub-id>
<pub-id pub-id-type="pmid">40155241</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yuan</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Research on bearing fault diagnosis based on vibration signals and deep learning models</article-title>. <source>Electronics</source> <volume>14</volume> (<issue>10</issue>), <fpage>2090</fpage>. <pub-id pub-id-type="doi">10.3390/electronics14102090</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Rolling bearing fault diagnosis based on multi-scale entropy feature and ensemble learning</article-title>. <source>Manuf. Technol.</source> <volume>24</volume> (<issue>3</issue>), <fpage>492</fpage>&#x2013;<lpage>506</lpage>. <pub-id pub-id-type="doi">10.21062/mft.2024.041</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1667498/overview">Xuping Zhang</ext-link>, Aarhus University, Denmark</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1819362/overview">Qibin Wang</ext-link>, Xidian University, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3109526/overview">Chandrabhanu Malla</ext-link>, Radhakrishna Institute of Technology and Engineering, India</p>
</fn>
</fn-group>
</back>
</article>