<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<article article-type="methods-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Earth Sci.</journal-id>
<journal-title>Frontiers in Earth Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Earth Sci.</abbrev-journal-title>
<issn pub-type="epub">2296-6463</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1594649</article-id>
<article-id pub-id-type="doi">10.3389/feart.2025.1594649</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Earth Science</subject>
<subj-group>
<subject>Methods</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Time-domain electromagnetic inversion and application for VTI media based on convolutional neural networks</article-title>
<alt-title alt-title-type="left-running-head">Aohuai et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/feart.2025.1594649">10.3389/feart.2025.1594649</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Aohuai</surname>
<given-names>Pan</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3004340/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Liangjun</surname>
<given-names>Yan</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2214126/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Lei</surname>
<given-names>Zhou</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2696849/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>College of Geophysics and Petroleum Resources</institution>, <institution>Yangtze University</institution>, <addr-line>Wuhan</addr-line>, <addr-line>Hubei</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Key Laboratory of Exploration Technologies for Oil and Gas Resources</institution>, <institution>Ministry of Education</institution>, <institution>Yangtze University</institution>, <addr-line>Wuhan</addr-line>, <addr-line>Hubei</addr-line>, <country>China</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2221720/overview">Bo Zhang</ext-link>, Jilin University, China</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2971583/overview">Jamel Seidu</ext-link>, University of Mines and Technology, Ghana</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3007705/overview">Zhihong Fu</ext-link>, Chongqing University, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3012367/overview">Zhengguang Liu</ext-link>, Shandong University, China</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Yan Liangjun, <email>100675@yangtzeu.edu.cn</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>23</day>
<month>06</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>13</volume>
<elocation-id>1594649</elocation-id>
<history>
<date date-type="received">
<day>16</day>
<month>03</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>12</day>
<month>06</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2025 Aohuai, Liangjun and Lei.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Aohuai, Liangjun and Lei</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>The distinct Vertical Transverse Isotropy (VTI) heterogeneity and anisotropic characteristics of shale are critical geophysical indicators for identifying shale gas sweet spots. To address the need for dynamic monitoring of the electrical properties of VTI shale reservoirs during hydraulic fracturing, this paper proposes a fast time-domain electromagnetic inversion method based on prior constraints and convolutional neural networks (CNN). Throughout the process, prior information from logging and magnetotelluric data is first integrated to construct a layered medium parameterization model. By fixing the electrical parameters of non-target layers and varying the vertical resistivity and anisotropy coefficient of the target layer, forward responses are generated to build the training dataset. A convolutional neural network (CNN) model is then designed to achieve the nonlinear mapping between the electromagnetic decay curve and the target parameters. During training, a dynamic learning rate scheduling strategy and Dropout regularization are applied to accelerate model convergence while avoiding overfitting. The results show that the convolutional neural network can effectively extract data features. Under noise-free conditions, the average relative inversion errors for the target layer&#x2019;s resistivity and anisotropy coefficient are 2.26% and 2.32%, respectively, with an inversion time of less than one second per point. Tests on noisy data demonstrate the model&#x2019;s noise resistance, with average relative errors remaining within an acceptable range when Gaussian noise below 5% is added. 
Application of field-measured transient electromagnetic data shows that the method effectively identifies changes in the target layer&#x2019;s vertical resistivity and anisotropy coefficient induced by hydraulic fracturing, with the average resistivity decreasing from 11.49 to 7.27 (a 36.7% reduction) and the anisotropy coefficient decreasing from 3.21 to 1.58 (a 50.8% reduction). These trends are consistent with conclusions from laboratory core fracturing experiments. This study demonstrates that integrating prior constraints with deep learning can overcome the efficiency bottleneck of traditional inversion methods, providing a new approach for transient electromagnetic inversion in hydraulic fracturing monitoring.</p>
</abstract>
<kwd-group>
<kwd>transient electromagnetic method</kwd>
<kwd>hydraulic fracturing monitoring</kwd>
<kwd>VTI media</kwd>
<kwd>convolutional neural network</kwd>
<kwd>inversion</kwd>
</kwd-group>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Solid Earth Geophysics</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>As the exploration and development of unconventional oil and gas resources such as shale gas gradually become an important part of the global energy strategy, how to efficiently and accurately assess the physical properties and fluid distribution of underground reservoirs has become a key issue in the field of oil and gas exploration (<xref ref-type="bibr" rid="B2">Fahad and Kamal, 2022</xref>). The significant VTI-type heterogeneity and anisotropic characteristics of shale reservoirs are key geophysical indicators for identifying shale gas sweet spots. Compared to seismic attributes (such as P-wave velocity and impedance), the electrical anisotropy of shale is more pronounced (<xref ref-type="bibr" rid="B15">Xiang et al., 2016</xref>). Especially during hydraulic fracturing, the injection of high-pressure fluid induces fracture propagation, causing significant changes in the electrical characteristics of the formation, particularly anisotropy (<xref ref-type="bibr" rid="B3">Heng et al., 2015</xref>). While magnetic source and electrical source magnetic component transient electromagnetic methods are also insensitive to electrical anisotropy, the electrical source electrical component transient electromagnetic method (TEM) shows unique advantages in this regard (<xref ref-type="bibr" rid="B13">Wang et al., 2015</xref>).</p>
<p>Traditional inversion methods often struggle to capture complex electrical variations in formations, especially in reservoirs with strong anisotropy. These methods typically assume that the subsurface medium is isotropic, which significantly affects the accuracy of inversion results in complex geological environments (<xref ref-type="bibr" rid="B18">Yin and Weidelt, 1999</xref>; <xref ref-type="bibr" rid="B5">Liu et al., 2018</xref>). To address this issue, deep learning methods such as Convolutional Neural Networks (CNN) provide new approaches for time-domain electromagnetic inversion. In recent years, the successful application of Convolutional Neural Networks (CNN) in fields such as image recognition, natural language processing, and physical inversion has demonstrated its advantages in handling high-dimensional complex data (<xref ref-type="bibr" rid="B21">Zhou et al., 2017</xref>; <xref ref-type="bibr" rid="B11">Vladimir, 2019</xref>). In electromagnetic inversion, Convolutional Neural Networks (CNN) can deeply mine data features through supervised learning and obtain effective inversion models through training, thereby overcoming the shortcomings of traditional methods in nonlinear and high-dimensional problems. Compared to traditional inversion methods, deep learning methods have the advantage of providing real-time inversion results once training is complete, effectively solving the problem of the long processing times associated with traditional inversion methods (<xref ref-type="bibr" rid="B20">Zhang et al., 2021</xref>). Among these, <xref ref-type="bibr" rid="B6">Liu et al. (2021)</xref> proposed a novel 18-layer residual fully convolutional neural network (18RFCN) for audio magnetotelluric (AMT) data inversion, and achieved excellent results when applied to field AMT data. <xref ref-type="bibr" rid="B4">Huang (2021)</xref> implemented inversion in marine controlled-source electromagnetic detection using deep learning methods. 
<xref ref-type="bibr" rid="B17">Yan et al. (2023)</xref> achieved one-dimensional fast imaging of transient electromagnetics based on Convolutional Neural Networks (CNN), and introduced coordinate information as input parameters to enable rapid processing of area data. <xref ref-type="bibr" rid="B19">Yu et al. (2025)</xref> designed a &#x2018;ResNet-50&#x2019; residual neural network inversion model for two-dimensional magnetotelluric inversion. They tested the network with unfamiliar data and compared it with the NLCG inversion method, showing that the model not only achieves real-time accurate inversion but also has certain noise resistance. The aforementioned deep learning-based geophysical inversion methods are significantly more efficient than traditional regularization inversion methods, indicating that deep learning technology has broad application prospects in the field of electromagnetic inversion. However, these methods share a common issue: they require extensive coverage of the subsurface structure, which results in an exceptionally large training set with considerable redundancy from invalid data. In contrast, by using prior constraints to fix the parameters of non-target layers, the approach proposed in this paper can significantly reduce the size of the training dataset, save computational resources, and improve efficiency. Additionally, by controlling variables, we can capture changes in the electrical properties of the target layer caused by fracturing during the inversion process, providing more reliable support for fracturing monitoring and decision-making.</p>
<p>Based on this scenario, this paper introduces a one-dimensional time-domain electromagnetic rapid inversion method based on CNN under VTI media, and discusses in detail the establishment of the training set, data preprocessing, deep learning model design and construction, network training, and hyperparameter design. First, fully utilize the existing prior geological data in the project to determine the physical property parameters of layers other than the target layer of interest. Then, set the range of variation for the physical property parameters of the target layer within a reasonable interval, and traverse the physical property parameters within the interval using logarithmic spacing to generate forward models. Use a data filtering algorithm to numerically simulate the transient electromagnetic response of the reservoir&#x2019;s electrical anisotropy model to construct the sample dataset. Three datasets were generated to test the theoretical feasibility of the method by simulating low-resistivity target layers, high-resistivity target layers, and conventional target layers in a horizontally layered medium. Gaussian random noise with different proportions was added to the samples in the test set to evaluate the model&#x2019;s noise resistance. Additionally, a learning rate scheduling strategy was introduced during training to accelerate the convergence of the model&#x2019;s loss function. Finally, a set of formation model datasets was designed based on actual geological data combined with magnetotelluric inversion results. On the basis of inversion validation using synthetic data, real measured data from a survey line before and after fracturing was used for inversion. The results show that the network model constructed in this paper can invert the resistivity and anisotropy coefficients of the target layer. 
Especially in hydraulic fracturing scenarios, this method can efficiently capture the changes in subsurface resistivity and anisotropy coefficients, providing a new approach for transient electromagnetic inversion in hydraulic fracturing monitoring.</p>
</sec>
<sec sec-type="methods" id="s2">
<title>2 Methods</title>
<p>Inversion problems essentially involve deducing certain properties based on limited observations. In the hydraulic fracturing application scenario assumed in this paper, we simulate the formation conditions as a horizontally layered medium based on prior geological data. The forward process of the transient electromagnetic response can be represented by a function <italic>G</italic> (<xref ref-type="disp-formula" rid="e1">Equation 1</xref>):<disp-formula id="e1">
<mml:math id="m1">
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>G</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>where <italic>d</italic> represents the transient electromagnetic response data, m represents the physical parameters of the concerned target layer, including the anisotropy coefficient and the vertical resistivity, and <italic>k</italic> represents other parameters that can be constrained <italic>a priori</italic>, including non-target layer physical property parameters, transmitter output power, transmitter-receiver distance, survey point coordinates, etc. Given that the forward method&#x2019;s correctness is validated, we control the variable <italic>k</italic> and set it consistent with the actual fracturing scenario. Then, we can attempt to establish the matrix mapping relationship <italic>g</italic> between response <italic>d</italic> and the concerned target layer&#x2019;s physical parameters <italic>m</italic> for inversion, namely (<xref ref-type="disp-formula" rid="e2">Equation 2</xref>):<disp-formula id="e2">
<mml:math id="m2">
<mml:mrow>
<mml:mi>g</mml:mi>
<mml:mo>:</mml:mo>
<mml:mo>&#x2009;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mo>&#x2192;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>
</p>
<p>Due to the highly complex relationship between the electromagnetic field and the model parameters, it is difficult to express the mapping relationship <italic>g</italic> in simple mathematical terms. Therefore, we use supervised learning to train a convolutional neural network to extract the complex features embedded in the response data, in order to obtain the mapping relationship <italic>g</italic>
<sub>
<italic>&#x3b8;</italic>
</sub>, that is (<xref ref-type="disp-formula" rid="e3">Equation 3</xref>):<disp-formula id="e3">
<mml:math id="m3">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>g</mml:mi>
<mml:mi>&#x3b8;</mml:mi>
</mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>where <italic>&#x3b8;</italic> represents the hyperparameters and structural information of the neural network. To enable the neural network to learn the appropriate <italic>&#x3b8;</italic>, a loss function <italic>L(&#x3b8;)</italic> is introduced:<disp-formula id="e4">
<mml:math id="m4">
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:mo>&#x2016;</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>s</mml:mi>
</mml:mrow>
<mml:mi>i</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msubsup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mi>i</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:msup>
<mml:mo>&#x2016;</mml:mo>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>where <italic>N</italic> is the number of samples, <inline-formula id="inf1">
<mml:math id="m5">
<mml:mrow>
<mml:msubsup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mi>o</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>s</mml:mi>
</mml:mrow>
<mml:mi>i</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the response data of the <italic>i</italic>th sample, and <inline-formula id="inf2">
<mml:math id="m6">
<mml:mrow>
<mml:msubsup>
<mml:mi>d</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mi>i</mml:mi>
</mml:msubsup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the electromagnetic response predicted by the neural network. By optimizing this loss function, the network learns the best target layer parameter prediction model. Whether the loss function <italic>L(&#x3b8;)</italic> can effectively converge to ensure the effectiveness and accuracy of the complex mapping <italic>g</italic>
<sub>
<italic>&#x3b8;</italic>
</sub> mainly depends on the following parts: (1) Dataset construction, (2) Forward algorithm validation, (3) Data normalization, (4) Network structure optimization, (5) Inversion prediction model parameters. The flowchart of one-dimensional anisotropic transient electromagnetic inversion based on deep learning is shown in <xref ref-type="fig" rid="F1">Figure 1</xref>.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Steps of Transient Electromagnetic Deep Learning Inversion. Step 1 involves dataset construction, step 2 is neural network training, and step 3 performs the inversion.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g001.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a three-stage process for resistivity-anisotropy modeling. Stage One involves constructing a data set through forward modeling of high, low, and general resistivity target layers, shown with graph plots. Stage Two involves neural network training with input graphs producing forward parameters (&#x3C1;, &#x3BB;). Stage Three involves inversion using a trained model, resulting in prediction parameters (&#x3C1;', &#x3BB;').</alt-text>
</graphic>
</fig>
<sec id="s2-1">
<title>2.1 Dataset construction</title>
<sec id="s2-1-1">
<title>2.1.1 Anisotropic modeling and forward calculation</title>
<p>According to the study by Yan Liangjun on the transient electromagnetic response of the reservoir electrical anisotropy model, let the electric dipole be located at the coordinate origin, with its direction along the x-axis. In this case, the vector potential A has only <inline-formula id="inf3">
<mml:math id="m7">
<mml:mrow>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf4">
<mml:math id="m8">
<mml:mrow>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> components. In cylindrical coordinates, the equations for <inline-formula id="inf5">
<mml:math id="m9">
<mml:mrow>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf6">
<mml:math id="m10">
<mml:mrow>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> take the following forms (<xref ref-type="bibr" rid="B16">Yan et al., 2014</xref>) (<xref ref-type="disp-formula" rid="e5">Equations 5</xref>, <xref ref-type="disp-formula" rid="e6">6</xref>):<disp-formula id="e5">
<mml:math id="m11">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>&#x2202;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msup>
<mml:mi>r</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>&#x2202;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msup>
<mml:mi>z</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2212;</mml:mo>
<mml:msubsup>
<mml:mi>k</mml:mi>
<mml:mi>t</mml:mi>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>
<disp-formula id="e6">
<mml:math id="m12">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>&#x2202;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msup>
<mml:mi>r</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msup>
<mml:mi>&#x3bb;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>&#x2202;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msup>
<mml:mi>z</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2212;</mml:mo>
<mml:msubsup>
<mml:mi>k</mml:mi>
<mml:mi>n</mml:mi>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>z</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msup>
<mml:mi>&#x3bb;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>&#x2202;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:msub>
<mml:mi>A</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>&#x2202;</mml:mi>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(6)</label>
</disp-formula>
</p>
<p>In the equation, the anisotropy coefficient is defined as <inline-formula id="inf7">
<mml:math id="m13">
<mml:mrow>
<mml:mi>&#x3bb;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>n</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:msqrt>
</mml:mrow>
</mml:math>
</inline-formula>, where the resistivity in the parallel direction is <inline-formula id="inf8">
<mml:math id="m14">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mrow>
<mml:mi>y</mml:mi>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, and the resistivity in the vertical direction is <inline-formula id="inf9">
<mml:math id="m15">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mrow>
<mml:mi>z</mml:mi>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mrow>
<mml:mi>n</mml:mi>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, with units of &#x3a9;&#xb7;m. The wavenumber in the vertical direction is <inline-formula id="inf10">
<mml:math id="m16">
<mml:mrow>
<mml:msub>
<mml:mi>k</mml:mi>
<mml:mi>n</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>n</mml:mi>
</mml:msub>
</mml:mfrac>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:math>
</inline-formula>, while the wavenumber in the parallel direction is <inline-formula id="inf11">
<mml:math id="m17">
<mml:mrow>
<mml:msub>
<mml:mi>k</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>w</mml:mi>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mfrac>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>By applying the method of separation of variables and incorporating the continuous boundary conditions of the vector potential at the interface, the expressions for the electric and magnetic field components on the surface can be obtained (<xref ref-type="bibr" rid="B10">Vanyan et al., 1967</xref>). When the source is excited by a step current, the relationship between the electric field in the frequency domain and the time domain is given by (<xref ref-type="bibr" rid="B7">Niu, 2007</xref>) (<xref ref-type="disp-formula" rid="e7">Equation 7</xref>):<disp-formula id="e7">
<mml:math id="m18">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:msubsup>
<mml:mo>&#x222b;</mml:mo>
<mml:mn>0</mml:mn>
<mml:mi>&#x221e;</mml:mi>
</mml:msubsup>
<mml:mtext>&#x200a;</mml:mtext>
<mml:mfrac>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>E</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mi>w</mml:mi>
</mml:mfrac>
<mml:mi>sin</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>w</mml:mi>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>w</mml:mi>
</mml:mrow>
</mml:math>
<label>(7)</label>
</disp-formula>
</p>
<p>Among them:<disp-formula id="e8">
<mml:math display="block" id="m19">
<mml:mrow>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mfenced close=")" open="(" separators="|">
<mml:mi>w</mml:mi>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>w</mml:mi>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mo>&#x222b;</mml:mo>
<mml:mn>0</mml:mn>
<mml:mi>&#x221e;</mml:mi>
</mml:msubsup>
<mml:mtext>&#x200a;</mml:mtext>
<mml:mfrac>
<mml:mi>m</mml:mi>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mo>&#x2a;</mml:mo>
</mml:msup>
</mml:mfrac>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2062;</mml:mo>
<mml:msub>
<mml:mi>J</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mfenced close=")" open="(" separators="|">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mi>m</mml:mi>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>w</mml:mi>
<mml:msub>
<mml:mi>&#x3bc;</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#xd7;</mml:mo>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mo>&#x222b;</mml:mo>
<mml:mn>0</mml:mn>
<mml:mi>&#x221e;</mml:mi>
</mml:msubsup>
<mml:mtext>&#x200a;</mml:mtext>
<mml:mfenced close=")" open="(" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3bb;</mml:mi>
<mml:mover accent="true">
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:msup>
<mml:mover accent="true">
<mml:mi>R</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
<mml:mo>&#x2a;</mml:mo>
</mml:msup>
<mml:msubsup>
<mml:mi>k</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msubsup>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:msub>
<mml:mi>n</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mo>&#x2a;</mml:mo>
</mml:msup>
</mml:mfrac>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#xd7;</mml:mo>
<mml:mo>&#x2062;</mml:mo>
<mml:mfenced close="]" open="[" separators="|">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:msub>
<mml:mi>J</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mfenced close=")" open="(" separators="|">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2062;</mml:mo>
<mml:msup>
<mml:mi mathvariant="italic">cos</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>&#x3b8;</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>J</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mfenced close=")" open="(" separators="|">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2062;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi mathvariant="italic">cos</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mn>2</mml:mn>
<mml:mi>&#x3b8;</mml:mi>
</mml:mrow>
<mml:mi>r</mml:mi>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(8)</label>
</disp-formula>
</p>
<p>
<xref ref-type="disp-formula" rid="e8">Equation 8</xref> is the expression for the horizontal electric field component in the frequency domain, where <inline-formula id="inf12">
<mml:math id="m20">
<mml:mrow>
<mml:msub>
<mml:mi>J</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf13">
<mml:math id="m21">
<mml:mrow>
<mml:msub>
<mml:mi>J</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> are the zeroth-order and first-order Bessel functions, respectively, <inline-formula id="inf14">
<mml:math id="m22">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mo>&#x2a;</mml:mo>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf15">
<mml:math id="m23">
<mml:mrow>
<mml:mover accent="true">
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mo>&#x2a;</mml:mo>
</mml:msup>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> are the reflection coefficients, and the spatial frequencies are <inline-formula id="inf16">
<mml:math id="m24">
<mml:mrow>
<mml:msubsup>
<mml:mi>n</mml:mi>
<mml:mi>i</mml:mi>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mi>m</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x2b;</mml:mo>
<mml:msubsup>
<mml:mi>k</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>; <inline-formula id="inf17">
<mml:math id="m25">
<mml:mrow>
<mml:msubsup>
<mml:mover accent="true">
<mml:mi>n</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mi>m</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x2b;</mml:mo>
<mml:msubsup>
<mml:mi>k</mml:mi>
<mml:mrow>
<mml:mi>n</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>To verify the accuracy of the forward modeling program, we first use a three-layer 1D isotropic model with the following parameters: <inline-formula id="inf18">
<mml:math id="m26">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>100</mml:mn>
<mml:mi mathvariant="normal">&#x3a9;</mml:mi>
<mml:mo>&#xb7;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>2000</mml:mn>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>10</mml:mn>
<mml:mi mathvariant="normal">&#x3a9;</mml:mi>
<mml:mo>&#xb7;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>300</mml:mn>
<mml:mi>m</mml:mi>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf19">
<mml:math id="m27">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mn>3</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>100</mml:mn>
<mml:mi mathvariant="normal">&#x3a9;</mml:mi>
<mml:mo>&#xb7;</mml:mo>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>. When calculating with the anisotropic forward modeling program, the anisotropy coefficients are set as <inline-formula id="inf20">
<mml:math id="m28">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3bb;</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>&#x3bb;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>&#x3bb;</mml:mi>
<mml:mn>3</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1.0</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, representing an isotropic medium. The calculation results are shown in <xref ref-type="fig" rid="F2">Figure 2</xref>. It can be observed that the electric field curves obtained from both the theoretical solution and the anisotropic forward modeling program fit well. This demonstrates that the forward modeling program based on the theory of electrical anisotropy is reliable.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Forward modeling results of a one-dimensional isotropic model. &#x201c;Aniso&#x201d; shows results from the anisotropic modeling program developed in this study with anisotropy coefficients set to 1.0, while &#x201c;iso&#x201d; shows results from the isotropic modeling program.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g002.tif">
<alt-text content-type="machine-generated">Log-log plot showing electric field strength (Ex in volts per meter) over time (t in seconds). The field value curve with the anisotropic coefficient set to 1.0 is almost identical to the field value curve of isotropic forward modeling, initially stable around 10^-4 V/m, then decreasing sharply to about 10^-7 V/m.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2-1-2">
<title>2.1.2 Dataset construction</title>
<p>During the hydraulic fracturing process, the injection of high-pressure fluid promotes the expansion of fractures, which usually causes significant changes in the electrical properties of the target layer (<xref ref-type="bibr" rid="B8">Shen et al., 2009</xref>), while the physical parameters of the upper and lower layers remain relatively stable. Based on this background, this paper sets the parameters of the target layer as inversion variables and fixes the parameters of other layers to reduce the scale of the parameters. To ensure the effectiveness of the algorithm, we generated three datasets for testing. In the case of a three-layer horizontally layered medium, forward simulations were conducted for both high-resistivity and low-resistivity target layers, resulting in datasets D1 and D2. Additionally, a ten-layer underground model was forward simulated, with the target layer located in the ninth thin layer, generating dataset D3. The model parameters of each layer in these three datasets, as well as the changes in vertical resistivity and anisotropy coefficient of the target layer, are shown in <xref ref-type="fig" rid="F3">Figure 3</xref>. In the high-resistivity target layer training set, the resistivity of the first and third layers is fixed at 30 &#x2126;-meters, and the thickness of the first and second layers is 200 and 400 m, respectively. The resistivity range of the second layer (target layer) is 150&#x2013;1,000 &#x3a9; m, with 250 resistivity values taken at logarithmic intervals, and the anisotropy coefficient (&#x3bb;) ranges from 0.1 to 10.0, with 200 &#x3bb; values taken at logarithmic intervals. In the low-resistivity target layer training set, the resistivity of the first and third layers is fixed at 300 &#x3a9; m, and the resistivity range of the second layer (target layer) is 1&#x2013;150 &#x3a9; m, also with 250 resistivity values taken at logarithmic intervals and 200 &#x3bb; values. 
In the general target layer training set, we refer to the actual prior data (from a specific location) to construct a ten-layer underground model, with the target layer located in the ninth layer. The resistivity range of the target layer is 1&#x2013;1,000 &#x2126;-meters, with 500 resistivity values taken at logarithmic intervals, and the &#x3bb; range is 0.1&#x2013;10.0, with 200 &#x3bb; values. The distribution of the three sample sets ultimately used for testing is shown in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Setting of physical property parameters of the target layer under three backgrounds. This figure illustrates the model settings of physical property parameters of the target layer under three background conditions: high-resistance target layer, low-resistance target layer, and conventional target layer.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g003.tif">
<alt-text content-type="machine-generated">Diagram comparing three types of target layers with their respective measurements:1. High-resistivity target layer with three parts: - \( h_1 &#x3d; 200 \)m, \( \rho_1 &#x3d; 30 \, \Omega \cdot m \), \( \lambda_1 &#x3d; 1.0 \). - \( h_2 &#x3d; 400 \)m, \( \rho_2 &#x3d; 150 \sim 1000 \, \Omega \cdot m \), \( \lambda_2 &#x3d; 0.1 \sim 10.0 \). - \( h_3 &#x3d; \infty \), \( \rho_3 &#x3d; 30 \, \Omega \cdot m \), \( \lambda_3 &#x3d; 1.0 \).2. Low-resistivity target layer with three parts: - \( h_1 &#x3d; 400 \)m, \( \rho_1 &#x3d; 300 \, \Omega \cdot m \), \( \lambda_1 &#x3d; 1.0 \). - \( \rho_2 &#x3d; 1 \sim 150 \, \Omega \cdot m \), \( h_2 &#x3d; 200 \)m, \( \lambda_2 &#x3d; 0.1 \sim 10.0 \). - \( h_3 &#x3d; \infty \), \( \rho_3 &#x3d; 300 \, \Omega \cdot m \), \( \lambda_3 &#x3d; 1.0 \).3. General target layer listing multiple parts with specific height, resistivity, and lambda values for \( h_1 \) through \( h_{10} \).</alt-text>
</graphic>
</fig>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Distribution of Sample Dataset.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Model type</th>
<th align="center">Model quantity</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">High Resistivity Target Layer Model</td>
<td align="center">50,000</td>
</tr>
<tr>
<td align="center">Low Resistivity Target Layer Model</td>
<td align="center">50,000</td>
</tr>
<tr>
<td align="center">General Target Layer Model</td>
<td align="center">100,000</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Datasets were constructed for three Model Types respectively, and these datasets were used for the method testing.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
</sec>
<sec id="s2-2">
<title>2.2 Data normalization</title>
<p>As mentioned earlier, to enhance the nonlinear expression capability of the neural network, we need to introduce activation functions in the network layers. However, activation functions are most effective within a specific range, and their effectiveness diminishes when the input data exceeds this range. Therefore, we need to normalize the data to ensure that it is transformed to a reasonable magnitude range while retaining as much feature information as possible.</p>
<p>To this end, we tested the training effects of three normalization methods on the same dataset for the sample data: (<xref ref-type="disp-formula" rid="e9">Equation 9</xref>) Log Min-Max Normalization, (<xref ref-type="disp-formula" rid="e11">Equation 11</xref>) Early Apparent Resistivity &#x2a; Time Normalization, and (<xref ref-type="disp-formula" rid="e12">Equation 12</xref>) Early Apparent Resistivity &#x2a; Time &#x2b; Log Min-Max Normalization. The effects are shown in <xref ref-type="fig" rid="F4">Figure 4</xref>. Log Min-Max Normalization resulted in an excessive scaling range, losing the feature information of the sample data, which led to neuron inactivation in the neural network and caused the model to lose its predictive ability. The Early Apparent Resistivity &#x2a; Time Normalization model converged well and did not exhibit severe overfitting. The Early Apparent Resistivity &#x2a; Time &#x2b; Log Min-Max Normalization method&#x2019;s loss function also eventually converged, but overfitting occurred in the middle stages.<disp-formula id="e9">
<mml:math id="m29">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mtext>norm</mml:mtext>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mi>log</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mi mathvariant="italic">min</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mi>log</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">max</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mi>log</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi mathvariant="italic">min</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mi>log</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(9)</label>
</disp-formula>
<disp-formula id="e10">
<mml:math id="m30">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mi>&#x3c0;</mml:mi>
<mml:msup>
<mml:mi>r</mml:mi>
<mml:mn>3</mml:mn>
</mml:msup>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mi>d</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(10)</label>
</disp-formula>
<disp-formula id="e11">
<mml:math id="m31">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mi>&#x3c0;</mml:mi>
<mml:msup>
<mml:mi>r</mml:mi>
<mml:mn>3</mml:mn>
</mml:msup>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
<mml:mi>t</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mi>d</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(11)</label>
</disp-formula>
<disp-formula id="e12">
<mml:math id="m32">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>g</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:msub>
<mml:mi mathvariant="italic">log</mml:mi>
<mml:mi mathvariant="italic">min</mml:mi>
</mml:msub>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:msub>
<mml:mi mathvariant="italic">log</mml:mi>
<mml:mi mathvariant="italic">max</mml:mi>
</mml:msub>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:msub>
<mml:mi mathvariant="italic">log</mml:mi>
<mml:mi mathvariant="italic">min</mml:mi>
</mml:msub>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(12)</label>
</disp-formula>where <inline-formula id="inf21">
<mml:math id="m33">
<mml:mrow>
<mml:msub>
<mml:mi>&#x3c1;</mml:mi>
<mml:mi mathvariant="normal">s</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the early-time apparent resistivity (<xref ref-type="disp-formula" rid="e10">Equation 10</xref>), <italic>r</italic> is the source-receiver distance, <inline-formula id="inf22">
<mml:math id="m34">
<mml:mrow>
<mml:msub>
<mml:mi>E</mml:mi>
<mml:mi>x</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the electric field response, <italic>I</italic> is the transmitting current, and <italic>dl</italic> is the length of the transmitting source.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Decrease in the loss function under different normalization strategies. Among the three normalization strategies, the time-weighted normalization of early apparent resistivity yields the best performance.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g004.tif">
<alt-text content-type="machine-generated">Three charts depict training and validation loss curves for different data normalization methods. The first chart shows logarithmic max-min normalization with initial high losses decreasing and stabilizing. The second illustrates early apparent resistivity time-weighted normalization with losses rapidly decreasing and leveling off. The third combines both methods, showing rapid loss reduction and stabilization. Each chart includes a legend indicating training loss in blue and validation loss in red.</alt-text>
</graphic>
</fig>
<p>Subsequently, to avoid excessive initial loss values, we also normalize the label data. To prevent the normalization amplitude from being too large and causing the differences between different label data to become too small, we scale both the resistivity and anisotropy coefficients within the range of 0&#x2013;1,000. Then, we take the logarithm with base 10, so that the normalized label data for vertical resistivity and anisotropy coefficients are finally distributed within the range of (0, 3]. This helps the loss function converge effectively. After applying the above normalization strategy, the loss function decrease is shown in <xref ref-type="fig" rid="F5">Figure 5</xref>.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Training performance after normalizing both the sample data and labels. The figure shows the training results of the neural network after applying normalization to both input samples and output labels. The training process exhibits good convergence and stability.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g005.tif">
<alt-text content-type="machine-generated">Graph showing train and validation loss over epochs. Train loss (blue line) and validation loss (red dashed line) both decrease sharply initially and stabilize near zero as epochs increase.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2-3">
<title>2.3 Convolutional neural network model construction and training</title>
<sec id="s2-3-1">
<title>2.3.1 Model construction</title>
<p>The classic convolutional neural network (CNN) structure typically includes an input layer, convolutional layers, activation functions, pooling layers, fully connected layers, and an output layer. The core idea is to extract features from the data using the convolutional layers, enhance the model&#x2019;s non-linearity with activation functions, reduce dimensionality with pooling layers, and synthesize the features in the fully connected layers to complete classification or regression tasks. In the transient electromagnetic inversion process, unlike the two-dimensional CNN used for image processing, the one-dimensional CNN is more suitable for extracting features from time-series signals, and its computational scale is also much smaller than that of the two-dimensional CNN used for image processing (<xref ref-type="bibr" rid="B1">Chen et al., 2025</xref>). Therefore, we choose to adopt the one-dimensional convolutional neural network structure shown in <xref ref-type="fig" rid="F6">Figure 6</xref>. The normalized response data <bold>
<italic>d</italic>
</bold> is used as input, and the normalized target layer resistivity and anisotropy coefficients <bold>
<italic>m</italic>
</bold> are used as the network&#x2019;s output. Here, <inline-formula id="inf23">
<mml:math id="m35">
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mn>24</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>d</mml:mi>
<mml:mn>25</mml:mn>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi>T</mml:mi>
</mml:msup>
<mml:mo>,</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>&#x3bb;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi>T</mml:mi>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Convolutional Neural Network Architecture Diagram. The figure illustrates the structure of the convolutional neural network used in this study, including the arrangement of convolutional layers, and fully connected layers.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g006.tif">
<alt-text content-type="machine-generated">Diagram of a neural network architecture showing the flow from input data to output data. It includes three convolutional layers with specified input and output channels and kernel sizes, followed by three fully connected layers with indicated input and output dimensions. The final layer outputs data with size two by one.</alt-text>
</graphic>
</fig>
<p>In this paper, we train the data using mini-batch gradient descent. In each iteration, a mini-batch of training data of size &#x201c;batchsize&#x201d; is used to calculate the gradient and update the model parameters. This method combines the advantages of batch gradient descent and stochastic gradient descent, offering high computational efficiency and good stability. The data flow during the training process is shown in <xref ref-type="fig" rid="F6">Figure 6</xref>. First, the input data has the shape of (batchsize,1,25), where &#x201c;batchsize&#x201d; is the size of the mini-batch, one represents a single channel (indicating one-dimensional signal data), and 25 represents the number of time channels. The input data passes through three convolutional layers for feature extraction. The first convolutional layer uses 16 convolutional kernels, each with a size of 5; the second convolutional layer uses 30 convolutional kernels, each with a size of 5; the third convolutional layer also uses 30 convolutional kernels, each with a size of 3. After these three convolutional layers, the data shape becomes (batchsize,30,25), where 30 represents the number of feature channels and 25 is the number of time channels. To increase the network&#x2019;s non-linear expressive power, the ReLU activation function is used in all layers. Additionally, to reduce data loss, zero-padding is used in the convolutional operations, and pooling layers are omitted to retain more spatial information. Then, the data processed by the convolutions is flattened into 30 &#xd7; 25 features and passed into the fully connected layer. The fully connected layer contains three hidden layers. The first hidden layer has 325 neurons, the second hidden layer has 64 neurons, and the third hidden layer has 12 neurons. 
After the successive compression of the data, the final output is obtained, where the data shape is (batchsize,2,1), with 2 representing the number of predicted parameters and one indicating a single output feature.</p>
</sec>
<sec id="s2-3-2">
<title>2.3.2 Loss function</title>
<p>The network training process essentially involves minimizing the mathematical expectation of the loss function derived in <xref ref-type="disp-formula" rid="e4">Equation 4</xref> to achieve optimal predictive performance of the model. During this process of minimizing the mathematical expectation of the loss function, the network obtains the model&#x2019;s predicted output through forward propagation and compares it with the actual label values. Then, the backpropagation algorithm calculates the gradients of the error with respect to the network parameters (such as neuron weights and biases) and adjusts these parameters based on these gradients to optimize the loss function.</p>
<p>The mean squared error (MSE) loss function is one of the most commonly used loss functions in regression tasks. It is defined as (<xref ref-type="disp-formula" rid="e13">Equation 13</xref>):<disp-formula id="e13">
<mml:math id="m36">
<mml:mrow>
<mml:mtext>MSE</mml:mtext>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mtext>&#x200a;</mml:mtext>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msubsup>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
<label>(13)</label>
</disp-formula>
</p>
<p>Where <italic>N</italic> is the number of samples, <inline-formula id="inf24">
<mml:math id="m37">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> represents the true label values, and <inline-formula id="inf25">
<mml:math id="m38">
<mml:mrow>
<mml:msubsup>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>&#x200b; represents the predicted values from the model.</p>
</sec>
<sec id="s2-3-3">
<title>2.3.3 Learning rate</title>
<p>In deep learning, the learning rate is also a crucial hyperparameter. It represents the step size at which the model parameters are updated during each iteration. If the learning rate is too high, meaning the step size is too large, it may hinder the model&#x2019;s convergence. On the other hand, if the learning rate is too low, meaning the step size is too small, it may cause the convergence to be too slow. Therefore, a reasonable learning rate schedule is essential for effective neural network training. After multiple tests, this paper finally adopts a learning rate adjustment strategy that changes with the number of iterations and the decrease in the loss function, which is represented by <xref ref-type="disp-formula" rid="e14">Equation 14</xref>:<disp-formula id="e14">
<mml:math id="m39">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo>&#xd7;</mml:mo>
<mml:msup>
<mml:mn>0.75</mml:mn>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
<label>(14)</label>
</disp-formula>where <inline-formula id="inf26">
<mml:math id="m40">
<mml:mrow>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the initial learning rate, set to 0.01 in this study, and <italic>&#x3b1;</italic> represents the number of decay steps. Each time a specified condition is triggered during training, the learning rate is reduced to three-quarters of its previous value. The condition we set is that if the validation error does not decrease for 5 consecutive times, the decay step <italic>&#x3b1;</italic> is increased by 1. This method allows the model to dynamically reduce the learning rate during training, avoiding the drawbacks of a manually set learning rate that might be too high or too low. It significantly enhances the model&#x2019;s performance and convergence speed, making it easier for the model to reach its optimal state.</p>
</sec>
<sec id="s2-3-4">
<title>2.3.4 Model training</title>
<p>In this paper, a one-dimensional inversion neural network model for transient electromagnetic is built based on the PyTorch framework, and the network training and prediction are carried out in the GPU mode. The hardware configuration used is as follows: Intel(R) Xeon(R) Platinum 8352S CPU @ 2.20 GHz 3.40 GHz, NVIDIA GeForce RTX 4090 (24 GB) GPU, and the RAM is 256 GB (256 GB available). Under this hardware setup, the training efficiency of the model is reflected in a time consumption of 1.8 s per epoch. The complete hyperparameter settings for training the convolutional neural network model are shown in <xref ref-type="table" rid="T2">Table 2</xref>. The dataset is randomly split into training, validation, and test sets in an 8:1:1 ratio to ensure that after the network training is completed, the test set contains a sufficient number of unseen data to evaluate the inversion performance of the network model. Additionally, the ReLU activation function is used, and the Adam optimizer is employed for training. Apart from the aforementioned learning rate decay strategy, an &#x201c;early stopping&#x201d; strategy is also implemented. Training is halted early if the validation error does not decrease for 50 consecutive iterations, effectively preventing overfitting.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Hyperparameter Settings.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Hyperparameter</th>
<th align="center">Setting</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Number of Training Epochs</td>
<td align="center">500</td>
</tr>
<tr>
<td align="center">Batch Size</td>
<td align="center">50</td>
</tr>
<tr>
<td align="center">Activation Function</td>
<td align="center">ReLU</td>
</tr>
<tr>
<td align="center">Learning Rate</td>
<td align="center">Initial learning rate 0.01, decayed to 75% of its previous value upon trigger conditions</td>
</tr>
<tr>
<td align="center">Optimizer</td>
<td align="center">Adam</td>
</tr>
<tr>
<td align="center">Dropout</td>
<td align="center">0.1</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>The settings include the number of training epochs, batch size, activation function, learning rate, optimizer, and Dropout rate.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
</sec>
</sec>
<sec id="s3">
<title>3 Results and testing</title>
<sec id="s3-1">
<title>3.1 Inversion of synthetic data without noise</title>
<p>As shown in <xref ref-type="fig" rid="F7">Figure 7</xref>, the inversion results of three models selected from the test samples (named a1b1, a2b2, and a3b3) under noiseless conditions are presented. From top to bottom, the first and second columns show the comparison between the true values and the predicted (inverted) values of the vertical resistivity and anisotropy coefficients of the target layer. The third column shows the forward modeling response fitting curves of the true geoelectric model and the inverted geoelectric model. The fourth column displays the absolute error curves of the forward response of the true model and the inverted model. For the high-resistivity target layer model a1b1, with layer thicknesses of 200, 400, and 5,000 m, vertical resistivities of 30.0, 280.17, and 30.0 &#x3a9; m, and anisotropy coefficients of 1.0, 5.62, and 1.0, the inversion results show that the second layer&#x2019;s vertical resistivity is 297.05 &#x3a9; m, with a relative error of 6.02%, and the anisotropy coefficient is 5.51, with a relative error of 1.97%. For the low-resistivity target layer model a2b2, with layer thicknesses of 400, 200, and 5,000 m, vertical resistivities of 300, 60.65, and 300 &#x3a9; m, and anisotropy coefficients of 1.0, 1.66, and 1.0, the inversion results show that the second layer&#x2019;s vertical resistivity is 64.23 &#x3a9; m, with a relative error of 5.94%, and the anisotropy coefficient is 1.55, with a relative error of 6.38%. For the general target layer model a3b3, with the target layer at the ninth layer and the settings of the underground 10 layers being consistent with <xref ref-type="fig" rid="F3">Figure 3</xref>, the vertical resistivity of the target layer is 85.68 &#x3a9; m, and the anisotropy coefficient is 7.9. The inversion results show that the vertical resistivity of the ninth layer is 91.46 &#x3a9; m, with a relative error of 6.75%, and the anisotropy coefficient is 7.68, with a relative error of 2.78%.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Inversion Results for Different Models. <bold>(a1&#x2013;a3)</bold> are comparisons between the true values and predicted values of the vertical resistivity &#x3c1; for the target layer under different models. <bold>(b1&#x2013;b3)</bold> are comparisons between the true values and predicted values of the anisotropy coefficient &#x3bb; for the target layer under different models. <bold>(c1&#x2013;c3)</bold> are comparisons of the forward modeling response curves for model parameters and predicted parameters. <bold>(d1&#x2013;d3)</bold> are error absolute value curves comparing the forward modeling response of model parameters with the predicted parameter&#x2019;s forward modeling response. The inversion results show that the predicted parameters closely match the true values, and the forward modeling responses based on predicted parameters exhibit high consistency with those from the true models, indicating the effectiveness of the proposed method.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g007.tif">
<alt-text content-type="machine-generated">Nine-panel figure illustrating comparisons between true and predicted models across three scenarios. Panels (a1), (a2), and (a3) show resistivity plots versus depth. Panels (b1), (b2), and (b3) depict anisotropy coefficient versus depth. Panels (c1), (c2), and (c3) present line plots of electric field \(E_x\) over time \(t\), comparing true and predicted models. Panels (d1), (d2), and (d3) display error graphs of \(E_x\) over time \(t\). Each subplot includes legends for model distinction.</alt-text>
</graphic>
</fig>
<p>From the three models and their inversion results, it can be seen that for the inversion of noise-free synthetic data, the vertical resistivity and anisotropy coefficient of the target layer can be well restored. The fitting error of the forward response curve is at least two orders of magnitude lower than the data itself. In terms of the prediction performance for special models, the inversion results are satisfactory, and the inversion time after network training is within 1 s, which is significantly shorter than the time required for traditional iterative inversion, perfectly meeting the real-time inversion requirements in practical scenarios. In addition, to verify the generalization performance of the model, all unseen data from the test set, which was not involved in model training, was used for inversion, and the relative error of the inversion parameters was calculated. The test results, as shown in <xref ref-type="table" rid="T3">Table 3</xref>, indicate that the average relative error is less than 5%, demonstrating that even for unfamiliar forward response curves, the convolutional neural network can effectively extract complex electromagnetic response features and accurately restore the vertical resistivity and anisotropy coefficient of the target layer.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Average Relative Inversion Error of the Test Set in Noise-Free Synthetic Data.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Dataset - sample size</th>
<th align="left">Average relative error - &#x3c1;</th>
<th align="left">Average relative error - &#x3bb;</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">high-resistivity target layer - 5,000</td>
<td align="center">3.35%</td>
<td align="center">3.18%</td>
</tr>
<tr>
<td align="center">low-resistivity target layer - 5,000</td>
<td align="center">2.13%</td>
<td align="center">2.47%</td>
</tr>
<tr>
<td align="center">general target layer - 10,000</td>
<td align="center">1.78%</td>
<td align="center">1.82%</td>
</tr>
<tr>
<td align="center">total - 20,000</td>
<td align="center">2.26%</td>
<td align="center">2.32%</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Statistical analysis of the inversion results of three model types across the entire test set shows that, under noise-free conditions, the average relative error of all models is less than 4%. This result demonstrates that the method performs remarkably well when dealing with synthetic data without noise interference.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3-2">
<title>3.2 Inversion of synthetic data with noise</title>
<p>In this study, to evaluate the noise resistance of the convolutional neural network (CNN) inversion method under realistic conditions, Gaussian white noise&#x2014;one of the most common types of noise&#x2014;was introduced as the disturbance source. Gaussian white noise is characterized by a zero mean, flat power spectral density, and statistical independence, making it an effective representation of random disturbances encountered in practical electromagnetic measurements, such as instrument noise, atmospheric interference, and geological background noise. By introducing Gaussian noise, the complexity of real-world electromagnetic data can be more accurately simulated, enabling a more objective assessment of the CNN model&#x2019;s robustness and generalization capability under varying noise levels.</p>
<p>We calculated the average relative error of the inversion results for 5,000 noisy test samples from the D1 dataset without changing any other parameters, as shown in <xref ref-type="table" rid="T4">Table 4</xref>. This demonstrates that the convolutional neural network can still effectively capture data features when the noise level is not too severe. Subsequently, different levels of Gaussian noise (2%, 5%, 8%, and 12%) were added to the three sets of test samples used earlier, and the inversion was performed again. The results were compared with the true model and the noise-free inversion results, as shown in <xref ref-type="fig" rid="F8">Figure 8</xref>. Specifically, a1, b1, and c1 represent the inversion results of the vertical resistivity of the target layer under different noise levels, while a2, b2, and c2 show the inversion results of the anisotropy coefficient. The results indicate that when the noise ratio is within 5%, the neural network inversion exhibits good generalization ability, and the error of the inversion parameters remains within an acceptable range. However, it is important to note that as the noise ratio increases, the inversion performance of the network deteriorates accordingly. Therefore, when applying this method to practical scenarios, it is still necessary to perform data cleaning and denoising in advance to ensure better inversion performance.</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Inversion Results of Test Samples with Added Noise in Dataset D1.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Noise ratio</th>
<th align="center">Average relative error - &#x3c1;</th>
<th align="center">Average relative error - &#x3bb;</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">0%</td>
<td align="center">3.35%</td>
<td align="center">3.18%</td>
</tr>
<tr>
<td align="center">2%</td>
<td align="center">6.41%</td>
<td align="center">5.98%</td>
</tr>
<tr>
<td align="center">5%</td>
<td align="center">13.43%</td>
<td align="center">9.03%</td>
</tr>
<tr>
<td align="center">8%</td>
<td align="center">40.28%</td>
<td align="center">36.57%</td>
</tr>
<tr>
<td align="center">12%</td>
<td align="center">125.92%</td>
<td align="center">78.62%</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>The results show that when the noise level is within 5%, the neural network inversion exhibits good generalization ability. As the noise level increases, the inversion performance of the network decreases accordingly.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>The Impact of Noise on Inversion Results. <bold>(a1,a2)</bold> show the effect of different levels of noise on the resistivity and anisotropy coefficient of the high-resistivity target layer. <bold>(b1,b2)</bold> show the effect of different levels of noise on the resistivity and anisotropy coefficient of the low-resistivity target layer. <bold>(c1,c2)</bold> show the effect of different levels of noise on the resistivity and anisotropy coefficient of the typical target layer. The results show that when the noise level is within 5%, the neural network inversion exhibits good generalization ability. As the noise level increases, the inversion performance of the network decreases accordingly.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g008.tif">
<alt-text content-type="machine-generated">Six graphs depict resistivity and anisotropy coefficient variations with depth. Panels a1 and a2 show consistent patterns for the true and predicted models, with varying noise levels. Panels b1 and b2 highlight differences in resistivity and anisotropy at depths of up to eight hundred meters. Panel c1 illustrates more significant deviations, while panel c2 shows minimal changes beyond fifteen hundred meters. Each graph includes a legend indicating the line colors for the true model, predicted model, and models with two percent, five percent, eight percent, and twelve percent noise.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-3">
<title>3.3 Inversion comparison of field data before and after hydraulic fracturing</title>
<p>The method proposed in this paper focuses on the inversion of vertical resistivity and anisotropy coefficient of the target layer by fixing the electrical parameters of non-target layers. This strategy is often limited in conventional transient electromagnetic inversion, where complex unknown stratified structures require simultaneous inversion of multiple formation parameters, significantly increasing data dimensionality and computational complexity. However, in the context of hydraulic fracturing monitoring in shale reservoirs, this &#x201c;parameter simplification&#x201d; design becomes a unique advantage.</p>
<p>Firstly, prior to fracturing, an initial geoelectric model can be constructed using well-logging curves, 3D seismic interpretation, and magnetotelluric (MT) surveys, providing a reliable basis for fixing background layer parameters. Secondly, the resistivity anomalies caused by hydraulic fracturing are spatially constrained, mainly within the target reservoir around the horizontal wellbore (typically within a 200&#x2013;300 m radius), while the electrical properties of non-target layers, such as the caprock and basement, remain relatively stable before and after fracturing. This physical constraint allows the proposed method to efficiently capture the dynamic changes in vertical resistivity and anisotropy of the target layer through inversion in a reduced parameter space, reducing the neural network training data by 2&#x2013;3 orders of magnitude and meeting the computational efficiency required for fracturing monitoring.</p>
<p>This study is based on field data from a shale gas well in Zhongxian, Chongqing, using the ultra-long grounded source transient electromagnetic system. The transmission source has a total length of 4,064 m, with a peak current of 103.686 A, and uses a TD50 bipolar trapezoidal waveform with a sampling rate of 1,000 Hz. The field survey line layout relative to the horizontal well is shown in <xref ref-type="fig" rid="F9">Figure 9</xref>. Based on well-logging interpretation, 3D seismic data, and MT inversion, a four-layer &#x201c;low-high-low-high&#x201d; resistivity structure model was constructed (target layer depth &#x3c;2,600 m), eight observation points were set along the main fracturing direction, and corresponding datasets were generated for the different coordinates of the eight observation points.</p>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>Schematic Diagram of Field Survey Line Design and the Relative Position of the Horizontal Well. The measured data were selected from the measurement points along 9 survey lines.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g009.tif">
<alt-text content-type="machine-generated">Graph showing a grid of labeled data points with a highlighted survey line crossing through stages forty-one, forty-two, and forty-three. A horizontal well is marked, intersecting the survey line.</alt-text>
</graphic>
</fig>
<p>Taking the first observation point as an example (<xref ref-type="fig" rid="F10">Figure 10</xref>), the early apparent resistivity curve (0.02&#x2013;1.02 s) before and after fracturing shows significant differences, with a maximum absolute change of <italic>&#x394;&#x3c1;</italic><sub>max</sub> &#x3d; 23.95 &#x3a9; m and a relative change rate of up to 37.4%, far exceeding the system noise threshold (5%). This demonstrates the CNN&#x2019;s ability to effectively extract characteristic signals from the fracturing process.</p>
<fig id="F10" position="float">
<label>FIGURE 10</label>
<caption>
<p>Early Apparent Resistivity Curves Before and After Fracturing and the Absolute and Relative Changes. This change in values can be effectively captured by the neural network.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g010.tif">
<alt-text content-type="machine-generated">Three graphs compare data related to resistivity over time. The first graph shows early apparent resistivity with black and red curves for before and after fracturing. The second graph presents delta values, shown in red, while the third graph illustrates relative variation percentages, also in red. Both horizontal axes represent time in seconds on a logarithmic scale.</alt-text>
</graphic>
</fig>
<p>According to the laboratory rock physics study by <xref ref-type="bibr" rid="B9">Tong et al. (2023)</xref>, the replacement of original gas with conductive fluid in fracture pores leads to a significant decrease in the average resistivity of the formation, while the vertical resistivity decreases more significantly, further reducing the anisotropy coefficient. The inversion results for the entire survey line (<xref ref-type="fig" rid="F11">Figures 11</xref>, <xref ref-type="fig" rid="F12">12</xref>) show that after fracturing, the average vertical resistivity of the target layer decreased from 11.49 &#x3a9; m to 7.27 &#x3a9; m (a 36.7% reduction), while the anisotropy coefficient decreased from 3.21 to 1.58 (a 50.8% reduction). The simultaneous decline in electrical parameters can be attributed to the formation of a conductive fluid network caused by the injection of proppant and low-resistivity fracturing fluid, which is consistent with the conclusions of laboratory core fracturing experiments.</p>
<fig id="F11" position="float">
<label>FIGURE 11</label>
<caption>
<p>Electrical Property Parameters of the Target Layer Before and After Fracturing and Their Changes. <bold>(a1)</bold> Vertical resistivity distribution of the target layer before fracturing, <bold>(a2)</bold> Vertical resistivity distribution of the target layer after fracturing, <bold>(a3)</bold> Changes in vertical resistivity, <bold>(b1)</bold> Anisotropy coefficient distribution before fracturing, <bold>(b2)</bold> Anisotropy coefficient distribution after fracturing, <bold>(b3)</bold> Changes in anisotropy coefficient. Overall, after fracturing, both the resistivity and the anisotropy coefficient have generally decreased.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g011.tif">
<alt-text content-type="machine-generated">Six contour plots display variations in electrical resistivity and anisotropy coefficient with elevation and distance. The top row (a1, a2, a3) illustrates resistivity in ohm-meters, showing color gradients from purple (low) to red (high). The bottom row (b1, b2, b3) presents anisotropy coefficients using similar color coding. Elevation ranges from negative two thousand to negative three thousand meters, and distance spans zero to six hundred meters in each graph, highlighting differences in subsurface properties.</alt-text>
</graphic>
</fig>
<fig id="F12" position="float">
<label>FIGURE 12</label>
<caption>
<p>Changes in Average Resistivity and Average Anisotropy Coefficient of the Stratum. After fracturing of the target layer, the average value of vertical resistivity decreased from 11.49 &#x3a9; m to 7.27 &#x3a9; m, with a decrease of 36.7%; the average value of the anisotropy coefficient decreased from 3.21 to 1.58, representing a reduction of 50.8%.</p>
</caption>
<graphic xlink:href="feart-13-1594649-g012.tif">
<alt-text content-type="machine-generated">Two graphs depict resistivity and anisotropy coefficient changes before and after fracturing. The left graph shows elevation versus resistivity, with separate lines for before and after fracturing. The right graph shows elevation versus anisotropy coefficient, also distinguishing between before and after fracturing. The legend identifies line colors: black for before fracturing, red for after fracturing. The graphs illustrate how these properties vary with elevation.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec sec-type="conclusion" id="s4">
<title>4 Conclusion</title>
<p>This study proposes a CNN-driven anisotropic time-domain electromagnetic (TEM) inversion framework, which overcomes the limitations of traditional iterative inversion methods, such as long computation time and low efficiency. By leveraging an end-to-end deep extraction-regression architecture, the proposed method establishes a nonlinear mapping between multi-time window electromagnetic decay characteristics and the target layer&#x2019;s electrical parameters (vertical resistivity &#x3c1; and anisotropy coefficient &#x3bb;), successfully reducing the single-point inversion time to within 1 s, which is much lower than the 317 s used in the one-dimensional iterative inversion by <xref ref-type="bibr" rid="B12">Wang (2024)</xref>.</p>
<p>In the synthetic data tests, a standardized dataset containing 200,000 samples of three target layer types&#x2014;high-resistivity (&#x3c1; &#x3e; 100 &#x3a9; m), low-resistivity (&#x3c1; &#x3c; 50 &#x3a9; m), and conventional (10 &#x3a9; m &#x2264; &#x3c1; &#x2264; 1,000 &#x3a9; m)&#x2014;was constructed. The results verified the feasibility of the method, with an average relative error of less than 4% for both vertical resistivity (&#x3c1;) and anisotropy coefficient (&#x3bb;), and no overfitting occurred (the loss between the validation set and the test set was less than 2%).</p>
<p>In the noisy data tests, the results showed that the CNN inversion maintained good generalization performance when the noise ratio was within 2%&#x2013;5%, with inversion errors remaining within an acceptable range. However, as the noise level increased, the inversion performance gradually deteriorated.</p>
<p>In field data applications, this method has demonstrated remarkable practical potential. It successfully identified regions of reduced resistivity caused by the injection of fracturing fluid, with the average resistivity decreasing from 11.49 &#x3a9; m to 7.27 &#x3a9; m (a reduction of 36.7%), and the average anisotropy coefficient dropping from 3.21 to 1.58 (a reduction of 50.8%). The inversion time for a single point is less than 1 s, enabling the method to efficiently capture resistivity and anisotropy changes induced by hydraulic fracturing. This demonstrates that the integration of prior constraints and deep learning can overcome the timeliness bottleneck of traditional inversion, providing more reliable support for fracturing monitoring and decision-making.</p>
<p>However, it should be noted that although the method presented in this paper effectively reduces the size of the training dataset through sufficient prior constraints, and our analysis of field data shows that the trend of the inversion results is generally consistent with laboratory rock physics experiments&#x2014;leading us to regard the prior information used as highly reliable&#x2014;the complexity of subsurface electrical structures often makes it difficult to fully match them with the prior model. If there is a significant discrepancy between the prior information and the actual subsurface electrical parameters, it may lead to substantial errors in the inversion results. At present, we are unable to precisely quantify the differences between the prior data and the true subsurface parameters. Therefore, the focus of our future research will be to further improve the inversion method, for example, by exploring whether it is possible to directly invert the changes in the electrical properties of the target layer using the residual signal of the response before and after fracturing. This approach may help reduce the result bias caused by inaccurate prior information.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="author-contributions" id="s6">
<title>Author contributions</title>
<p>PA: Writing &#x2013; original draft. YL: Writing &#x2013; review and editing. ZL: Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="funding-information" id="s7">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. This work was financially supported by the National Natural Science Foundation of China (NSFC) Key Project (Grant No. 42030805), titled &#x201c;Research on Time-Domain Electromagnetic Monitoring Methods for Hydraulic Fracturing and Their Comprehensive Applications.&#x201d;</p>
</sec>
<ack>
<p>Special thanks to the EMLAB laboratory at Yangtze University for their support in this research.</p>
</ack>
<sec sec-type="COI-statement" id="s8">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s9">
<title>Generative AI statement</title>
<p>The author(s) declare that no Generative AI was used in the creation of this manuscript.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>X. Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Y. Q.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>A review of physics-guided deep learning: progress, challenges, and prospects</article-title>. <source>J. Front. Comput. Sci. Technol.</source> <volume>19</volume> (<issue>02</issue>), <fpage>277</fpage>&#x2013;<lpage>294</lpage>. <pub-id pub-id-type="doi">10.3778/j.issn.1673-9418.2407056</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fahad</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kamal</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Groundwater aquifer detection using the time-domain electromagnetic method: a case study in Harrat Ithnayn, northwestern Saudi Arabia</article-title>. <source>J. King Saud Univ. - Sci.</source> <volume>34</volume> (<issue>01</issue>), <fpage>101684</fpage>. <pub-id pub-id-type="doi">10.1016/j.jksus.2021.101684</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Heng</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>C. H.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>B. P.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>Y. T.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>Y. L.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Experimental study on anisotropic characteristics of shale</article-title>. <source>Rock Soil Mech.</source> <volume>36</volume> (<issue>03</issue>), <fpage>609</fpage>&#x2013;<lpage>616</lpage>. <pub-id pub-id-type="doi">10.16285/j.rsm.2015.03.001</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>M. W.</given-names>
</name>
</person-group> (<year>2021</year>). <source>Research on marine controlled-source electromagnetic inversion based on deep learning</source>. <publisher-name>Henan Polytechnic University</publisher-name>. <pub-id pub-id-type="doi">10.27116/d.cnki.gjzgc.2021.000219</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Y. H.</given-names>
</name>
<name>
<surname>Yin</surname>
<given-names>C. C.</given-names>
</name>
<name>
<surname>Cai</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Ben</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Research status and prospects of anisotropy in electromagnetic exploration</article-title>. <source>Chin. J. Geophys.</source> <volume>61</volume> (<issue>08</issue>), <fpage>3468</fpage>&#x2013;<lpage>3487</lpage>. <pub-id pub-id-type="doi">10.6038/cjg2018L0004</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ren</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Deep learning audio magnetotellurics inversion using residual-based deep convolution neural network</article-title>. <source>J. Appl. Geophys.</source> <volume>188</volume> (<issue>000</issue>), <fpage>104309</fpage>. <pub-id pub-id-type="doi">10.1016/j.jappgeo.2021.104309</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Niu</surname>
<given-names>Z. L.</given-names>
</name>
</person-group> (<year>2007</year>). <source>Principles of time-domain electromagnetic methods</source>. <publisher-loc>Changsha</publisher-loc>: <publisher-name>Central South University Press</publisher-name>.</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shen</surname>
<given-names>J. S.</given-names>
</name>
<name>
<surname>Su</surname>
<given-names>B. Y.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>N. C.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Electrical anisotropic response characteristics of fractured reservoirs</article-title>. <source>Chin. J. Geophys.</source> <volume>52</volume> (<issue>11</issue>), <fpage>2903</fpage>&#x2013;<lpage>2912</lpage>. <pub-id pub-id-type="doi">10.3969/j.issn.0001-5733.2009.11.026</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tong</surname>
<given-names>X. L.</given-names>
</name>
<name>
<surname>Yan</surname>
<given-names>L. J.</given-names>
</name>
<name>
<surname>Xiang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Tan</surname>
<given-names>G. X.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>IP characteristics and anisotropy experiment of hydraulic fracturing shale</article-title>. <source>Oil Geophys. Prospect.</source> <volume>58</volume> (<issue>05</issue>), <fpage>1152</fpage>&#x2013;<lpage>1163</lpage>. <pub-id pub-id-type="doi">10.13810/j.cnki.issn.1000-7210.2023.05.012</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Vanyan</surname>
<given-names>L. L.</given-names>
</name>
<name>
<surname>Bobrovnikov</surname>
<given-names>L. Z.</given-names>
</name>
<name>
<surname>Davidov</surname>
<given-names>V. M.</given-names>
</name>
<name>
<surname>Kuznetsov</surname>
<given-names>A. N.</given-names>
</name>
<name>
<surname>Loshenitzina</surname>
<given-names>V. L.</given-names>
</name>
<name>
<surname>Morozova</surname>
<given-names>G. M.</given-names>
</name>
<etal/>
</person-group> (<year>1967</year>). <source>Electromagnetic depth soundings</source>. <publisher-loc>Boston, MA</publisher-loc>: <publisher-name>Springer</publisher-name>. <pub-id pub-id-type="doi">10.1007/978-1-4684-0670-2</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Puzyrev</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Deep learning electromagnetic inversion with convolutional neural networks</article-title>. <source>Geophys. J. Int.</source> <volume>218</volume> (<issue>2</issue>), <fpage>817</fpage>&#x2013;<lpage>832</lpage>. <pub-id pub-id-type="doi">10.1093/gji/ggz204</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="thesis">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>S. J.</given-names>
</name>
</person-group> (<year>2024</year>). <source>One-dimensional inversion and application of electric source transient electromagnetic method</source> (<comment>Master&#x2019;s thesis</comment>). <publisher-name>Yangtze University</publisher-name>. <pub-id pub-id-type="doi">10.26981/d.cnki.gjhsc.2024.001266</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>X. X.</given-names>
</name>
<name>
<surname>Di</surname>
<given-names>Q. Y.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Preliminary study on electrical source transient electromagnetic method</article-title>. <source>Prog. Geophys.</source> <volume>30</volume> (<issue>02</issue>), <fpage>872</fpage>&#x2013;<lpage>877</lpage>. <pub-id pub-id-type="doi">10.6038/pg20150253</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xiang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>W. B.</given-names>
</name>
<name>
<surname>Yan</surname>
<given-names>L. J.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>X. G.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>X. B.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Characteristics of shale gas reservoirs and geophysical prediction techniques</article-title>. <source>Spec. Oil Gas Reserv.</source> <volume>23</volume> (<issue>02</issue>), <fpage>5</fpage>&#x2013;<lpage>8&#x2b;151</lpage>. <pub-id pub-id-type="doi">10.3969/j.issn.1006-6535.2016.02.002</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yan</surname>
<given-names>L. J.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>X. B.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z. G.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Transient electromagnetic response of electrically anisotropic reservoir models</article-title>. <source>Chin. J. Eng. Geophys.</source> <volume>11</volume> (<issue>03</issue>), <fpage>346</fpage>&#x2013;<lpage>350</lpage>. <pub-id pub-id-type="doi">10.3969/j.issn.1672-7940.2014.03.013</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yin</surname>
<given-names>C. C.</given-names>
</name>
<name>
<surname>Su</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Ke</surname>
<given-names>Z. Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>L. Y.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Fast imaging of transient electromagnetic data based on convolutional neural network</article-title>. <source>Chin. J. Geophys.</source> <volume>66</volume> (<issue>10</issue>), <fpage>4290</fpage>&#x2013;<lpage>4300</lpage>. <pub-id pub-id-type="doi">10.6038/cjg2023Q0827</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yin</surname>
<given-names>C. C.</given-names>
</name>
<name>
<surname>Weidelt</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>1999</year>). <article-title>Geoelectrical fields in a layered earth with arbitrary anisotropy</article-title>. <source>Geophysics</source> <volume>64</volume> (<issue>2</issue>), <fpage>426</fpage>&#x2013;<lpage>434</lpage>. <pub-id pub-id-type="doi">10.1190/1.1444547</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yu</surname>
<given-names>J. H.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>X. G.</given-names>
</name>
<name>
<surname>Xiong</surname>
<given-names>Z. T.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>2D inversion of magnetotelluric data based on residual neural network</article-title>. <source>Chin. J. Geophys.</source> <volume>68</volume> (<issue>01</issue>), <fpage>269</fpage>&#x2013;<lpage>281</lpage>. <pub-id pub-id-type="doi">10.6038/cjg2024R0859</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>Z. H.</given-names>
</name>
<name>
<surname>Liao</surname>
<given-names>X. L.</given-names>
</name>
<name>
<surname>Cao</surname>
<given-names>Y. Y.</given-names>
</name>
<name>
<surname>Hou</surname>
<given-names>Z. L.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>X. T.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Z. X.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Joint inversion of gravity and gravity gradient anomalies based on deep learning</article-title>. <source>Chin. J. Geophys.</source> <volume>64</volume> (<issue>04</issue>), <fpage>1435</fpage>&#x2013;<lpage>1452</lpage>. <pub-id pub-id-type="doi">10.6038/cjg2021O0151</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname>
<given-names>F. Y.</given-names>
</name>
<name>
<surname>Jin</surname>
<given-names>L. P.</given-names>
</name>
<name>
<surname>Dong</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>A survey of convolutional neural networks</article-title>. <source>Chin. J. Comput.</source> <volume>40</volume> (<issue>06</issue>), <fpage>1229</fpage>&#x2013;<lpage>1251</lpage>. <pub-id pub-id-type="doi">10.11897/SP.J.1016.2017.01229</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>