<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="research-article" dtd-version="1.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Earth Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Earth Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Earth Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-6463</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1736569</article-id>
<article-id pub-id-type="doi">10.3389/feart.2026.1736569</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Atmospheric CO<sub>2</sub> concentration prediction based on bidirectional long short-term memory</article-title>
<alt-title alt-title-type="left-running-head">Qiao et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/feart.2026.1736569">10.3389/feart.2026.1736569</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Qiao</surname>
<given-names>Yina</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3206206"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Yang</surname>
<given-names>Hui</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Cui</surname>
<given-names>Liu</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3208452"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Yuan</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Feng</surname>
<given-names>Gefei</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Fan</surname>
<given-names>Huaiwei</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Lv</surname>
<given-names>Qingzhou</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Yao</surname>
<given-names>Yuejing</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>School of Resources and Geosciences, China University of Mining and Technology</institution>, <city>Xuzhou</city>, <country country="CN">China</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>Key Laboratory of Coalbed Methane Resources and Reservoir Formation Process of the Ministry of Education, China University of Mining and Technology</institution>, <city>Xuzhou</city>, <country country="CN">China</country>
</aff>
<aff id="aff3">
<label>3</label>
<institution>Collaborative Innovation Center for Language Competence, Jiangsu Normal University</institution>, <city>Xuzhou</city>, <country country="CN">China</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Hui Yang, <email xlink:href="mailto:yanghui@cumt.edu.cn">yanghui@cumt.edu.cn</email>
</corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-22">
<day>22</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>14</volume>
<elocation-id>1736569</elocation-id>
<history>
<date date-type="received">
<day>31</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>02</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>06</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Qiao, Yang, Cui, Zhang, Feng, Fan, Lv and Yao.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Qiao, Yang, Cui, Zhang, Feng, Fan, Lv and Yao</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-22">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Accurate prediction of atmospheric CO<sub>2</sub> concentration is essential for evaluating local emission dynamics, supporting regional carbon management, and promoting carbon neutrality goals. However, CO<sub>2</sub> variations are highly influenced by complex interactions between meteorological conditions and anthropogenic activities, leading to highly nonlinear and time-dependent behavior that challenges conventional prediction methods.</p>
</sec>
<sec>
<title>Methods</title>
<p>To address this issue, a time series prediction framework based on Bidirectional Long Short-Term Memory (BILSTM) was developed for ground-based CO<sub>2</sub> concentration forecasting. Observations from four stations of the European Integrated Carbon Observation System (ICOS) were used. Correlation analysis was first conducted between CO<sub>2</sub> concentration and multiple meteorological variables to identify dominant driving factors. Two prediction schemes were then designed: one incorporating all meteorological variables and another using only highly correlated features. The proposed BILSTM model was evaluated against LSTM, GRU, and Support Vector Regression (SVR) models using RMSE, MAE, and R<sup>2</sup> as performance metrics.</p>
</sec>
<sec>
<title>Results</title>
<p>The results indicate that correlation-based feature selection effectively improves prediction accuracy and model stability. Among all models, BILSTM consistently achieves superior performance in both short-term and long-term CO<sub>2</sub> prediction, exhibiting greater stability and robustness than LSTM, GRU, and SVR.</p>
</sec>
<sec>
<title>Discussion</title>
<p>The superior performance of BILSTM is attributed to its bidirectional recurrent structure, which enables effective learning of both past and future temporal dependencies in CO<sub>2</sub> time series. Differences in prediction accuracy among stations reflect varying environmental and anthropogenic influences.</p>
</sec>
</abstract>
<kwd-group>
<kwd>bidirectional long short-term memory</kwd>
<kwd>carbon dioxide</kwd>
<kwd>ground monitoring</kwd>
<kwd>integrated carbon observation system</kwd>
<kwd>time series prediction</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research is funded by the National Natural Science Foundation of China (42571545), the National Natural Science Foundation of China (52478011), the Third Xinjiang Scientific Expedition Program (2022xjkk1006), the Xinjiang Uygur Autonomous Region Key Research and Development Program (2022B01012-1), the Science and Technology Innovation Project of Jiangsu Provincial Department of Natural Resources (2023018), the Fundamental Research Funds for the Central Universities (2024ZDPYCH1002), the Research on Multi-scale Estimation and Simulation Methods for Terrestrial Ecosystem Carbon Storage in China (JSNRM-2025A07), the Graduate Innovation Program of China University of Mining and Technology (2023WLKXJ004), the Postgraduate Research and Practice Innovation Program of Jiangsu Province (KYCX23_2761).</funding-statement>
</funding-group>
<counts>
<fig-count count="13"/>
<table-count count="7"/>
<equation-count count="16"/>
<ref-count count="45"/>
<page-count count="00"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Geochemistry</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Carbon dioxide (CO<sub>2</sub>) is the most critical greenhouse gas, accounting for more than three-quarters of global greenhouse forcing. Since the Industrial Revolution, rapid industrialization and fossil fuel consumption have led to a continuous rise in atmospheric CO<sub>2</sub> levels, intensifying the greenhouse effect and triggering frequent extreme weather events (<xref ref-type="bibr" rid="B8">Das et al., 2023</xref>; <xref ref-type="bibr" rid="B9">Diffenbaugh et al., 2017</xref>). To address these challenges, the Paris Agreement in 2015 (<xref ref-type="bibr" rid="B30">UNFCCC, 2016</xref>) marked the international community&#x2019;s collective commitment to combat climate change, aiming to limit the global average temperature rise to below 2 &#xb0;C compared to pre-industrial levels and make efforts to further reduce the increase to below 1.5 &#xb0;C (<xref ref-type="bibr" rid="B3">Ayugi et al., 2023</xref>; <xref ref-type="bibr" rid="B18">Jin et al., 2024</xref>; <xref ref-type="bibr" rid="B44">Zhang et al., 2023</xref>; <xref ref-type="bibr" rid="B42">Zhang and Chen, 2022</xref>). Currently, over 190 parties have legally committed to reducing their national greenhouse gas emissions (<xref ref-type="bibr" rid="B21">Lelandais et al., 2022</xref>). Achieving this target relies not only on emission reduction commitments but also on accurate monitoring and prediction of atmospheric CO<sub>2</sub> concentration at regional and local scales, which provide scientific evidence for climate policy formulation and emission regulation.</p>
<p>At present, satellite observation and surface measurement are the main methods to obtain the CO<sub>2</sub> concentration (<xref ref-type="bibr" rid="B25">Li Y. et al., 2025</xref>; <xref ref-type="bibr" rid="B38">Yang et al., 2020</xref>). Satellite observation, due to its extensive spatial coverage, has become an important tool for analyzing the spatio-temporal variations of global CO<sub>2</sub> concentration (<xref ref-type="bibr" rid="B36">Xiang et al., 2022</xref>; <xref ref-type="bibr" rid="B39">Yang et al., 2023</xref>). However, the narrow swaths and revisit cycles (<xref ref-type="bibr" rid="B17">Jin et al., 2022</xref>; <xref ref-type="bibr" rid="B43">Zhang et al., 2022</xref>) make the satellite observation data suffer from temporal discontinuity and accuracy limitations (<xref ref-type="bibr" rid="B15">Heymann et al., 2017</xref>; <xref ref-type="bibr" rid="B33">Wang W. et al., 2022</xref>). As a result, it is difficult to accurately reflect the temporal continuous variation of CO<sub>2</sub> concentration. To make up for the deficiency, ground-based monitoring stations provide high-precision and temporal continuous CO<sub>2</sub> observation data (<xref ref-type="bibr" rid="B27">Mingwei et al., 2017</xref>), which can more finely depict the real-time change of CO<sub>2</sub> concentration (<xref ref-type="bibr" rid="B34">Wu C. et al., 2023</xref>). As early as 1958, the Mauna Loa Observatory in Hawaii, USA, began long-term observations of CO<sub>2</sub> concentration (<xref ref-type="bibr" rid="B19">Keeling et al., 1976</xref>). Subsequently, the World Meteorological Organization (WMO) established the Global Atmosphere Watch Programme (GAW) in 1991 (<xref ref-type="bibr" rid="B11">Ferrarese et al., 2015</xref>; <xref ref-type="bibr" rid="B22">Li et al., 2022</xref>) with the goal of developing a comprehensive network observation to facilitate in-depth research into global CO<sub>2</sub> concentration changes. 
However, most of these monitoring stations are located in high-altitude and remote areas far from CO<sub>2</sub> sources and sinks (<xref ref-type="bibr" rid="B7">Curcoll et al., 2018</xref>). They are primarily dedicated to studying long-term CO<sub>2</sub> concentration changes at the global scale rather than regional and local variations (<xref ref-type="bibr" rid="B5">Conil et al., 2019</xref>). Therefore, in order to accurately monitor regional and local CO<sub>2</sub> concentration, the Integrated Carbon Observation System (ICOS) was established in Europe in 2012 (<xref ref-type="bibr" rid="B40">Yver-Kwok et al., 2021</xref>). This network consists of towers set up at different heights to monitor atmospheric components such as CO<sub>2</sub>, CH<sub>4</sub>, N<sub>2</sub>O, as well as meteorological variables like temperature and humidity (<xref ref-type="bibr" rid="B21">Lelandais et al., 2022</xref>). This provides valuable datasets for exploring CO<sub>2</sub> variation mechanisms and developing predictive models.</p>
<p>Ground station observation, with its core advantage of high temporal resolution, can not only track and accurately depict the overall evolution trend of local CO<sub>2</sub> concentration over the long term, but also capture its short-term dynamic changes such as diurnal fluctuations in real time. However, the variation of CO<sub>2</sub> concentration is comprehensively influenced by multiple local factors such as traffic emissions, industrial activities, and meteorological conditions. Consequently, the CO<sub>2</sub> time series data exhibit pronounced characteristics such as strong seasonality, nonlinearity, and local heterogeneity, which still pose challenges for CO<sub>2</sub> prediction. Traditional statistical models and shallow learning methods lack the ability to effectively capture long-term temporal dependencies and complex nonlinear mechanisms, making it difficult to meet the requirements of high-precision prediction. In recent years, deep learning methods have become powerful tools for dealing with nonlinear and long-term dependent sequences (<xref ref-type="bibr" rid="B6">Cui et al., 2023</xref>; <xref ref-type="bibr" rid="B12">Gao and Li, 2021</xref>; <xref ref-type="bibr" rid="B24">Li R. et al., 2025</xref>). Recurrent Neural Network (RNN), as one of the earliest deep learning methods for time series prediction (<xref ref-type="bibr" rid="B2">Amalou et al., 2022</xref>; <xref ref-type="bibr" rid="B20">Khaldi et al., 2023</xref>), has made some progress in handling sequential data. However, it is prone to the issues of gradient disappearance and explosion (<xref ref-type="bibr" rid="B10">Fang et al., 2021</xref>; <xref ref-type="bibr" rid="B32">Wang J. et al., 2022</xref>). 
Long Short-Term Memory (LSTM) network is an improvement based on RNN, which can effectively capture long-range dependencies (<xref ref-type="bibr" rid="B4">Chu et al., 2023</xref>; <xref ref-type="bibr" rid="B28">Qadeer et al., 2020</xref>) by introducing gating mechanisms (<xref ref-type="bibr" rid="B41">Zaini et al., 2023</xref>). However, conventional LSTM network only processes sequential data in one direction, which limits the model&#x2019;s ability to utilize future information (<xref ref-type="bibr" rid="B45">Zrira et al., 2024</xref>). The Bidirectional Long Short-Term Memory network (BILSTM) overcomes this limitation by combining forward and backward propagation, thus enabling a more comprehensive understanding of temporal dependencies and improving prediction robustness (<xref ref-type="bibr" rid="B23">Li et al., 2024</xref>; <xref ref-type="bibr" rid="B26">Liu et al., 2023</xref>; <xref ref-type="bibr" rid="B35">Wu K. et al., 2023</xref>).</p>
<p>In this study, we developed a BILSTM-based CO<sub>2</sub> prediction model using multi-factor meteorological inputs from ICOS ground stations. The main objectives are: 1. To analyze the correlations between CO<sub>2</sub> concentration and meteorological variables, and assess the impact of feature selection on model performance; 2. To compare the predictive accuracy of BILSTM with GRU, LSTM, and SVR baseline models using standard statistical metrics. The remainder of this paper is organized as follows: <xref ref-type="sec" rid="s2">Section 2</xref> introduces the data and methods; <xref ref-type="sec" rid="s3">Section 3</xref> presents the experimental results and discussion; <xref ref-type="sec" rid="s4">Section 4</xref> summarizes the main conclusions and future research directions.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Data and methods</title>
<sec id="s2-1">
<label>2.1</label>
<title>Data source</title>
<p>The time series data of CO<sub>2</sub> concentration used in this paper were obtained from the Integrated Carbon Observation System (ICOS). ICOS is an Observer Intergovernmental Organization (IGO) to the United Nations Framework Convention on Climate Change (UNFCCC). It has a prominent role in some of the working groups of the Global Climate Observing System (GCOS) and is also a part of the Group on Earth Observations (GEO) and Global Atmosphere Watch (GAW) program, with data products accessible through the World Meteorological Organization (WMO). Currently, ICOS comprises approximately 180 stations across 16 European countries, covering three major observational domains: atmosphere, ecosystem, and ocean. All ICOS stations follow unified instrumentation standards, calibration procedures, and quality-control protocols, ensuring high precision, consistency, and long-term stability of the measurements.</p>
<p>Specifically, the ICOS Atmosphere Release 2023-1 Level 2 greenhouse gas dataset (<ext-link ext-link-type="uri" xlink:href="https://www.icos-cp.eu/data-products/atmosphere-release">https://www.icos-cp.eu/data-products/atmosphere-release</ext-link>) was utilized (<xref ref-type="bibr" rid="B16">ICOS et al., 2023</xref>). This collection includes final quality controlled hourly averaged data for CO<sub>2</sub>, CH<sub>4</sub>, N<sub>2</sub>O, and CO, as well as corresponding meteorological parameters measured at multiple vertical levels. It also contains additional records such as <sup>14</sup>C in CO<sub>2</sub> in two-weekly integrated samples and flask sample analyses of CO<sub>2</sub>, CH<sub>4</sub>, CO, N<sub>2</sub>O, SF<sub>6</sub> and H<sub>2</sub>, covering the period from September 2015 to April 2023.</p>
<p>For this research, 2 years (1 January 2020 to 31 December 2021) of hourly CO<sub>2</sub> concentration and meteorological observations were selected from four ICOS atmospheric stations: HPB, IPR, KRE, and TRN. The meteorological variables considered include air temperature (AT), relative humidity (RH), air pressure (AP), wind direction (WD), and wind speed (WS). The geographical distribution of these stations is illustrated in <xref ref-type="fig" rid="F1">Figure 1</xref>, and detailed site information is provided in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Location distribution of HPB, IPR, KRE, and TRN stations.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g001.tif">
<alt-text content-type="machine-generated">Map illustrating Europe with elevation data, using a gradient from green (low) to red (high). ICOS_sites are marked with blue dots. A legend indicates data categories: ICOS_sites, ICOS member country, DEM. A scale bar and compass are included.</alt-text>
</graphic>
</fig>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Detailed location of HPB, IPR, KRE, and TRN stations.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Station code</th>
<th align="center">Station name</th>
<th align="center">Latitude (&#xb0;)</th>
<th align="center">Longitude (&#xb0;)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">HPB</td>
<td align="center">Hohenpeissenberg</td>
<td align="center">47.8011</td>
<td align="center">11.0246</td>
</tr>
<tr>
<td align="center">IPR</td>
<td align="center">Ispra</td>
<td align="center">45.8147</td>
<td align="center">8.6360</td>
</tr>
<tr>
<td align="center">KRE</td>
<td align="center">K&#x159;e&#x161;&#xed;n u Pacova</td>
<td align="center">49.5720</td>
<td align="center">15.0800</td>
</tr>
<tr>
<td align="center">TRN</td>
<td align="center">Trainou</td>
<td align="center">47.9647</td>
<td align="center">2.1125</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2-2">
<label>2.2</label>
<title>Data preprocessing</title>
<p>During data acquisition, inevitable factors such as instrument maintenance, network interruptions, and power outages may cause missing values in the original time series, which can negatively affect the accuracy and stability of model predictions. To assess the impact of missing data, we systematically analyzed the missingness of CO<sub>2</sub> concentration and meteorological variables at the four observation stations from 2020 to 2021, with the results visualized in <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Missing rate at each station. <bold>(a)</bold> CO<sub>2</sub>. <bold>(b)</bold> Meteorological variable.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g002.tif">
<alt-text content-type="machine-generated">Bar chart and heatmap showing missing data rates for 2020 and 2021. Chart (a) compares CO2 missing rates across four categories: HPB, IPR, KRE, and TRN, with the relatively higher missing rates of KRE and TRN than those of HPB and IPR. Heatmap (b) details missing rates by categories AP, RH, AT, WD, and WS for 2020 and 2021. Darker shades represent higher missing rates, with significant values in IPR and KRE for 2021.</alt-text>
</graphic>
</fig>
<p>The missing distribution of CO<sub>2</sub> observations at each station across different years is shown in <xref ref-type="fig" rid="F2">Figure 2a</xref>, while the missingness of meteorological variables is presented in <xref ref-type="fig" rid="F2">Figure 2b</xref>. It can be seen that although all six types of observational variables at each station exhibit varying degrees of missing data, missing rates remain relatively low, with most variables missing less than 5%, indicating that the dataset quality is sufficient for model training. To minimize the impact of missing data, the linear interpolation method was applied to fill the data gaps. The calculation formula is shown in <xref ref-type="disp-formula" rid="e1">Equation 1</xref>:<disp-formula id="e1">
<mml:math id="m1">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>where <inline-formula id="inf1">
<mml:math id="m2">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the time point with missing observation data, <inline-formula id="inf2">
<mml:math id="m3">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf3">
<mml:math id="m4">
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> are the time points without missing data before and after time <inline-formula id="inf4">
<mml:math id="m5">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf5">
<mml:math id="m6">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf6">
<mml:math id="m7">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> are the observed values at time points <inline-formula id="inf7">
<mml:math id="m8">
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf8">
<mml:math id="m9">
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, and <inline-formula id="inf9">
<mml:math id="m10">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the final interpolation calculation result.</p>
<p>Additionally, since the measurement scales of the six observation variables are different, it is necessary to normalize them and generate a unified value range of [0,1] so as to improve the calculation and convergence speed of the prediction model. The calculation formula for normalization is shown in <xref ref-type="disp-formula" rid="e2">Equation 2</xref>:<disp-formula id="e2">
<mml:math id="m11">
<mml:mrow>
<mml:mi>y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>min</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>max</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>min</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>where <inline-formula id="inf10">
<mml:math id="m12">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the original observed value, <inline-formula id="inf11">
<mml:math id="m13">
<mml:mrow>
<mml:mi>min</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf12">
<mml:math id="m14">
<mml:mrow>
<mml:mi>max</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> represent the minimum and maximum values of the observed data respectively, and <inline-formula id="inf13">
<mml:math id="m15">
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the normalized result.</p>
<p>Finally, to prepare the data for model training, the continuous time series from each station were converted into a supervised learning format. Specifically, the sliding window method was employed to segment the time series into input-output pairs, enabling the model to learn temporal dependencies from historical observations. As illustrated in <xref ref-type="fig" rid="F3">Figure 3</xref>, when the window size is L and the time step is 1, each input sequence consists of L consecutive historical data points, which are used to predict the value at the next time step.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Sliding window technology.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g003.tif">
<alt-text content-type="machine-generated">Diagram depicting a sequence of input and output windows on a timeline. Three rows illustrate overlapping sections where the input window is marked in blue and the output window in green. Each row shifts rightward, showing progression from \(X_{t-L}\) to \(X_{t&#x2b;3}\).</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2-3">
<label>2.3</label>
<title>Methods</title>
<p>The overall methodological framework for CO<sub>2</sub> concentration prediction is illustrated in <xref ref-type="fig" rid="F4">Figure 4</xref>. It consists of two main stages: data analysis and model training. In the first stage, time series data of CO<sub>2</sub> concentration and meteorological observations from the HPB, IPR, KRE, and TRN stations were collected and preprocessed. Missing values were filled using linear interpolation, and all variables were normalized to enhance the numerical stability of subsequent model training. The preprocessed data were then divided into a training set and a testing set. In the second stage, the correlation between meteorological variables and CO<sub>2</sub> concentration was analyzed to identify the most influential factors. The highly correlated features were selected as model inputs to reduce noise and improve predictive accuracy. Finally, the BILSTM network was constructed for time series prediction, and its performance was evaluated using statistical metrics and compared with three baseline models: LSTM, GRU, and SVR.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Methodological framework.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g004.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a machine learning model process. The data section includes dataset collection (carbon dioxide, meteorology), preprocessing (interpolation, normalization), and splitting (sliding window for train/test sets, feature selection using Pearson correlation). The model section outlines construction (input, BILSTM, dropout, dense, output layers) and an evaluation with GRU, LSTM, SVR, RMSE, MAE, and R-squared. A BILSTM diagram shows inputs, outputs, and hidden layers' interactions.</alt-text>
</graphic>
</fig>
<sec id="s2-3-1">
<label>2.3.1</label>
<title>Long short-term memory</title>
<p>Long Short-Term Memory, first proposed by Sepp Hochreiter and J&#xfc;rgen Schmidhuber in 1997, can solve the gradient disappearance or explosion problem that exists in the traditional Recurrent Neural Network model (<xref ref-type="bibr" rid="B29">Schmidhuber, 1997</xref>). As shown in <xref ref-type="fig" rid="F5">Figure 5</xref>, its basic structure contains a memory cell <inline-formula id="inf14">
<mml:math id="m16">
<mml:mrow>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and three gates (forget gate <inline-formula id="inf15">
<mml:math id="m17">
<mml:mrow>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, input gate <inline-formula id="inf16">
<mml:math id="m18">
<mml:mrow>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, and output gate <inline-formula id="inf17">
<mml:math id="m19">
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>).</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>The structure of LSTM model.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g005.tif">
<alt-text content-type="machine-generated">Diagram illustrating a recurrent neural network with Long Short-Term Memory (LSTM) units. Three sections show the flow of data through input, hidden, and output layers. Each section contains components like sigmoid and tanh activation functions, illustrating input \( x \), cell state \( c \), and hidden state \( h \) connections across time steps \( t-1 \), \( t \), and \( t&#x2b;1 \).</alt-text>
</graphic>
</fig>
<p>The core idea of LSTM is the adaptive gate mechanism, which determines whether and to what extent the cell state of LSTM is updated (<xref ref-type="bibr" rid="B13">Gers and Cummins, 2000</xref>). The input gate <inline-formula id="inf18">
<mml:math id="m20">
<mml:mrow>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> contains the sigmoid function and tanh function, which together control how input information is passed to the memory cell. As the most important component of LSTM, the forget gate <inline-formula id="inf19">
<mml:math id="m21">
<mml:mrow>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> determines how to retain and forget historical information by calculating the weight, so it can effectively overcome the problems of gradient disappearance and gradient explosion (<xref ref-type="bibr" rid="B31">Wang et al., 2021</xref>). The output gate <inline-formula id="inf20">
<mml:math id="m22">
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> determines how much information is to be output in the current state. The complete calculation process of the LSTM network from input to output is as follows:<disp-formula id="e3">
<mml:math id="m23">
<mml:mrow>
<mml:mtext>sigmoid</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi mathvariant="normal">x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2b;</mml:mo>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>
<disp-formula id="e4">
<mml:math id="m24">
<mml:mrow>
<mml:mi>tanh</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi mathvariant="normal">x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:mi>x</mml:mi>
</mml:msup>
<mml:mo>&#x2212;</mml:mo>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:mi>x</mml:mi>
</mml:msup>
<mml:mo>&#x2b;</mml:mo>
<mml:msup>
<mml:mi>e</mml:mi>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>
<disp-formula id="e5">
<mml:math id="m25">
<mml:mrow>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>
<disp-formula id="e6">
<mml:math id="m26">
<mml:mrow>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(6)</label>
</disp-formula>
<disp-formula id="e7">
<mml:math id="m27">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>c</mml:mi>
<mml:mo>&#x223c;</mml:mo>
</mml:mover>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="italic">tanh</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(7)</label>
</disp-formula>
<disp-formula id="e8">
<mml:math id="m28">
<mml:mrow>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>c</mml:mi>
<mml:mo>&#x223c;</mml:mo>
</mml:mover>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(8)</label>
</disp-formula>
<disp-formula id="e9">
<mml:math id="m29">
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>o</mml:mi>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>o</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(9)</label>
</disp-formula>
<disp-formula id="e10">
<mml:math id="m30">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>tanh</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(10)</label>
</disp-formula>
</p>
<p>In <xref ref-type="disp-formula" rid="e3">Equations 3</xref>&#x2013;<xref ref-type="disp-formula" rid="e10">10</xref>, <inline-formula id="inf21">
<mml:math id="m31">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf22">
<mml:math id="m32">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf23">
<mml:math id="m33">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf24">
<mml:math id="m34">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>o</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> denote the weight vectors of forget gate, input gate, memory cell and output gate respectively. <inline-formula id="inf25">
<mml:math id="m35">
<mml:mrow>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>f</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf26">
<mml:math id="m36">
<mml:mrow>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf27">
<mml:math id="m37">
<mml:mrow>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf28">
<mml:math id="m38">
<mml:mrow>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mi>o</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> represent the bias vectors of forget gate, input gate, memory cell and output gate respectively. <inline-formula id="inf29">
<mml:math id="m39">
<mml:mrow>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf30">
<mml:math id="m40">
<mml:mrow>
<mml:msub>
<mml:mi>i</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf31">
<mml:math id="m41">
<mml:mrow>
<mml:msub>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf32">
<mml:math id="m42">
<mml:mrow>
<mml:msub>
<mml:mi>o</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> denote the vectors of forget gate, input gate, memory cell and output gate respectively. <inline-formula id="inf33">
<mml:math id="m43">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf34">
<mml:math id="m44">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> represent the output vectors at time steps <inline-formula id="inf35">
<mml:math id="m45">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf36">
<mml:math id="m46">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>; <inline-formula id="inf37">
<mml:math id="m47">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> denotes the input at time <inline-formula id="inf38">
<mml:math id="m48">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
</sec>
<sec id="s2-3-2">
<label>2.3.2</label>
<title>Bidirectional long short-term memory</title>
<p>As shown in <xref ref-type="fig" rid="F6">Figure 6</xref>, Bidirectional Long Short-Term Memory (BILSTM) network is an improvement based on LSTM model, consisting of two independent LSTM networks, forward and backward propagation (<xref ref-type="bibr" rid="B1">Alex Graves and J&#xfc;rgen, 2005</xref>; <xref ref-type="bibr" rid="B14">Graves and Schmidhuber, 2005</xref>).</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>The structure of BILSTM model.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g006.tif">
<alt-text content-type="machine-generated">Diagram showing a Bidirectional LSTM neural network architecture. It includes input, forward, and backward layers with LSTM units. Each unit processes time steps \(x_{t-1}\), \(x_t\), and \(x_{t&#x2b;1}\). An activation layer with tanh functions connects LSTM outputs \(h_{t-1}\), \(h_t\), and \(h_{t&#x2b;1}\) to the output layers \(y_{t-1}\), \(y_t\), and \(y_{t&#x2b;1}\). Solid and dashed lines indicate data flow and dependencies between layers.</alt-text>
</graphic>
</fig>
<p>The hidden state of the forward LSTM network <inline-formula id="inf39">
<mml:math id="m49">
<mml:mrow>
<mml:mover accent="true">
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2192;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> is:<disp-formula id="e11">
<mml:math id="m50">
<mml:mrow>
<mml:mover accent="true">
<mml:msub>
<mml:mi mathvariant="normal">h</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
</mml:msub>
<mml:mo>&#x2192;</mml:mo>
</mml:mover>
<mml:mo>&#x3d;</mml:mo>
<mml:mtext>LSTM</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mover accent="true">
<mml:msub>
<mml:mi mathvariant="normal">h</mml:mi>
<mml:mrow>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2192;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(11)</label>
</disp-formula>In <xref ref-type="disp-formula" rid="e11">Equation 11</xref>, <inline-formula id="inf40">
<mml:math id="m51">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> represents the input at time <inline-formula id="inf41">
<mml:math id="m52">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, LSTM denotes the LSTM unit, <inline-formula id="inf42">
<mml:math id="m53">
<mml:mrow>
<mml:mover accent="true">
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2192;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> represents the hidden state of the forward LSTM network at time <inline-formula id="inf43">
<mml:math id="m54">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>Similarly, the hidden state of the backward LSTM network <inline-formula id="inf44">
<mml:math id="m55">
<mml:mrow>
<mml:mover accent="true">
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> is:<disp-formula id="e12">
<mml:math id="m56">
<mml:mrow>
<mml:mover accent="true">
<mml:msub>
<mml:mi mathvariant="normal">h</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
</mml:msub>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
<mml:mo>&#x3d;</mml:mo>
<mml:mtext>LSTM</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mover accent="true">
<mml:msub>
<mml:mi mathvariant="normal">h</mml:mi>
<mml:mrow>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(12)</label>
</disp-formula>In <xref ref-type="disp-formula" rid="e12">Equation 12</xref>, <inline-formula id="inf45">
<mml:math id="m57">
<mml:mrow>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> represents the input at time <inline-formula id="inf46">
<mml:math id="m58">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, LSTM denotes the LSTM unit, <inline-formula id="inf47">
<mml:math id="m59">
<mml:mrow>
<mml:mover accent="true">
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> represents the hidden state of the backward LSTM network at time <inline-formula id="inf48">
<mml:math id="m60">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>The hidden state <inline-formula id="inf49">
<mml:math id="m61">
<mml:mrow>
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> of the BILSTM network is composed of the hidden state of the forward LSTM network <inline-formula id="inf50">
<mml:math id="m62">
<mml:mrow>
<mml:mover accent="true">
<mml:msub>
<mml:mi>h</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2192;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> and the hidden state of the backward LSTM network <inline-formula id="inf51">
<mml:math id="m63">
<mml:mrow>
<mml:mover accent="true">
<mml:msub>
<mml:mi mathvariant="normal">h</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
</mml:msub>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula>. The output value is obtained by activation function.</p>
</sec>
<sec id="s2-3-3">
<label>2.3.3</label>
<title>Parameter setting</title>
<p>The design of model parameters has a significant impact on prediction performance. After multiple rounds of testing and adjustment, the final parameter configuration is summarized in <xref ref-type="table" rid="T2">Table 2</xref>. Specifically, the CO<sub>2</sub> concentration and meteorological data from the HPB, IPR, KRE, and TRN stations were divided into training and testing sets according to the time dimension using a 7:3 ratio. The first 70% of the data were used for model training, while the remaining 30% were used for testing. The historical sequence length was set to 24, and the prediction length was set to 1, meaning that the CO<sub>2</sub> concentration of the next hour was predicted based on observations of the past 24 h from each station. The BILSTM network consisted of one BILSTM layer, one Dropout layer, and one Dense layer. The BILSTM layer contained 128 neurons, while the dropout rate was set to 0.2 to prevent overfitting. The model was trained using the Mean Squared Error (MSE) as the loss function, with a learning rate of 0.001 and a batch size of 64. The Adaptive Moment Estimation (Adam) optimizer, known for its high computational efficiency and low memory demand, was employed for parameter optimization and loss minimization. The number of epochs was set to 100. All experiments were implemented in Python 3.9.18, utilizing the TensorFlow 2.7.0 and Keras 2.7.0 frameworks. Both training and testing were conducted on a system equipped with a GeForce RTX 3060 GPU and 16 GB of memory.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Parameter settings of the BILSTM model.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Parameter</th>
<th align="center">Values</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Training set</td>
<td align="center">70%</td>
</tr>
<tr>
<td align="center">Test set</td>
<td align="center">30%</td>
</tr>
<tr>
<td align="center">History len</td>
<td align="center">24</td>
</tr>
<tr>
<td align="center">Predict len</td>
<td align="center">1</td>
</tr>
<tr>
<td align="center">Neurons</td>
<td align="center">128</td>
</tr>
<tr>
<td align="center">Dropout</td>
<td align="center">0.2</td>
</tr>
<tr>
<td align="center">Loss function</td>
<td align="center">Mean square error</td>
</tr>
<tr>
<td align="center">Learning rate</td>
<td align="center">0.001</td>
</tr>
<tr>
<td align="center">Batch size</td>
<td align="center">64</td>
</tr>
<tr>
<td align="center">Optimizer</td>
<td align="center">Adam</td>
</tr>
<tr>
<td align="center">Epochs</td>
<td align="center">100</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2-3-4">
<label>2.3.4</label>
<title>Model evaluation</title>
<p>To quantitatively evaluate the performance of the BILSTM model in predicting CO<sub>2</sub> concentration, RMSE (representing overall prediction accuracy), MAE (representing the average distance between predicted values and true values), and R<sup>2</sup> (representing the degree of model fit) were employed (<xref ref-type="bibr" rid="B37">Yan et al., 2025</xref>). The formulas for each metric are shown in <xref ref-type="disp-formula" rid="e13">Equations 13</xref>&#x2013;<xref ref-type="disp-formula" rid="e16">16</xref>:<disp-formula id="e13">
<mml:math id="m64">
<mml:mrow>
<mml:mtext>RMSE</mml:mtext>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>T</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>/</mml:mo>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:math>
<label>(13)</label>
</disp-formula>
<disp-formula id="e14">
<mml:math id="m65">
<mml:mrow>
<mml:mtext>MAE</mml:mtext>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>T</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:mfenced open="|" close="|" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(14)</label>
</disp-formula>
<disp-formula id="e15">
<mml:math id="m66">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>T</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>/</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>T</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(15)</label>
</disp-formula>
<disp-formula id="e16">
<mml:math id="m67">
<mml:mrow>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>T</mml:mi>
</mml:munderover>
</mml:mstyle>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(16)</label>
</disp-formula>
</p>
<p>In the above equation, <inline-formula id="inf52">
<mml:math id="m68">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf53">
<mml:math id="m69">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> are the observed and predicted values at time <inline-formula id="inf54">
<mml:math id="m70">
<mml:mrow>
<mml:mi mathvariant="normal">t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> respectively; <inline-formula id="inf55">
<mml:math id="m71">
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the amount of data in the test set.</p>
</sec>
</sec>
</sec>
<sec sec-type="results|discussion" id="s3">
<label>3</label>
<title>Results and discussion</title>
<sec id="s3-1">
<label>3.1</label>
<title>Input feature selection</title>
<p>Selecting appropriate input features is crucial for constructing a high-precision prediction model. Feature selection not only directly affects the predictive performance of the model but also influences its generalization ability and computational efficiency. An excessive number of input variables may introduce redundancy, reduce interpretability, and increase computational cost, thereby limiting the model&#x2019;s performance. To identify the key variables influencing CO<sub>2</sub> concentration, the correlations between meteorological factors and CO<sub>2</sub> concentration at the HPB, IPR, KRE, and TRN stations were analyzed using Pearson&#x2019;s correlation coefficient. According to conventional criteria, an absolute value of the Pearson correlation coefficient below 0.3 indicates a low correlation, between 0.3 and 0.7 indicates a moderate correlation, and above 0.7 indicates a high correlation.</p>
<p>As shown in <xref ref-type="fig" rid="F7">Figure 7</xref>, the correlations between CO<sub>2</sub> concentration and five meteorological factors, air temperature (AT), relative humidity (RH), air pressure (AP), wind direction (WD), and wind speed (WS), were analyzed. Among these factors, air temperature exhibits the strongest correlation with CO<sub>2</sub> concentration at all four stations, with absolute correlation coefficients exceeding 0.6. Relative humidity is identified as the second most influential factor, showing a relatively strong correlation at the IPR, KRE, and TRN stations. Although the correlation at the HPB station is slightly weaker, RH still ranks as the second most important variable after temperature. In contrast, air pressure, wind direction, and wind speed display consistently low correlations across all stations, with average correlation coefficients below 0.1 (<xref ref-type="table" rid="T3">Table 3</xref>). Therefore, based on the correlation analysis, air temperature (AT) and relative humidity (RH), which show relatively strong and consistent correlations with CO<sub>2</sub> concentration across all stations, were selected as the key input features for the subsequent BILSTM prediction model.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Correlation between CO<sub>2</sub> concentration and meteorological factors at four stations. <bold>(a)</bold> HPB. <bold>(b)</bold> IPR. <bold>(c)</bold> KRE. <bold>(d)</bold> TRN.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g007.tif">
<alt-text content-type="machine-generated">Four correlation matrices labeled (a) to (d) show relationships between variables CO2, AT, RH, AP, WD, and WS. Each matrix displays correlation values in circles colored from blue (negative correlation) to red (positive correlation), with a gradient bar indicating correlation strength from -1 to 1. The matrices highlight different correlation patterns among the variables.</alt-text>
</graphic>
</fig>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Correlation values between CO<sub>2</sub> concentration and meteorological factors at four stations.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Station Correlation</th>
<th align="center">HPB</th>
<th align="center">IPR</th>
<th align="center">KRE</th>
<th align="center">TRN</th>
<th align="center">Average</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">AT</td>
<td align="center">&#x2212;0.670</td>
<td align="center">&#x2212;0.612</td>
<td align="center">&#x2212;0.689</td>
<td align="center">&#x2212;0.662</td>
<td align="center">&#x2212;0.658</td>
</tr>
<tr>
<td align="center">RH</td>
<td align="center">0.287</td>
<td align="center">0.513</td>
<td align="center">0.564</td>
<td align="center">0.481</td>
<td align="center">0.461</td>
</tr>
<tr>
<td align="center">AP</td>
<td align="center">&#x2212;0.191</td>
<td align="center">0.217</td>
<td align="center">&#x2212;0.084</td>
<td align="center">&#x2212;0.011</td>
<td align="center">&#x2212;0.017</td>
</tr>
<tr>
<td align="center">WD</td>
<td align="center">0.020</td>
<td align="center">&#x2212;0.068</td>
<td align="center">&#x2212;0.011</td>
<td align="center">&#x2212;0.065</td>
<td align="center">&#x2212;0.031</td>
</tr>
<tr>
<td align="center">WS</td>
<td align="center">0.097</td>
<td align="center">&#x2212;0.254</td>
<td align="center">0.030</td>
<td align="center">0.111</td>
<td align="center">&#x2212;0.004</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>Feature selection effect</title>
<p>To verify the influence of feature selection on the model&#x2019;s predictive performance, the results of the BILSTM model using all meteorological factors as input were compared with using only air temperature (AT) and relative humidity (RH). The predicted values and the true observed values for the four stations under these two different input feature configurations are compared in <xref ref-type="fig" rid="F8">Figure 8</xref>. From the prediction results, it can be observed that for all four stations, when the model input features are restricted to air temperature and relative humidity, the predicted values are closer to the actual observations, indicating a higher level of agreement between predictions and measurements. These findings suggest that simplifying the input feature set does not compromise prediction accuracy. On the contrary, it effectively eliminates redundant and weakly correlated information, thereby improving the model&#x2019;s ability to capture key patterns and enhancing its overall predictive stability.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Comparison results before and after feature selection for each station. <bold>(a)</bold> HPB station. <bold>(b)</bold> IPR station. <bold>(c)</bold> KRE station. <bold>(d)</bold> TRN station. The black line represents the true observations; the red line represents the predicted values using only air temperature and relative humidity and the blue line represents the predicted values obtained using all meteorological factors as input.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g008.tif">
<alt-text content-type="machine-generated">Four line graphs labeled (a) to (d) compare actual and predicted CO2 concentrations over time in hours. Each graph uses different data features: model predictions with atmospheric temperature and relative humidity, and predictions with all features. The CO2 levels fluctuate similarly across graphs, with lines closely following each other. Graph (b) has a higher range of CO2 levels compared to the others.</alt-text>
</graphic>
</fig>
<p>To further quantify the prediction performance before and after input feature selection, the RMSE, MAE, and R<sup>2</sup> for each station under the two feature configurations are calculated in <xref ref-type="table" rid="T4">Table 4</xref>. Additionally, the fitting relationships between predicted values and actual observations are visualized in <xref ref-type="fig" rid="F9">Figure 9</xref> through density scatter plots. As can be seen, when input features are limited to air temperature and relative humidity, the RMSE and MAE of the BILSTM model are decreased and the R<sup>2</sup> is increased at all stations. The overall prediction accuracy is improved compared to using all meteorological factors as input features. Specifically, the average MAE at the four stations decreases from 2.426 to 2.178, and the average RMSE decreases from 3.715 to 3.429, both of which decrease by about 0.3. Meanwhile, the average R<sup>2</sup> increases from 0.899 to 0.915. This indicates that selecting features that are highly correlated with CO<sub>2</sub> mole fraction will be more effective than using all of them. Removing low-correlation meteorological factors can not only enhance prediction performance but also reduce computational complexity and overfitting risk.</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Prediction performance of the BILSTM model under different input feature configurations.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th rowspan="2" align="center">Station</th>
<th colspan="3" align="center">CO<sub>2</sub>&#x2b;All features</th>
<th colspan="3" align="center">CO<sub>2</sub>&#x2b;AT, RH</th>
</tr>
<tr>
<th align="center">MAE</th>
<th align="center">RMSE</th>
<th align="center">R<sup>2</sup>
</th>
<th align="center">MAE</th>
<th align="center">RMSE</th>
<th align="center">R<sup>2</sup>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">HPB</td>
<td align="center">1.896</td>
<td align="center">3.151</td>
<td align="center">0.889</td>
<td align="center">1.624</td>
<td align="center">2.974</td>
<td align="center">0.901</td>
</tr>
<tr>
<td align="center">IPR</td>
<td align="center">3.753</td>
<td align="center">5.702</td>
<td align="center">0.910</td>
<td align="center">3.548</td>
<td align="center">5.387</td>
<td align="center">0.919</td>
</tr>
<tr>
<td align="center">KRE</td>
<td align="center">1.908</td>
<td align="center">2.702</td>
<td align="center">0.917</td>
<td align="center">1.742</td>
<td align="center">2.556</td>
<td align="center">0.926</td>
</tr>
<tr>
<td align="center">TRN</td>
<td align="center">2.167</td>
<td align="center">3.305</td>
<td align="center">0.880</td>
<td align="center">1.797</td>
<td align="center">2.798</td>
<td align="center">0.914</td>
</tr>
<tr>
<td align="center">Average</td>
<td align="center">2.426</td>
<td align="center">3.715</td>
<td align="center">0.899</td>
<td align="center">2.178</td>
<td align="center">3.429</td>
<td align="center">0.915</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>Density scatter plots between predicted values and actual observations under the two feature configurations. <bold>(a)</bold> HPB_(AT&#x2b;RH). <bold>(b)</bold> HPB_(All features). <bold>(c)</bold> IPR_(AT&#x2b;RH). <bold>(d)</bold> IPR_(All features). <bold>(e)</bold> KRE_(AT&#x2b;RH). <bold>(f)</bold> KRE_(All features). <bold>(g)</bold> TRN_(AT&#x2b;RH). <bold>(h)</bold> TRN_(All features).</p>
</caption>
<graphic xlink:href="feart-14-1736569-g009.tif">
<alt-text content-type="machine-generated">Scatter plots in a two-by-four grid comparing predicted values versus actual values for different variables. Top row (AT&#x2b;RH) includes graphs for HPB, IPR, KRE, and TRN with respective equations and R-squared values. Bottom row (All features) mirrors the top row variables with slight differences in equations and R-squared values. Data points are color-coded by density, ranging from purple (low) to red (high).</alt-text>
</graphic>
</fig>
<p>It is observed that after feature selection, the prediction performance has improved. However, there are still differences among all four stations, which may be closely related to the geographical location characteristics (<xref ref-type="fig" rid="F10">Figure 10</xref>). The HPB station, located in a high-altitude mountainous region in Germany and serving as a regional background station, is far from major anthropogenic emission sources. Its CO<sub>2</sub> observations primarily reflect large-scale background signals with relatively smooth temporal variability and minimal local disturbances, making the time series easier for the model to learn and resulting in the highest prediction accuracy. In contrast, the IPR station exhibits the highest prediction error, as it is located near an industrial zone in northern Italy. The CO<sub>2</sub> observations at IPR are strongly influenced by highly variable local anthropogenic emissions, including industrial activities and traffic, making it challenging to accurately capture short-term fluctuations and leading to greater uncertainty. KRE and TRN stations show relatively similar prediction accuracies, as both are dominated by natural ecosystem processes. KRE, located in an agricultural area, is influenced by crop growth and soil respiration, while TRN, as a forest station, exhibits CO<sub>2</sub> variations dependent on vegetation photosynthesis and respiration. Overall, the observed differences in model performance across stations are not incidental but a direct reflection of varying environmental conditions. The selected stations in this study encompass diverse geographical and environmental backgrounds, demonstrating the strong environmental adaptability of the BILSTM model in CO<sub>2</sub> prediction. 
Furthermore, this analysis highlights that future prediction targeting complex environments should incorporate external indicators of human activity, such as traffic indices or emission inventories, which may further enhance prediction accuracy.</p>
<fig id="F10" position="float">
<label>FIGURE 10</label>
<caption>
<p>The locations of the four stations. <bold>(a)</bold> HPB. <bold>(b)</bold> IPR. <bold>(c)</bold> KRE. <bold>(d)</bold> TRN.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g010.tif">
<alt-text content-type="machine-generated">Satellite images of four regions labeled (a) Hohenpeissenberg, Germany, characterized by green countryside and forested areas; (b) Ispra, Italy, showing urban development and forest; (c) Trainou, France, a mix of fields and rural land; (d) K&#x159;e&#x161;&#xED;n u Pacova, Czech Republic, with patchwork of fields and wooded regions.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Model performance evaluation</title>
<p>In order to explore the accuracy and effectiveness of BILSTM model in predicting CO<sub>2</sub> concentration, the prediction results were compared with those of three baseline models, LSTM, GRU, and SVR. The parameter settings for each model are shown in <xref ref-type="table" rid="T5">Table 5</xref>.</p>
<table-wrap id="T5" position="float">
<label>TABLE 5</label>
<caption>
<p>Parameter settings of the three baseline models.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Model</th>
<th align="left">Parameter</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">LSTM</td>
<td align="left">Learning rate &#x3d; 0.001, batch size &#x3d; 64, epoch &#x3d; 100, neurons &#x3d; 128, optimizer &#x3d; Adam, dropout &#x3d; 0.2</td>
</tr>
<tr>
<td align="center">GRU</td>
<td align="left">Learning rate &#x3d; 0.001, batch size &#x3d; 64, epoch &#x3d; 100, neurons &#x3d; 128, optimizer &#x3d; Adam, dropout &#x3d; 0.2</td>
</tr>
<tr>
<td align="center">SVR</td>
<td align="left">Kernel &#x3d; &#x2018;rbf&#x2019;, gamma &#x3d; 0.01, c &#x3d; 10, epsilon &#x3d; 0.1</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s3-3-1">
<label>3.3.1</label>
<title>Short-term prediction performance</title>
<p>The performance comparison of the four models in predicting CO<sub>2</sub> concentration is illustrated in <xref ref-type="fig" rid="F11">Figure 11</xref>. It is evident that the three neural network models, BILSTM, GRU, and LSTM, demonstrate similar prediction performance, all of which outperform the SVR model, a traditional machine learning approach. Among them, BILSTM achieves the best overall performance, with RMSE and MAE values consistently lower than those of GRU and LSTM across all stations, indicating higher prediction accuracy. Although the performance of GRU and LSTM is slightly inferior to BILSTM, they still provide relatively accurate predictions. In contrast, the SVR model performs the worst among the four, exhibiting notably higher RMSE and MAE values at all stations, particularly at HPB and TRN, where prediction errors are significantly larger. This demonstrates that the generalization capability of SVR is much weaker when dealing with complex nonlinear time series data compared to neural network approaches.</p>
<fig id="F11" position="float">
<label>FIGURE 11</label>
<caption>
<p>Comparison of prediction performance of the four models.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g011.tif">
<alt-text content-type="machine-generated">Two bar charts compare error metrics for different models: BILSTM, GRU, LSTM, and SVR across four datasets labeled HPB, IPR, KRE, and TRN. The left chart shows RMSE values, and the right chart shows MAE values. In both charts, BILSTM generally performs similarly to GRU and LSTM, while SVR shows higher values across most datasets.</alt-text>
</graphic>
</fig>
<p>The quantitative evaluation metrics (RMSE, MAE, and R<sup>2</sup>) for each model and station are summarized in <xref ref-type="table" rid="T6">Table 6</xref>. As shown, the R<sup>2</sup> values of BILSTM consistently exceed those of the other three models, reflecting its superior ability to capture temporal variability in CO<sub>2</sub> concentration. BILSTM also exhibits stronger explanatory power and a better overall fit, confirming its robustness and reliability in CO<sub>2</sub> prediction.</p>
<table-wrap id="T6" position="float">
<label>TABLE 6</label>
<caption>
<p>MAE, RMSE, and R<sup>2</sup> of the four models at each station.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Station</th>
<th align="center">Model</th>
<th align="center">MAE</th>
<th align="center">RMSE</th>
<th align="center">R<sup>2</sup>
</th>
<th align="center">Station</th>
<th align="center">Model</th>
<th align="center">MAE</th>
<th align="center">RMSE</th>
<th align="center">R<sup>2</sup>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="4" align="center">HPB</td>
<td align="center">BILSTM</td>
<td align="center">1.624</td>
<td align="center">2.974</td>
<td align="center">0.901</td>
<td rowspan="4" align="center">KRE</td>
<td align="center">BILSTM</td>
<td align="center">1.742</td>
<td align="center">2.556</td>
<td align="center">0.926</td>
</tr>
<tr>
<td align="center">LSTM</td>
<td align="center">1.764</td>
<td align="center">3.029</td>
<td align="center">0.898</td>
<td align="center">LSTM</td>
<td align="center">1.791</td>
<td align="center">2.575</td>
<td align="center">0.924</td>
</tr>
<tr>
<td align="center">GRU</td>
<td align="center">1.647</td>
<td align="center">3.052</td>
<td align="center">0.896</td>
<td align="center">GRU</td>
<td align="center">1.803</td>
<td align="center">2.603</td>
<td align="center">0.923</td>
</tr>
<tr>
<td align="center">SVR</td>
<td align="center">4.321</td>
<td align="center">5.162</td>
<td align="center">0.703</td>
<td align="center">SVR</td>
<td align="center">2.014</td>
<td align="center">2.810</td>
<td align="center">0.910</td>
</tr>
<tr>
<td rowspan="4" align="center">IPR</td>
<td align="center">BILSTM</td>
<td align="center">3.548</td>
<td align="center">5.387</td>
<td align="center">0.919</td>
<td rowspan="4" align="center">TRN</td>
<td align="center">BILSTM</td>
<td align="center">1.797</td>
<td align="center">2.798</td>
<td align="center">0.914</td>
</tr>
<tr>
<td align="center">LSTM</td>
<td align="center">3.846</td>
<td align="center">5.762</td>
<td align="center">0.908</td>
<td align="center">LSTM</td>
<td align="center">1.885</td>
<td align="center">2.931</td>
<td align="center">0.906</td>
</tr>
<tr>
<td align="center">GRU</td>
<td align="center">3.563</td>
<td align="center">5.432</td>
<td align="center">0.918</td>
<td align="center">GRU</td>
<td align="center">1.840</td>
<td align="center">2.845</td>
<td align="center">0.911</td>
</tr>
<tr>
<td align="center">SVR</td>
<td align="center">4.361</td>
<td align="center">5.800</td>
<td align="center">0.907</td>
<td align="center">SVR</td>
<td align="center">2.755</td>
<td align="center">3.520</td>
<td align="center">0.864</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>To further analyze model performance at a detailed level, part of the prediction results from the TRN station were selected for visualization and discussion. The comparison between the true observations and the predicted results of BILSTM, GRU, LSTM, and SVR is presented in <xref ref-type="fig" rid="F12">Figure 12</xref>. As shown, all four models can capture the overall temporal trend of CO<sub>2</sub> concentration. In <xref ref-type="fig" rid="F12">Figure 12a</xref>, both GRU and BILSTM accurately reproduce the detailed variations of CO<sub>2</sub> concentration, with BILSTM showing slightly higher precision, particularly in peak regions. This improvement can be attributed to the bidirectional propagation mechanism of BILSTM, which allows the model to incorporate both past and future information for more comprehensive temporal representation. The comparison between BILSTM and LSTM (<xref ref-type="fig" rid="F12">Figure 12b</xref>) further highlights this advantage. LSTM tends to produce larger prediction errors, especially around CO<sub>2</sub> concentration peaks, whereas BILSTM more effectively captures subtle fluctuations in the time series, resulting in predictions closer to the observed values. Finally, <xref ref-type="fig" rid="F12">Figure 12c</xref> compares the prediction performance of BILSTM and SVR. It is clear that BILSTM not only captures the overall trend more accurately but also provides better predictions during periods of complex CO<sub>2</sub> fluctuations. In contrast, SVR tends to lose detailed information, leading to relatively larger errors and poorer performance.</p>
<fig id="F12" position="float">
<label>FIGURE 12</label>
<caption>
<p>Comparison of the prediction results of BILSTM, GRU, LSTM, and SVR at TRN station. <bold>(a)</bold> Comparison of BILSTM and GRU prediction results. <bold>(b)</bold> Comparison of BILSTM and LSTM prediction results. <bold>(c)</bold> Comparison of BILSTM and SVR prediction results. The black line represents the true observed values, the red line represents the BILSTM prediction results, the blue line represents the GRU prediction results, the green line represents the LSTM prediction results, and the purple line represents the SVR prediction results.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g012.tif">
<alt-text content-type="machine-generated">Three line graphs (a, b, c) compare actual CO2 levels with predicted levels using different algorithms over time in hours. Graph (a) compares actual data with BIDirectional Long Short-Term Memory (BILSTM) and Gated Recurrent Unit (GRU) predictions, highlighting discrepancies with red circles. Graph (b) compares BILSTM and Long Short-Term Memory (LSTM), and graph (c) compares BILSTM and Support Vector Machine (SVM) predictions. Each graph includes a zoomed inset showing finer detail of variations. CO2 levels range from 400 to 460 ppm. Each graph's x-axis is time in hours, and the y-axis is CO2 concentration in ppm.</alt-text>
</graphic>
</fig>
<p>Overall, both the quantitative metrics and visual analysis confirm that the BILSTM model achieves the highest prediction accuracy among the four models. Its bidirectional learning structure significantly enhances the model&#x2019;s capacity to represent temporal dependencies, yielding more precise and stable prediction values compared with GRU, LSTM, and SVR.</p>
</sec>
<sec id="s3-3-2">
<label>3.3.2</label>
<title>Long-term prediction performance</title>
<p>To comprehensively verify the long-term prediction capability of the BILSTM model, we extended the prediction horizon and conducted multi-step prediction experiments to evaluate its temporal generalization performance. Specifically, the prediction intervals were expanded to 3, 6, 12, 24, and 48 h, and the average MAE, RMSE, and R<sup>2</sup> across the four stations were calculated for each prediction step. The results are summarized in <xref ref-type="table" rid="T7">Table 7</xref>.</p>
<table-wrap id="T7" position="float">
<label>TABLE 7</label>
<caption>
<p>Multi-step prediction results of different models.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Model</th>
<th align="center">Metric</th>
<th align="center">&#x2b;1 h</th>
<th align="center">&#x2b;3 h</th>
<th align="center">&#x2b;6 h</th>
<th align="center">&#x2b;12 h</th>
<th align="center">&#x2b;24 h</th>
<th align="center">&#x2b;48 h</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="3" align="center">BILSTM</td>
<td align="center">MAE</td>
<td align="center">2.178</td>
<td align="center">3.122</td>
<td align="center">3.958</td>
<td align="center">4.994</td>
<td align="center">5.762</td>
<td align="center">6.194</td>
</tr>
<tr>
<td align="center">RMSE</td>
<td align="center">3.429</td>
<td align="center">4.765</td>
<td align="center">5.886</td>
<td align="center">7.138</td>
<td align="center">8.112</td>
<td align="center">8.706</td>
</tr>
<tr>
<td align="center">R<sup>2</sup>
</td>
<td align="center">0.915</td>
<td align="center">0.839</td>
<td align="center">0.755</td>
<td align="center">0.636</td>
<td align="center">0.530</td>
<td align="center">0.458</td>
</tr>
<tr>
<td rowspan="3" align="center">GRU</td>
<td align="center">MAE</td>
<td align="center">2.213</td>
<td align="center">3.189</td>
<td align="center">3.995</td>
<td align="center">5.042</td>
<td align="center">5.893</td>
<td align="center">6.404</td>
</tr>
<tr>
<td align="center">RMSE</td>
<td align="center">3.483</td>
<td align="center">4.889</td>
<td align="center">5.915</td>
<td align="center">7.243</td>
<td align="center">8.312</td>
<td align="center">8.935</td>
</tr>
<tr>
<td align="center">R<sup>2</sup>
</td>
<td align="center">0.912</td>
<td align="center">0.827</td>
<td align="center">0.752</td>
<td align="center">0.622</td>
<td align="center">0.502</td>
<td align="center">0.423</td>
</tr>
<tr>
<td rowspan="3" align="center">LSTM</td>
<td align="center">MAE</td>
<td align="center">2.322</td>
<td align="center">3.282</td>
<td align="center">4.115</td>
<td align="center">5.091</td>
<td align="center">5.853</td>
<td align="center">6.279</td>
</tr>
<tr>
<td align="center">RMSE</td>
<td align="center">3.574</td>
<td align="center">4.933</td>
<td align="center">6.014</td>
<td align="center">7.331</td>
<td align="center">8.213</td>
<td align="center">8.780</td>
</tr>
<tr>
<td align="center">R<sup>2</sup>
</td>
<td align="center">0.909</td>
<td align="center">0.829</td>
<td align="center">0.745</td>
<td align="center">0.613</td>
<td align="center">0.520</td>
<td align="center">0.451</td>
</tr>
<tr>
<td rowspan="3" align="center">SVM</td>
<td align="center">MAE</td>
<td align="center">3.363</td>
<td align="center">4.648</td>
<td align="center">5.313</td>
<td align="center">6.779</td>
<td align="center">7.720</td>
<td align="center">8.695</td>
</tr>
<tr>
<td align="center">RMSE</td>
<td align="center">4.323</td>
<td align="center">5.812</td>
<td align="center">7.020</td>
<td align="center">8.537</td>
<td align="center">9.250</td>
<td align="center">10.277</td>
</tr>
<tr>
<td align="center">R<sup>2</sup>
</td>
<td align="center">0.846</td>
<td align="center">0.719</td>
<td align="center">0.595</td>
<td align="center">0.415</td>
<td align="center">0.334</td>
<td align="center">0.181</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>As can be seen from <xref ref-type="table" rid="T7">Table 7</xref>, all models exhibit a continuous increase in MAE and RMSE, accompanied by a gradual decrease in R<sup>2</sup> as the prediction horizon becomes longer. This pattern indicates that long-term prediction tasks have higher uncertainty, so the prediction accuracy decreases over time. Despite this, BILSTM consistently demonstrates superior performance compared with GRU, LSTM, and SVM across all prediction steps. Especially when the prediction step reaches 24 h and 48 h, BILSTM still maintains the lowest error and highest R<sup>2</sup>, highlighting its stronger stability and robustness in long-term prediction.</p>
<p>Furthermore, as illustrated in <xref ref-type="fig" rid="F13">Figure 13</xref>, the performance differences between the neural network models (BILSTM, GRU, LSTM) and the machine learning model (SVM) become particularly significant as the prediction step increases. When the prediction step reaches 6 h, the R<sup>2</sup> of SVM drops to only 0.59, whereas all three neural network models retain R<sup>2</sup> above 0.7. When the prediction step is extended to 24 h, the R<sup>2</sup> of SVM sharply decreases to 0.334, while the R<sup>2</sup> of the neural network models remains above 0.5. This rapid degradation in SVM performance is due to its inherent lack of temporal memory capability and inability to model long-term dependencies. In contrast, BILSTM, GRU, and LSTM benefit from gated recurrent units that effectively capture sequential patterns, enabling them to maintain relatively high R<sup>2</sup> values. As the prediction horizon becomes longer and cumulative errors increase, the advantages of BILSTM become more evident. Across all prediction steps, BILSTM consistently exhibits the lowest MAE and RMSE, indicating that its bidirectional recurrent structure allows it to leverage both forward and backward temporal information during training. This enables BILSTM to more effectively characterize the temporal dependencies of CO<sub>2</sub> concentration dynamics, thereby achieving higher prediction reliability.</p>
<fig id="F13" position="float">
<label>FIGURE 13</label>
<caption>
<p>Comparison of prediction performance among BILSTM, GRU, LSTM, and SVM models at different time steps. <bold>(a)</bold> MAE, <bold>(b)</bold> RMSE, and <bold>(c)</bold> R<sup>2</sup>.</p>
</caption>
<graphic xlink:href="feart-14-1736569-g013.tif">
<alt-text content-type="machine-generated">Three charts comparing SVM, LSTM, GRU, and BILSTM over time steps. Chart (a) shows MAE with BILSTM performing best. Chart (b) shows RMSE with BILSTM lowest. Chart (c) shows R&#xB2;, with BILSTM, GRU, and LSTM outperforming SVM, maintaining higher scores.</alt-text>
</graphic>
</fig>
</sec>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Conclusion and future work</title>
<p>This study proposed a CO<sub>2</sub> concentration prediction method based on a BILSTM and compared its performance with three baseline models, GRU, LSTM, and SVR. The predictive capabilities of all models were evaluated using three statistical metrics, RMSE, MAE, and R<sup>2</sup>. The main findings are summarized as follows: 1. Among the five meteorological factors (air temperature, relative humidity, air pressure, wind direction, and wind speed) at the HPB, IPR, KRE, and TRN stations, air temperature exhibited the highest correlation with CO<sub>2</sub> concentration, followed by relative humidity. In contrast, the correlations of air pressure, wind direction, and wind speed with CO<sub>2</sub> concentration were relatively weak. 2. Feature selection played an essential role in improving model prediction accuracy. When only air temperature and relative humidity were used as input variables, the prediction accuracy of the BILSTM model significantly improved compared with using all meteorological factors. This result indicates that selecting features with stronger correlations effectively reduces input noise and enhances the model&#x2019;s predictive performance. 3. Comparisons with GRU, LSTM, and SVR baseline models revealed that the BILSTM model achieved the best overall performance among the four models. Across all four stations, BILSTM consistently achieved the lowest RMSE and MAE values and the highest R<sup>2</sup>. The superior performance of BILSTM is attributed to its bidirectional propagation mechanism, which enables the model to capture historical and future information comprehensively so that more detailed information of CO<sub>2</sub> concentration could be captured and further improve prediction accuracy.</p>
<p>Although this study demonstrates the capability of the BILSTM model to effectively predict CO<sub>2</sub> concentrations at ground-based observation sites, several limitations remain. First, the predictive variables rely on meteorological factors, whereas CO<sub>2</sub> is also influenced by anthropogenic emissions, land-cover characteristics, and vegetation activity. Incorporating these additional driver factors in future work would allow for a more comprehensive representation of the spatiotemporal variability and governing mechanisms of atmospheric CO<sub>2</sub>. Additionally, the present analysis is validated using data from only four ICOS stations in Europe. To further examine the generalizability of the proposed model, future studies should extend the evaluation to a larger number of ICOS sites or global GAW/NOAA observation networks, enabling a more robust assessment of model applicability under diverse atmospheric conditions. Finally, future research could explore more advanced hybrid deep learning approaches, such as introducing attention mechanisms to focus on critical prediction features, or employing graph neural networks to explicitly capture spatial correlations among different monitoring stations thereby further enhancing the predictive capability and interpretability.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The CO<sub>2</sub> and meteorological data for this study can be downloaded from the ICOS Carbon portal, DOI: 10.18160/VXCS-95EV.ICOS - <ext-link ext-link-type="uri" xlink:href="https://www.icos-cp.eu/">Integrated Carbon Observation System</ext-link>.</p>
</sec>
<sec sec-type="author-contributions" id="s6">
<title>Author contributions</title>
<p>YQ: Data curation, Writing &#x2013; review and editing, Writing &#x2013; original draft, Methodology. HY: Writing &#x2013; review and editing, Conceptualization, Methodology, Funding acquisition, Supervision. LC: Formal Analysis, Writing &#x2013; review and editing, Software. YZ: Formal Analysis, Writing &#x2013; review and editing. GF: Writing &#x2013; review and editing, Software. HF: Writing &#x2013; review and editing, Formal Analysis. QL: Investigation, Writing &#x2013; review and editing. YY: Investigation, Writing &#x2013; review and editing.</p>
</sec>
<ack>
<title>Acknowledgements</title>
<p>We would like to thank the ICOS for providing the CO<sub>2</sub> and meteorological data of HPB, IPR, KRE, and TRN stations.</p>
</ack>
<sec sec-type="COI-statement" id="s8">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s9">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/301337/overview">Takafumi Hirata</ext-link>, Hokkaido University, Japan</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1857069/overview">Tongwen Li</ext-link>, Sun Yat-sen University, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2690420/overview">Jining Yan</ext-link>, China University of Geosciences Wuhan, China</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Graves</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Fern&#xe1;ndez</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Schmidhuber</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2005</year>). &#x201c;<article-title>Bidirectional lstm networks for improved phoneme classification and recognition</article-title>,&#x201d; in <source>International conference on artificial neural networks</source> (<publisher-name>Springer</publisher-name>), <fpage>799</fpage>&#x2013;<lpage>804</lpage>.</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Amalou</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Mouhni</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Abdali</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Multivariate time series prediction by RNN architectures for energy consumption forecasting</article-title>. <source>Energy Rep.</source> <volume>8</volume>, <fpage>1084</fpage>&#x2013;<lpage>1091</lpage>. <pub-id pub-id-type="doi">10.1016/j.egyr.2022.07.139</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ayugi</surname>
<given-names>B. O.</given-names>
</name>
<name>
<surname>Chung</surname>
<given-names>E. S.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ogega</surname>
<given-names>O. M.</given-names>
</name>
<name>
<surname>Babousmail</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ongoma</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Projected changes in extreme climate events over Africa under 1.5&#xb0;C, 2.0&#xb0;C and 3.0&#xb0;C global warming levels based on CMIP6 projections</article-title>. <source>Atmos. Res.</source> <volume>292</volume>, <fpage>106872</fpage>. <pub-id pub-id-type="doi">10.1016/j.atmosres.2023.106872</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Qiao</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Z. Y.</given-names>
</name>
<name>
<surname>Zhong</surname>
<given-names>C. Y.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>L. J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Three-hourly PM<sub>2.5</sub> and O<sub>3</sub> concentrations prediction based on time series decomposition and LSTM model with attention mechanism</article-title>. <source>Atmos. Pollut. Res.</source> <volume>14</volume> (<issue>11</issue>), <fpage>101879</fpage>. <pub-id pub-id-type="doi">10.1016/j.apr.2023.101879</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Conil</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Helle</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Langrene</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Laurent</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Delmotte</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ramonet</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Continuous atmospheric CO<sub>2</sub>, CH<sub>4</sub> and CO measurements at the Observatoire P&#xe9;renne de l&#x27;Environnement (OPE) station in France from 2011 to 2018</article-title>. <source>Atmos. Meas. Tech.</source> <volume>12</volume> (<issue>12</issue>), <fpage>6361</fpage>&#x2013;<lpage>6383</lpage>. <pub-id pub-id-type="doi">10.5194/amt-12-6361-2019</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cui</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Jin</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zeng</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Deep learning methods for atmospheric PM<sub>2.5</sub> prediction: a comparative study of transformer and CNN-LSTM-attention</article-title>. <source>Atmos. Pollut. Res.</source> <volume>14</volume> (<issue>9</issue>), <fpage>101833</fpage>. <pub-id pub-id-type="doi">10.1016/j.apr.2023.101833</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Curcoll</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Camarero</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Bacardit</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>&#xc0;gueda</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Grossi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Gacia</surname>
<given-names>E.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Atmospheric carbon dioxide variability at Aig&#xfc;estortes, Central Pyrenees, Spain</article-title>. <source>Reg. Environ. Change</source> <volume>19</volume> (<issue>2</issue>), <fpage>313</fpage>&#x2013;<lpage>324</lpage>. <pub-id pub-id-type="doi">10.1007/s10113-018-1443-2</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Das</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Kunchala</surname>
<given-names>R. K.</given-names>
</name>
<name>
<surname>Chandra</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Chhabra</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Pandya</surname>
<given-names>M. R.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Characterizing the regional XCO<sub>2</sub> variability and its association with ENSO over India inferred from GOSAT and OCO-2 satellite observations</article-title>. <source>Sci. Total Environ.</source> <volume>902</volume>, <fpage>166176</fpage>. <pub-id pub-id-type="doi">10.1016/j.scitotenv.2023.166176</pub-id>
<pub-id pub-id-type="pmid">37562615</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Diffenbaugh</surname>
<given-names>N. S.</given-names>
</name>
<name>
<surname>Singh</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Mankin</surname>
<given-names>J. S.</given-names>
</name>
<name>
<surname>Horton</surname>
<given-names>D. E.</given-names>
</name>
<name>
<surname>Swain</surname>
<given-names>D. L.</given-names>
</name>
<name>
<surname>Touma</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Quantifying the influence of global warming on unprecedented extreme climate events</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>114</volume> (<issue>19</issue>), <fpage>4881</fpage>&#x2013;<lpage>4886</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1618082114</pub-id>
<pub-id pub-id-type="pmid">28439005</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Peng</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Hong</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Predicting flood susceptibility using LSTM neural networks</article-title>. <source>J. Hydrology</source> <volume>594</volume>, <fpage>125734</fpage>. <pub-id pub-id-type="doi">10.1016/j.jhydrol.2020.125734</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ferrarese</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Apadula</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Bertiglia</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Cassardo</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ferrero</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Fialdini</surname>
<given-names>L.</given-names>
</name>
<etal/>
</person-group> (<year>2015</year>). <article-title>Inspection of high&#x2013;concentration CO<sub>2</sub> events at the Plateau Rosa alpine station</article-title>. <source>Atmos. Pollut. Res.</source> <volume>6</volume> (<issue>3</issue>), <fpage>415</fpage>&#x2013;<lpage>427</lpage>. <pub-id pub-id-type="doi">10.5094/apr.2015.046</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gao</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A graph-based LSTM model for PM<sub>2.5</sub> forecasting</article-title>. <source>Atmos. Pollut. Res.</source> <volume>12</volume> (<issue>9</issue>), <fpage>101150</fpage>. <pub-id pub-id-type="doi">10.1016/j.apr.2021.101150</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gers</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Schmidhuber</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cummins</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>Learning to forget: continual prediction with LSTM</article-title>. <source>Neural Computation</source> <volume>12</volume> (<issue>10</issue>), <fpage>2451</fpage>&#x2013;<lpage>2471</lpage>. <pub-id pub-id-type="doi">10.1162/089976600300015015</pub-id>
<pub-id pub-id-type="pmid">11032042</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Graves</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Schmidhuber</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Framewise phoneme classification with bidirectional LSTM and other neural network architectures</article-title>. <source>Neural Netw.</source> <volume>18</volume> (<issue>5-6</issue>), <fpage>602</fpage>&#x2013;<lpage>610</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2005.06.042</pub-id>
<pub-id pub-id-type="pmid">16112549</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Heymann</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Reuter</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Buchwitz</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Schneising</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Bovensmann</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Burrows</surname>
<given-names>J. P.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>CO<sub>2</sub> emission of Indonesian fires in 2015 estimated from satellite&#x2010;derived atmospheric CO<sub>2</sub> concentrations</article-title>. <source>Geophys. Res. Lett.</source> <volume>44</volume> (<issue>3</issue>), <fpage>1537</fpage>&#x2013;<lpage>1544</lpage>. <pub-id pub-id-type="doi">10.1002/2016gl072042</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>ICOS</surname>
<given-names>RI</given-names>
</name>
<name>
<surname>Apadula</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Arnold</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bergamaschi</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Biermann</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>H.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <source>ICOS atmosphere release 2023-1 of level 2 greenhouse gas mole fractions of CO<sub>2</sub>, CH<sub>4</sub>, N<sub>2</sub>O, CO, meteorology and <sup>14</sup>CO<sub>2</sub>, and flask samples analysed for CO<sub>2</sub>, CH<sub>4</sub>, N<sub>2</sub>O, CO, H<sub>2</sub> and SF<sub>6</sub>
</source>. <publisher-name>ICOS ERIC - Carbon Portal</publisher-name>. <pub-id pub-id-type="doi">10.18160/VXCS-95EV</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jin</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Xue</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>A long-term global XCO<sub>2</sub> dataset: ensemble of satellite products</article-title>. <source>Atmos. Res.</source> <volume>279</volume>, <fpage>106385</fpage>. <pub-id pub-id-type="doi">10.1016/j.atmosres.2022.106385</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jin</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Xue</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Retrieval anthropogenic CO<sub>2</sub> emissions from OCO-2 and comparison with gridded emission inventories</article-title>. <source>J. Clean. Prod.</source> <volume>448</volume>, <fpage>141418</fpage>. <pub-id pub-id-type="doi">10.1016/j.jclepro.2024.141418</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Keeling</surname>
<given-names>C. D.</given-names>
</name>
<name>
<surname>Bacastow</surname>
<given-names>R. B.</given-names>
</name>
<name>
<surname>Bainbridge</surname>
<given-names>A. E.</given-names>
</name>
<name>
<surname>Ekdahl</surname>
<given-names>C. A.</given-names>
<suffix>Jr.</suffix>
</name>
<name>
<surname>Guenther</surname>
<given-names>P. R.</given-names>
</name>
<name>
<surname>Waterman</surname>
<given-names>L. S.</given-names>
</name>
<etal/>
</person-group> (<year>1976</year>). <article-title>Atmospheric carbon dioxide variations at Mauna Loa observatory, Hawaii</article-title>. <source>Tellus</source> <volume>28</volume> (<issue>6</issue>), <fpage>538</fpage>&#x2013;<lpage>551</lpage>. <pub-id pub-id-type="doi">10.1111/j.2153-3490.1976.tb00701.x</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Khaldi</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>El Afia</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Chiheb</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Tabik</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>What is the best RNN-cell structure to forecast each time series behavior?</article-title> <source>Expert Syst. Appl.</source> <volume>215</volume>, <fpage>119140</fpage>. <pub-id pub-id-type="doi">10.1016/j.eswa.2022.119140</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lelandais</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Xueref-Remy</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Riandet</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Blanc</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Armengaud</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Oppo</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Analysis of 5.5 years of atmospheric CO<sub>2</sub>, CH<sub>4</sub>, CO continuous observations (2014-2020) and their correlations, at the Observatoire de Haute Provence, a station of the ICOS-France national greenhouse gases observation network</article-title>. <source>Atmos. Environ.</source> <volume>277</volume>, <fpage>119020</fpage>. <pub-id pub-id-type="doi">10.1016/j.atmosenv.2022.119020</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Kenea</surname>
<given-names>S. T.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>J. E.</given-names>
</name>
<name>
<surname>Chung</surname>
<given-names>C. Y.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Analysis of source distribution of high carbon monoxide events using airborne and surface observations in Korea</article-title>. <source>Atmos. Environ.</source> <volume>289</volume>, <fpage>119316</fpage>. <pub-id pub-id-type="doi">10.1016/j.atmosenv.2022.119316</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Lou</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wan</surname>
<given-names>Q.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Short-term PV power prediction based on meteorological similarity days and SSA-BiLSTM</article-title>. <source>Syst. Soft Comput.</source> <volume>6</volume>, <fpage>200084</fpage>. <pub-id pub-id-type="doi">10.1016/j.sasc.2024.200084</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Shao</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Bai</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2025a</year>). <article-title>Near real-time land surface temperature reconstruction from FY-4A satellite using spatio-temporal attention network</article-title>. <source>Int. J. Appl. Earth Observation Geoinformation</source> <volume>139</volume>, <fpage>104480</fpage>. <pub-id pub-id-type="doi">10.1016/j.jag.2025.104480</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhong</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Bao</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2025b</year>). <article-title>Full-coverage mapping of daily high-resolution XCO<sub>2</sub> across China from 2015 to 2020 by deep learning-based spatio-temporal fusion</article-title>. <source>IEEE Trans. Geoscience Remote Sens.</source>, <fpage>1</fpage>. <pub-id pub-id-type="doi">10.1109/tgrs.2025.3540289</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Han</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Carbon emission reduction prediction of new energy vehicles in China based on GRA-BiLSTM model</article-title>. <source>Atmos. Pollut. Res.</source> <volume>14</volume> (<issue>9</issue>), <fpage>101865</fpage>. <pub-id pub-id-type="doi">10.1016/j.apr.2023.101865</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mingwei</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Tianxiang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Xingying</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Jinglu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ling</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Chun</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Fusion of multi-source near-surface CO<sub>2</sub> concentration data based on high accuracy surface modeling</article-title>. <source>Atmos. Pollut. Res.</source> <volume>8</volume> (<issue>6</issue>), <fpage>1170</fpage>&#x2013;<lpage>1178</lpage>. <pub-id pub-id-type="doi">10.1016/j.apr.2017.05.003</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qadeer</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Rehman</surname>
<given-names>W. U.</given-names>
</name>
<name>
<surname>Sheri</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Park</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>H. K.</given-names>
</name>
<name>
<surname>Jeon</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A long short-term memory (LSTM) network for hourly estimation of PM<sub>2.5</sub> concentration in two cities of South Korea</article-title>. <source>Appl. Sci.</source> <volume>10</volume> (<issue>11</issue>), <fpage>3984</fpage>. <pub-id pub-id-type="doi">10.3390/app10113984</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hochreiter</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Schmidhuber</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Long short-term memory</article-title>. <source>Neural Computation</source> <volume>9</volume> (<issue>8</issue>), <fpage>1735</fpage>&#x2013;<lpage>1780</lpage>. <pub-id pub-id-type="doi">10.1162/neco.1997.9.8.1735</pub-id>
<pub-id pub-id-type="pmid">9377276</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<collab>UNFCCC</collab>. (<year>2016</year>). <article-title>The Paris Agreement</article-title>.</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Biswas</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>High-resolution prediction of the spatial distribution of PM<sub>2.5</sub> concentrations in China using a long short-term memory model</article-title>. <source>J. Clean. Prod.</source> <volume>297</volume>, <fpage>126493</fpage>. <pub-id pub-id-type="doi">10.1016/j.jclepro.2021.126493</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2022a</year>). <article-title>NGCU: a new RNN model for time-series data prediction</article-title>. <source>Big Data Res.</source> <volume>27</volume>, <fpage>100296</fpage>. <pub-id pub-id-type="doi">10.1016/j.bdr.2021.100296</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Feng</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Jin</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2022b</year>). <article-title>High-coverage reconstruction of XCO<sub>2</sub> using multisource satellite remote sensing data in Beijing&#x2013;Tianjin&#x2013;Hebei region</article-title>. <source>Int. J. Environ. Res. Public Health</source> <volume>19</volume> (<issue>17</issue>), <fpage>10853</fpage>. <pub-id pub-id-type="doi">10.3390/ijerph191710853</pub-id>
<pub-id pub-id-type="pmid">36078571</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ju</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2023a</year>). <article-title>Reconstructing annual XCO<sub>2</sub> at a 1 km &#xd7; 1 km spatial resolution across China from 2012 to 2019 based on a spatial CatBoost method</article-title>. <source>Environ. Res.</source> <volume>236</volume> (<issue>Pt 2</issue>), <fpage>116866</fpage>. <pub-id pub-id-type="doi">10.1016/j.envres.2023.116866</pub-id>
<pub-id pub-id-type="pmid">37567384</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Peng</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Su</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Quan</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2023b</year>). <article-title>A novel short-term household load forecasting method combined BiLSTM with trend feature extraction</article-title>. <source>Energy Rep.</source> <volume>9</volume>, <fpage>1013</fpage>&#x2013;<lpage>1022</lpage>. <pub-id pub-id-type="doi">10.1016/j.egyr.2023.05.041</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xiang</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Yan</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Mohamed Taha</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Super-resolution reconstruction of GOSAT CO<sub>2</sub> products using bicubic interpolation</article-title>. <source>Geocarto Int.</source> <volume>37</volume> (<issue>27</issue>), <fpage>15187</fpage>&#x2013;<lpage>15211</lpage>. <pub-id pub-id-type="doi">10.1080/10106049.2022.2096699</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Feng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>Z.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>New 30-m resolution dataset reveals declining soil erosion with regional increases across Chinese mainland (1990&#x2013;2022)</article-title>. <source>Remote Sens. Environ.</source> <volume>323</volume>, <fpage>114681</fpage>. <pub-id pub-id-type="doi">10.1016/j.rse.2025.114681</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Feng</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Xiang</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Qin</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Spatio-temporal validation of AIRS CO<sub>2</sub> observations using GAW, HIPPO and TCCON</article-title>. <source>Remote Sens.</source> <volume>12</volume> (<issue>21</issue>), <fpage>3583</fpage>. <pub-id pub-id-type="doi">10.3390/rs12213583</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Inter-comparison and evaluation of global satellite XCO<sub>2</sub> products</article-title>. <source>Geo-spatial Inf. Sci.</source> <volume>28</volume> (<issue>1</issue>), <fpage>131</fpage>&#x2013;<lpage>144</lpage>. <pub-id pub-id-type="doi">10.1080/10095020.2023.2252017</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yver-Kwok</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Philippon</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Bergamaschi</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Biermann</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Calzolari</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>H.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Evaluation and optimization of ICOS atmosphere station data as part of the labeling process</article-title>. <source>Atmos. Meas. Tech.</source> <volume>14</volume> (<issue>1</issue>), <fpage>89</fpage>&#x2013;<lpage>116</lpage>. <pub-id pub-id-type="doi">10.5194/amt-14-89-2021</pub-id>
</mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zaini</surname>
<given-names>N. A.</given-names>
</name>
<name>
<surname>Ahmed</surname>
<given-names>A. N.</given-names>
</name>
<name>
<surname>Ean</surname>
<given-names>L. W.</given-names>
</name>
<name>
<surname>Chow</surname>
<given-names>M. F.</given-names>
</name>
<name>
<surname>Malek</surname>
<given-names>M. A.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Forecasting of fine particulate matter based on LSTM and optimization algorithm</article-title>. <source>J. Clean. Prod.</source> <volume>427</volume>, <fpage>139233</fpage>. <pub-id pub-id-type="doi">10.1016/j.jclepro.2023.139233</pub-id>
</mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>China&#x2019;s energy transition pathway in a carbon neutral vision</article-title>. <source>Engineering</source> <volume>14</volume>, <fpage>64</fpage>&#x2013;<lpage>76</lpage>. <pub-id pub-id-type="doi">10.1016/j.eng.2021.09.004</pub-id>
</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Deriving gapless CO<sub>2</sub> concentrations using a geographically weighted neural network: China, 2014&#x2013;2020</article-title>. <source>Int. J. Appl. Earth Observation Geoinformation</source> <volume>114</volume>, <fpage>103063</fpage>. <pub-id pub-id-type="doi">10.1016/j.jag.2022.103063</pub-id>
</mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>X.-D.</given-names>
</name>
<name>
<surname>Zhong</surname>
<given-names>J.-T.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>L. F.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>S. Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>D. Y.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>A representative CO<sub>2</sub> emissions pathway for China toward carbon neutrality under the Paris Agreement&#x27;s 2 &#xb0;C target</article-title>. <source>Adv. Clim. Change Res.</source> <volume>14</volume> (<issue>6</issue>), <fpage>941</fpage>&#x2013;<lpage>951</lpage>. <pub-id pub-id-type="doi">10.1016/j.accre.2023.11.004</pub-id>
</mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zrira</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Kamal-Idrissi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Farssi</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Khan</surname>
<given-names>H. A.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Time series prediction of sea surface temperature based on BiLSTM model with attention mechanism</article-title>. <source>J. Sea Res.</source> <volume>198</volume>, <fpage>102472</fpage>. <pub-id pub-id-type="doi">10.1016/j.seares.2024.102472</pub-id>
</mixed-citation>
</ref>
</ref-list>
</back>
</article>