<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Ind. Eng.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Industrial Engineering</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Ind. Eng.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2813-6047</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1769776</article-id>
<article-id pub-id-type="doi">10.3389/fieng.2026.1769776</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>A hybrid neural network integrating attention mechanism for time series and non-time series multi-factor electric vehicle energy consumption prediction</article-title>
<alt-title alt-title-type="left-running-head">Zhang et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fieng.2026.1769776">10.3389/fieng.2026.1769776</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Zhang</surname>
<given-names>Wenqiang</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2394949"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chai</surname>
<given-names>Ruisheng</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Li</surname>
<given-names>Mingzhe</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Mu</surname>
<given-names>Yashuang</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Li</surname>
<given-names>Peng</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Gen</surname>
<given-names>Mitsuo</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2104850"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>School of Artificial Intelligence and Big Data, Henan University of Technology</institution>, <city>Zhengzhou</city>, <country country="CN">China</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>College of Information Science and Engineering, Henan University of Technology</institution>, <city>Zhengzhou</city>, <country country="CN">China</country>
</aff>
<aff id="aff3">
<label>3</label>
<institution>Institute for Complexity Science, Henan University of Technology</institution>, <city>Zhengzhou</city>, <country country="CN">China</country>
</aff>
<aff id="aff4">
<label>4</label>
<institution>Research Institute for Science &#x26; Technology, Tokyo University of Science</institution>, <city>Tokyo</city>, <country country="JP">Japan</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Wenqiang Zhang, <email xlink:href="mailto:zhangwq@haut.edu.cn">zhangwq@haut.edu.cn</email>; Peng Li, <email xlink:href="mailto:lipeng@haut.edu.cn">lipeng@haut.edu.cn</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-18">
<day>18</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>4</volume>
<elocation-id>1769776</elocation-id>
<history>
<date date-type="received">
<day>17</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>27</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Zhang, Chai, Li, Mu, Li and Gen.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Zhang, Chai, Li, Mu, Li and Gen</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-18">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>In recent years, electric vehicles (EVs) have garnered increasing consumer favor due to their low energy consumption and mechanical simplicity; however, the persistent limitation of short driving range has not been fundamentally resolved and continues to fuel drivers&#x2019; range anxiety. To enhance the accuracy of EV energy-consumption prediction, this paper categorizes influencing factors from multiple perspectives and proposes a hybrid neural-network prediction model that integrates temporal features and an attention mechanism.</p>
</sec>
<sec>
<title>Methods</title>
<p>The model first partitions the dataset into time-series and non-time-series subsets based on temporal correlation. A convolutional neural network (CNN) is then employed to extract and reconstruct features from the time-series data to reduce computational complexity, after which an attention-enhanced bidirectional gated recurrent unit (AtBiGRU) further captures sequential dependencies. The resulting fitted representations, together with the non-time-series variables, are fed into a deep neural network (DNN) for ensemble learning, yielding precise energy-consumption predictions. By processing sequential and non-sequential data separately, the method effectively improves computational efficiency and model expressiveness.</p>
</sec>
<sec>
<title>Results</title>
<p>Experimental results demonstrate that the proposed CNN&#x2013;AtBiGRU&#x2013;DNN hybrid model achieves higher prediction accuracy and faster convergence than baseline algorithms.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>The proposed model validates its effectiveness and advancement in EV energy-consumption prediction.</p>
</sec>
</abstract>
<kwd-group>
<kwd>attention mechanism</kwd>
<kwd>bidirectional gated recurrent unit</kwd>
<kwd>electric vehicles</kwd>
<kwd>energy consumption prediction</kwd>
<kwd>neural network</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research work is supported by the National Natural Science Foundation of China (62006071), Natural Science Foundation of Henan Province (252300421884), and Zhengzhou Science and Technology Collaborative Innovation Project (21ZZXTCX19).</funding-statement>
</funding-group>
<counts>
<fig-count count="13"/>
<table-count count="5"/>
<equation-count count="16"/>
<ref-count count="30"/>
<page-count count="14"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Systems Engineering</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Due to the escalating issues of rapid depletion of fossil energy sources, significant increase in air pollution and the exacerbation of climate change, global politicians and academics are currently prioritizing the creation of a transportation system that relies on renewable energy sources. An exceptionally encouraging advancement in this realm involves the advocacy for the adoption of electric vehicles (EVs) as a gradual substitution for internal combustion engine vehicles (ICEs) and hybrid electric vehicles (HEVs) (<xref ref-type="bibr" rid="B16">Needell et al., 2016</xref>). Compared to traditional fuel vehicles, EVs have advantages such as zero emissions, high energy efficiency, simpler structure and lower noise. However, their disadvantage lies in their short range (<xref ref-type="bibr" rid="B9">Kucukvar et al., 2022</xref>). Therefore, one of the main areas of research focus in recent years has been how to predict pure EVs energy consumption precisely. Energy consumption prediction refers to analyzing energy consumption by analyzing the driving demand and influencing factors in the future. More accurate estimation of the range of EVs can not only effectively improve users&#x2019; driving experience, but also when drivers have energy consumption data under specific vehicle conditions, they can perform precise energy management, plan charging time in advance, and avoid emergency charging after the battery runs out. Emergency charging increases battery stress and heat due to high current input, which has an adverse impact on battery life. As a result, accurate energy consumption prediction is instrumental in optimizing battery usage strategies and scientifically extending battery life.</p>
<p>The energy consumption of EVs is influenced by numerous factors, and different factors will affect each other as shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. During the driving process, the driver, electric vehicle, and environment form a closed loop, where the driver interacts with the environment while driving, and changes in the environment in turn affect the driver&#x2019;s judgment and operation. Each part of this closed loop will possess an effect on the energy consumption of EVs, and its influencing factors are mainly attributed to the following three categories.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Classification of factors affecting EVs energy consumption.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g001.tif">
<alt-text content-type="machine-generated">Concept map diagram illustrating factors affecting vehicles, divided into vehicles, drivers, and environments. Subcategories include vehicle type, battery condition, driver age and style, road environment, and natural environment elements such as weather and traffic flow.</alt-text>
</graphic>
</fig>
<p>It can be seen from the above that the energy consumption of EVs is impacted by a variety of variables, and there are correlations between various factors, so it is difficult to establish a corresponding physical model. At present, common research methods are roughly divided into two categories. The first category is based on the battery state of charge (SOC) as the main research object. By studying the battery SOC during the driving of EVs, a relevant energy consumption prediction model is established. Battery SOC reflects the energy stored in the battery at the current moment and is one of the most critical status quantities of the battery. Currently commonly used research methods based on battery status can be roughly divided into traditional estimation methods (<xref ref-type="bibr" rid="B24">Wu et al., 2023</xref>), adaptive filtering methods (<xref ref-type="bibr" rid="B28">Ye et al., 2022</xref>) and intelligent estimation methods (<xref ref-type="bibr" rid="B8">Jiaqiang et al., 2022</xref>). Studying the battery SOC can establish a relatively accurate battery energy consumption model, but the disadvantage is that this method is mostly used in laboratory environments, and the impact of a certain factor on the battery is considered alone, which may occur in actual road driving scenarios. The combination of these conditions can easily lead to a large error between the estimated results and the actual results. The second type of research method is based on being data-driven, which means making decisions centered around data. The process can be summarized as &#x201c;data collection - data preprocessing - model establishment - model evaluation - model decision-making&#x201d; (<xref ref-type="bibr" rid="B20">Sun et al., 2020</xref>). The specific implementation method is to first use a test run to conduct tests or use relevant software to conduct simulation tests to obtain a large amount of data. 
After cleaning and classifying the data, build a model according to its characteristics and calculate the error, ultimately selecting the optimal model to predict and analyze the problem.</p>
<p>There are several key phases in the data-driven development process such as statistical methods, traditional machine learning, and deep learning. Statistical methods represented by Kalman filters (<xref ref-type="bibr" rid="B15">Ma et al., 2021</xref>) and early artificial intelligence algorithms such as random forests (<xref ref-type="bibr" rid="B17">Rathore et al., 2023</xref>) and support vector machines (<xref ref-type="bibr" rid="B14">Liu et al., 2022</xref>) can all solve corresponding problems in the scenarios where their algorithms are applicable, but the similarity is that most of them can only analyze a certain characteristic of the object. In terms of EVs energy consumption prediction, statistical methods and traditional machine learning cannot solve this problem well due to the many influencing factors described above. Deep learning is a new research direction in the field of machine learning. Its characteristic is that it can extract and fit the features of input data through multiple multi-layer neural networks, and increase its representation capabilities through a series of nonlinear transformations (<xref ref-type="bibr" rid="B6">How et al., 2020</xref>). It is suitable for solving problems where there is a highly complex and difficult-to-understand nonlinear relationship between input samples and output data.</p>
<p>In this paper, the collected driving data is divided into time series and non-time series according to its time characteristics. Time series data is related to specific travel times and can be used to analyze the trends in travel distance and average speed over different time periods. Non-time series data describes the static attributes or environmental conditions of the vehicle, which do not change over time. On this basis, a combined neural network named CNN-AtBiGRU-DNN, which is based on convolutional neural network (CNN), bidirectional gated recurrent unit integrating attention mechanism (AtBiGRU) and deep neural network (DNN), is proposed. The main contributions of the paper can be briefly summarized as follows:<list list-type="order">
<list-item>
<p>It takes into account a variety of factors that can affect the energy consumption of EVs, including the environment, drivers, and vehicles, and divides them into time series and non-time series based on data characteristics.</p>
</list-item>
<list-item>
<p>By introducing attention mechanism in BiGRU network, the model can focus more on key time series features, which helps to capture important patterns and information in time series and improve prediction performance.</p>
</list-item>
<list-item>
<p>In the proposed CNN-AtBiGRU-DNN, CNN performs dimensionality reduction processing on the data, AtBiGRU is used to process time series, and DNN is used to process non-time series and time series fitting results. It fully utilizes the benefits of various network structures in learning characteristics to improve the model&#x2019;s capacity for learning.</p>
</list-item>
</list>
</p>
<p>The subsequent sections are organized as follows. <xref ref-type="sec" rid="s2">Section 2</xref> offers a review of related problems and the algorithms used to solve them. The proposed CNN-AtBiGRU-DNN is detailed in <xref ref-type="sec" rid="s3">Section 3</xref>. Both the computational evaluation and comparison results are demonstrated in <xref ref-type="sec" rid="s4">Section 4</xref>. Lastly, conclusions and future research are drawn in <xref ref-type="sec" rid="s5">Section 5</xref>.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Related work</title>
<p>In the last several years, researchers have paid widespread attention to how to predict the energy consumption of EVs accurately. In this study, a number of factors were taken into consideration while predicting the energy consumption of EVs. As far as we are aware, there are relatively few studies that simultaneously consider three factors: EVs, drivers, and environmental conditions. Next, the closely related work is reviewed.</p>
<sec id="s2-1">
<label>2.1</label>
<title>Research objectives related to the problem</title>
<p>Due to three factors affecting the energy consumption of EVs, the research work on these three aspects will be analyzed and summarized below.</p>
<p>From the driver&#x2019;s perspective, their age, driving style, driving speed, and type of license all have an impact on EV energy consumption. <xref ref-type="bibr" rid="B3">Donkers et al. (2020)</xref> categorized driving styles as conservative, normal and aggressive based on driving speed, acceleration, lateral acceleration, and regenerative braking efficiency. <xref ref-type="bibr" rid="B4">Hai et al. (2023)</xref> concluded that vehicle driving behavior and acceleration have a significant impact on power consumption based on studies under real road conditions. <xref ref-type="bibr" rid="B23">Wager et al. (2016)</xref> selected two different electric vehicles for testing and found that as the driving speed increases, the energy consumption increases.</p>
<p>From the environmental point of view, the environment can be divided into two primary categories: the natural environment and the road environment. The natural environment includes such things as temperature, visibility, barometric pressure, humidity, and wind speed and direction, while the road environment includes gradient, road type, congestion, and number of intersections. With regard to the effect of the natural environment on the energy consumption of EVs, <xref ref-type="bibr" rid="B12">Liu et al. (2018)</xref> investigated that the ambient temperature affects the energy consumption by affecting the auxiliary loads and output energy loss. <xref ref-type="bibr" rid="B23">Wager et al. (2016)</xref> conducted a study that revealed that driving at higher speeds coupled with headwinds significantly impairs the efficiency of EVs thus significantly reducing its drivable range. <xref ref-type="bibr" rid="B10">Lee et al. (2024)</xref> concluded from comparative tests that low temperatures not only increase motor and battery energy consumptions but also hinder regenerative energy recovery during driving. <xref ref-type="bibr" rid="B1">Al-Wreikat et al. (2022)</xref> concluded that EVs have less energy consumption when driving in cities than when operating a vehicle in rural or highway areas. With regard to the effect of the road environment on the energy consumption of EVs, <xref ref-type="bibr" rid="B26">Yang et al. (2014)</xref> investigated how the slope of the road affected the amount of power used by EVs going uphill and downhill. <xref ref-type="bibr" rid="B29">Zhang and Yao (2015)</xref> concluded that when an EV needs to accelerate through an intersection, it is preferable to drive for a little amount of time at maximum acceleration to reduce energy consumption.</p>
<p>From the perspective of electric vehicles, total vehicle mass, battery performance, vehicle usage, vehicle load and battery status also have an effect on EVs energy consumption. <xref ref-type="bibr" rid="B27">Yang et al. (2023)</xref> concluded that usage and electricity consumption of air conditioners are higher in winter than in other seasons and correlation analysis showed that energy consumption is most correlated with air conditioner usage. <xref ref-type="bibr" rid="B22">Wager et al. (2014)</xref> proposed that there exists a notable disparity in energy consumption and driving range between electric vehicles equipped with manual and automatic transmission systems. <xref ref-type="bibr" rid="B5">Hao et al. (2020)</xref> demonstrated that the electricity consumption of electric vehicles varies significantly based on their application. Specifically, the identical EV model, when utilized for ridesharing or taxi services, experiences significantly increased driving mileage and requires more frequent recharging.</p>
</sec>
<sec id="s2-2">
<label>2.2</label>
<title>Related problem solving algorithms</title>
<p>In order to make precise predictions regarding the energy consumption of EVs, many researchers have proposed various solutions. Early solutions were mostly based on research on battery SOC, which often performed well in experiments but did not perform well in real environments. <xref ref-type="bibr" rid="B2">Chen et al. (2019)</xref> proposed a new parameter backtracking strategy to reconstruct the open circuit voltage (OCV) and SOC curves based on the findings of the SOC estimate and the whole online parameter identification. <xref ref-type="bibr" rid="B19">Shrivastava et al. (2021)</xref> established the new dual forgetting factor-based adaptive extended Kalman filter (DFFAEKF) to estimate the SOC of Lithium-ion batteries in EV. <xref ref-type="bibr" rid="B25">Xiao et al. (2022)</xref> used recursive least squares with a forgetting factor (FFRLS) to identify the parameters, including the OCV values and the accessible capacity, which were then input into an AEKF to estimate SOC.</p>
<p>With the rise of deep learning, it has been widely used to predict the energy consumption of EVs. <xref ref-type="bibr" rid="B13">Liu et al. (2021)</xref> presented an extended Kalman filter (EKF) based data-driven approach for estimating the state of charge (SOC) in lithium-ion batteries, which effectively prevents overcharging and discharging of the battery, thereby extending its lifespan. <xref ref-type="bibr" rid="B21">Sun et al. (2023)</xref> introduced an energy management strategy leveraging deep learning and developed an enhanced model predictive control algorithm (LSTM-IMPC) that incorporates long short-term memory (LSTM). The proposed energy management strategy based on LSTM-IMPC has a globally optimal energy-saving efficiency in different environments. <xref ref-type="bibr" rid="B30">Zraibi et al. (2021)</xref> proposed a hybrid approach called CNN-LSTM-DNN to improve the long-term predictive performance of lithium-ion batteries in EVs. Compared with a single deep learning method, the proposed algorithm has higher accuracy and lower error rate. <xref ref-type="bibr" rid="B7">Hua et al. (2022)</xref> used a neural network architecture named bidirectional long short-term memory (BiLSTM) to accurately estimate energy consumption of EVs in the presence of insufficient data and irregular driving trajectories.</p>
</sec>
<sec id="s2-3">
<label>2.3</label>
<title>Research gap</title>
<p>In the current research endeavors, while substantial progress has been made in elucidating the influence of individual or multiple factors on the energy consumption of EVs, the majority of studies have fallen short in comprehensively accounting for the synergistic effects arising from the intricate interplay between EVs, drivers, and environment. This limitation inherently impedes the comprehensiveness and precision of EVs energy consumption predictions. Scholars are increasingly resorting to deep learning techniques to predict energy consumption, yet existing research predominantly focuses on offline predictions. However, in practical scenarios, real-time prediction holds paramount importance for drivers to plan their journeys and for vehicle management systems to optimize operations. Therefore, devising novel methodologies that can achieve both real-time and precise prediction of EVs energy consumption represents a crucial research trajectory that demands further exploration. This research trajectory holds the potential to significantly enhance the accuracy and applicability of energy consumption predictions, ultimately contributing to a more efficient and sustainable EVs ecosystem.</p>
<p>In recent years, attention mechanisms have attracted widespread attention in different research fields. The attention mechanism essentially reallocates resources based on the importance of objects, with the core idea of finding correlations between them based on existing data and highlighting certain important features. Combining attention mechanisms with different neural networks has become a research hotspot. <xref ref-type="bibr" rid="B18">Shen et al. (2023)</xref> utilized attention mechanisms to assist the model in extracting information from the traffic conditions of the road network that is crucial for the current task. <xref ref-type="bibr" rid="B11">Li et al. (2023)</xref> suggested a hybrid model that combines a DNN, temporal convolutional network (TCN), gated recurrent unit (GRU), and a dual attention mechanism. The TCN is integrated with a feature attention mechanism, which is employed to create an encoder module and capture the phenomenon of regeneration in battery capacity.</p>
<p>Addressing the limitations of prior research, this paper categorizes the collected data. From the perspective of the EVs, the impact of the use of air conditioning and parking heaters on the energy consumption of electric vehicles is taken into account. From an environmental perspective, the influence of road conditions, tire types, and the number of cities passed through on the energy consumption of EVs is taken into consideration. From the driver&#x2019;s perspective, the influence of various driving styles on the energy consumption of EVs is taken into account. Based on this problem, a combined CNN-AtBiGRU-DNN neural network is proposed: the CNN performs dimensionality reduction on the data, the AtBiGRU is used to process the time series, and the DNN is used to process the non-time-series data together with the time-series fitting results.</p>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Model construction</title>
<sec id="s3-1">
<label>3.1</label>
<title>CNN model</title>
<p>Convolutional neural networks (CNNs) abstract raw data into higher-level representations through local connectivity and weight sharing, enabling effective automated feature extraction. A typical CNN comprises convolutional, pooling, and fully connected layers; this architecture markedly reduces the number of parameters and overall complexity, thereby improving generalization. In the convolutional layers, multiple kernels operate in a sliding-window manner to capture local patterns at different scales and orientations. Pooling layers downsample the feature maps, compressing dimensionality while enhancing robustness and translation invariance. The fully connected layers further integrate high-level features to perform the final regression task. With this hierarchical design, CNNs automatically learn high-quality feature representations, substantially reducing the burden of manual feature engineering and data reconstruction.</p>
<p>The CNN used in this study is illustrated in <xref ref-type="fig" rid="F2">Figure 2</xref>, and its core hyperparameters are optimized as follows: the network contains two convolutional stages, each employing 64 filters with a kernel size of 1 (i.e., 1 <inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:mo>&#xd7;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 1). This configuration follows an incremental strategy intended to progressively enlarge the effective receptive field and strengthen global feature extraction, thereby improving the quality and efficiency of feature representations while keeping model complexity under control.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>The structure of CNN.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g002.tif">
<alt-text content-type="machine-generated">Diagram illustrating a convolutional neural network architecture with labeled layers including input layer, convolutional layer, pooling layer, fully connected layer, and output layer, with arrows indicating the flow of data between each layer.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>GRU model</title>
<p>GRU is proficient in capturing semantic associations within extended sequences effectively, and thus is suitable for dealing with the long-term dependency phenomenon present in recurrent neural network (RNN). Compared to LSTM characterized by a large number of parameters, GRU model mitigates the gradient vanishing problem by lowering the quantity of parameters to some extent. GRU takes the long-term memory state at a certain moment as its output and simultaneously modifies that long-term memory state during the output process, thus giving it fewer inputs and a more simplified structure compared to LSTM. The structure is shown in <xref ref-type="fig" rid="F3">Figure 3</xref> and the computation of its hidden state can be derived from <xref ref-type="disp-formula" rid="e1">Equations 1</xref>&#x2013;<xref ref-type="disp-formula" rid="e4">4</xref>.<disp-formula id="e1">
<mml:math id="m2">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>
<disp-formula id="e2">
<mml:math id="m3">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>h</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>w</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>
<disp-formula id="e3">
<mml:math id="m4">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>
<disp-formula id="e4">
<mml:math id="m5">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>where <inline-formula id="inf2">
<mml:math id="m6">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the update gate, <inline-formula id="inf3">
<mml:math id="m7">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the reset gate, <inline-formula id="inf4">
<mml:math id="m8">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mo>&#x303;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the candidate set, <inline-formula id="inf5">
<mml:math id="m9">
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the activation function Sigmoid, <inline-formula id="inf6">
<mml:math id="m10">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the network input at the current moment, <inline-formula id="inf7">
<mml:math id="m11">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the network output at the previous moment, and <inline-formula id="inf8">
<mml:math id="m12">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> denotes that the link propagates the data forward with weight <inline-formula id="inf9">
<mml:math id="m13">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>. The update gate and reset gate are two important components in GRU, which are employed to regulate the extent to which the state information from the preceding moment influences the present state. Through the modification of the values associated with the update gate and reset gate, the GRU model can flexibly control the transmission and influence of the previous state information on the current state, so as to efficiently deal with long-term dependencies in sequences. This mechanism makes the GRU model perform well in some sequence modeling tasks.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>The structure of GRU.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g003.tif">
<alt-text content-type="machine-generated">Diagram illustrating a Gated Recurrent Unit neural network cell, featuring inputs h sub t minus 1 and x sub t, with reset gate r sub t, update gate z sub t, and candidate activation h tilde sub t, using sigmoid and tanh activation functions and performing elementwise multiplication and addition to output h sub t.</alt-text>
</graphic>
</fig>
<p>To create a new hidden state, the GRU first takes as inputs the current input, the hidden state from the previous instant, and the output from the previous moment. Then combines these three inputs using an activation function. The GRU will then choose whether to utilize the new hidden state <italic>in lieu</italic> of the hidden state from the previous instant depending on the update gate&#x2019;s output. To obtain the final output, GRU inputs the hidden state into the output layer after updating the hidden state based on the output of the reset gate.</p>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Attention mechanism</title>
<p>Attention mechanism is a mechanism that mimics the allocation of attention in the human brain, which is able to focus attention on important areas at a specific moment, ignoring or diminishing attention to other areas, so as to obtain more detailed information and filter out useless information. The core idea is to flexibly and reasonably adjust the attention to information, amplify the needed information and suppress irrelevant information.</p>
<p>As a form of attention mechanism, the single-head attention mechanism boasts a straightforward and concise structure, efficiently computing attention scores via a single weight matrix. During the calculation of attention weights, it fully utilizes data from the entire input sequence, incorporating both prior and subsequent information, to identify long-term dependencies within the sequence. Additionally, the single-head attention mechanism considers the input sequence as a matrix and derives the predicted sequence directly through matrix transformation. This approach significantly enhances the network&#x2019;s parallel processing capabilities, consequently speeding up both model training and inference processes. The calculation formula for the single-head attention mechanism is shown as <xref ref-type="disp-formula" rid="e5">Equations 5</xref>&#x2013;<xref ref-type="disp-formula" rid="e7">7</xref>.<disp-formula id="e5">
<mml:math id="m14">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x22c5;</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msqrt>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>
<disp-formula id="e6">
<mml:math id="m15">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">S</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">f</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">m</mml:mi>
<mml:mi mathvariant="normal">a</mml:mi>
<mml:mi mathvariant="normal">x</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="normal">S</mml:mi>
<mml:mi mathvariant="normal">c</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mi mathvariant="normal">e</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mspace width="0.3333em"/>
</mml:mrow>
</mml:math>
<label>(6)</label>
</disp-formula>
<disp-formula id="e7">
<mml:math id="m16">
<mml:mrow>
<mml:mi mathvariant="normal">A</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">e</mml:mi>
<mml:mi mathvariant="normal">n</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">i</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">n</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mi>K</mml:mi>
<mml:mo>,</mml:mo>
<mml:mspace width="0.3333em"/>
<mml:mi>V</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>K</mml:mi>
</mml:mrow>
</mml:mover>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munder>
</mml:mstyle>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi>v</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mspace width="0.3333em"/>
<mml:mspace width="0.3333em"/>
</mml:mrow>
</mml:math>
<label>(7)</label>
</disp-formula>where <inline-formula id="inf10">
<mml:math id="m17">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the <inline-formula id="inf11">
<mml:math id="m18">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>h</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> query vector, <inline-formula id="inf12">
<mml:math id="m19">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the <inline-formula id="inf13">
<mml:math id="m20">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>h</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> key vector, <inline-formula id="inf14">
<mml:math id="m21">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the dimension of the key vector, <inline-formula id="inf15">
<mml:math id="m22">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the attention weight of the <inline-formula id="inf16">
<mml:math id="m23">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>h</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> query vector to the <inline-formula id="inf17">
<mml:math id="m24">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>h</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> key vector, <inline-formula id="inf18">
<mml:math id="m25">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>v</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the <inline-formula id="inf19">
<mml:math id="m26">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>h</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> value vector, <inline-formula id="inf20">
<mml:math id="m27">
<mml:mrow>
<mml:mi>K</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf21">
<mml:math id="m28">
<mml:mrow>
<mml:mi>V</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> are the sets of all key vectors and value vectors. Firstly, calculate the score between <inline-formula id="inf22">
<mml:math id="m29">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf23">
<mml:math id="m30">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> through dot product operation. Then, use the <inline-formula id="inf24">
<mml:math id="m31">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>f</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> function to normalize these scores and obtain attention weights. Finally, based on these weights, <inline-formula id="inf25">
<mml:math id="m32">
<mml:mrow>
<mml:mi>V</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is weighted and summed to obtain the final attention output.</p>
</sec>
<sec id="s3-4">
<label>3.4</label>
<title>BiGRU model with an attention mechanism</title>
<p>The transmission of GRU is unidirectional from front to back, which is often applied to scenarios where the model output is only related to historical information, while the output of the EV energy consumption time series is not only affected by historical information, but also greatly related to the information of the future moments. To address this problem, BiGRU is used in this paper to construct two GRU structures with opposite directions under the premise of maintaining the unidirectional and efficient time-series information processing capability of GRU, which is calculated as <xref ref-type="disp-formula" rid="e8">Equations 8</xref>&#x2013;<xref ref-type="disp-formula" rid="e10">10</xref>.<disp-formula id="e8">
<mml:math id="m33">
<mml:mrow>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d7;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>G</mml:mi>
<mml:mi>R</mml:mi>
<mml:mi>U</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d7;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(8)</label>
</disp-formula>
<disp-formula id="e9">
<mml:math id="m34">
<mml:mrow>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>G</mml:mi>
<mml:mi>R</mml:mi>
<mml:mi>U</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(9)</label>
</disp-formula>
<disp-formula id="e10">
<mml:math id="m35">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d7;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>v</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2217;</mml:mo>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>b</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(10)</label>
</disp-formula>where the nonlinear transformation corresponding to the input temporal data is represented by the GRU function; <inline-formula id="inf26">
<mml:math id="m36">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the input vector input to the memory unit at moment <inline-formula id="inf27">
<mml:math id="m37">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>; <inline-formula id="inf28">
<mml:math id="m38">
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d7;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> is the output of the forward hidden layer; <inline-formula id="inf29">
<mml:math id="m39">
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> is the output of the inverse hidden layer; <inline-formula id="inf30">
<mml:math id="m40">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf31">
<mml:math id="m41">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>v</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> denote the weights associated with the respective forward hidden layer state <inline-formula id="inf32">
<mml:math id="m42">
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d7;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> and the corresponding inverse hidden layer state <inline-formula id="inf33">
<mml:math id="m43">
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x20d6;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> at moment <inline-formula id="inf34">
<mml:math id="m44">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>; and <inline-formula id="inf35">
<mml:math id="m45">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>b</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> denotes the bias of the BiGRU hidden layer corresponding to the moment <inline-formula id="inf36">
<mml:math id="m46">
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>.<disp-formula id="e11">
<mml:math id="m47">
<mml:mrow>
<mml:mi>y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3c6;</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>W</mml:mi>
<mml:mo>&#x2217;</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(11)</label>
</disp-formula>
</p>
<p>In this paper, a BiGRU-based attention mechanism model is used. After extracting the global features from the time-series data using the BiGRU network, the attention mechanism computes the correlation between each input and the output result, creates attention weights based on the correlation. The attention weights are multiplied by the output of the BiGRU network to obtain the fitted values. The network structure is shown in <xref ref-type="fig" rid="F4">Figure 4</xref>, where <inline-formula id="inf37">
<mml:math id="m48">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> refers to the attention probability value assigned by the attention layer to the output of the BiGRU, and <inline-formula id="inf38">
<mml:math id="m49">
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the final output value of the network.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>The structure of BiGRU with attention mechanism.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g004.tif">
<alt-text content-type="machine-generated">Diagram illustrating a bidirectional recurrent neural network where input nodes x1 to xt feed information into forward and backward hidden states, which are then aggregated into combined hidden states h1 to ht, and these are weighted by a1 to at and summed to produce output y.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-5">
<label>3.5</label>
<title>DNN mechanism</title>
<p>The structure of DNN is an unsupervised multilayer neural network. The gradual abstraction and representation of the input data is achieved through layer-by-layer feature learning, where the inputs to the following layer are the output features of the preceding layer. DNN has the capability to map features from existing spatial samples to a more abstract feature space, facilitating the learning of an improved feature representation for the input data. The feature transformation of deep neural networks involves multiple nonlinear mappings and thus has the ability to fit highly complex functions. <xref ref-type="fig" rid="F5">Figure 5</xref> illustrates the specific structure of a deep neural network.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>The structure of DNN.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g005.tif">
<alt-text content-type="machine-generated">Diagram showing a simple neural network with an input layer of four nodes, one hidden layer with four nodes, and an output layer with one node, illustrating interconnected pathways between layers.</alt-text>
</graphic>
</fig>
<p>DNN is divided into input layer, hidden layer and output layer. The input layer receives raw data, the hidden layer learns features, and the output layer generates results. The layers are fully connected to each other, and the complex relationship between input and output is learned by readjusting the connection weights. Adjacent layers satisfy the linear relationship shown in <xref ref-type="disp-formula" rid="e11">Equation 11</xref>, where <inline-formula id="inf39">
<mml:math id="m50">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the input vector, <inline-formula id="inf40">
<mml:math id="m51">
<mml:mrow>
<mml:mi>&#x3c6;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the activation function. In this paper, the <inline-formula id="inf41">
<mml:math id="m52">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>L</mml:mi>
<mml:mi>u</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> function is selected as the activation function based on the features of the problem, <inline-formula id="inf42">
<mml:math id="m53">
<mml:mrow>
<mml:mi>W</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the matrix of weight coefficient, <inline-formula id="inf43">
<mml:math id="m54">
<mml:mrow>
<mml:mi>b</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the bias vector and <inline-formula id="inf44">
<mml:math id="m55">
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the neuron output (<xref ref-type="disp-formula" rid="e11">Equation 11</xref>).</p>
</sec>
<sec id="s3-6">
<label>3.6</label>
<title>CNN-AtBiGRU-DNN mechanism</title>
<p>This study integrates a convolutional neural network (CNN), an attention-enhanced bidirectional gated recurrent unit (AtBiGRU), and a deep neural network (DNN) to form a hybrid architecture (CNN&#x2013;AtBiGRU&#x2013;DNN) that fully exploits the strengths of each component in feature extraction and sequence modeling. In this framework, the CNN first extracts local patterns from raw time-series data and, through convolution and pooling operations, enhances feature representations while reducing dimensionality, thereby providing more expressive inputs for subsequent sequence modeling. The resulting feature maps are then flattened and fed into a bidirectional GRU (BiGRU), which captures temporal dependencies in both forward and backward directions to learn long- and short-range dynamics more comprehensively.</p>
<p>To further increase sensitivity to salient temporal information, an attention mechanism is applied on top of the BiGRU (AtBiGRU). By assigning learnable weights to the hidden states across time steps, the model autonomously focuses on those segments most influential for energy-consumption prediction, effectively amplifying informative signals while suppressing noise. Finally, the attention-weighted sequence representation is fused with non-time-series features and jointly fed into a fully connected DNN to produce the final integrated prediction. This design not only leverages the complementarity between temporal and non-temporal information but also, through structured feature extraction and attention weighting, markedly enhances the model&#x2019;s expressive power and interpretability. <xref ref-type="fig" rid="F6">Figure 6</xref> illustrates its particular procedure, the specific procedure is shown in <xref ref-type="statement" rid="Algorithm_1">Algorithm 1</xref>.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>CNN-AtBiGRU-DNN structure.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g006.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a six-step data analysis process: Step 1 shows time series and non-time series plots; Step 2 shows a CNN diagram; Step 3 features an AtBiGRU architecture; Step 4 displays data merging; Step 5 shows a deep neural network; Step 6 presents output results.</alt-text>
</graphic>
</fig>
<p>
<statement content-type="algorithm" id="Algorithm_1">
<label>Algorithm 1</label>
<title>CNN-AtBiGRU-DNN.</title>
<p>
<list list-type="simple">
<list-item>
<p>
<bold>Require:</bold> Problem data (data), epoch.</p>
</list-item>
<list-item>
<p>
<bold>Ensure:</bold> Energy consumption forecast (predicated_values).</p>
</list-item>
<list-item>
<p>1:&#x2003;Initialize the array time_series_data and non_time_series_data;</p>
</list-item>
<list-item>
<p>2:&#x2003;Initialize CNN, AtBiGRU and DNN;</p>
</list-item>
<list-item>
<p>3:&#x2003;<bold>for</bold> each item in data <bold>do</bold>
</p>
</list-item>
<list-item>
<p>4:&#x2003;&#x2003;<bold>if</bold> this item is a time series <bold>then</bold> add item to time_series_data;</p>
</list-item>
<list-item>
<p>5:&#x2003;&#x2003;<bold>else</bold> add this item to non_time_series_data</p>
</list-item>
<list-item>
<p>6:&#x2003;&#x2003;<bold>end if</bold>
</p>
</list-item>
<list-item>
<p>7:&#x2003;<bold>end for</bold>
</p>
</list-item>
<list-item>
<p>8:&#x2003;<bold>while</bold> epoch <bold>do</bold>
</p>
</list-item>
<list-item>
<p>9:&#x2003;&#x2003;Use time_series_data to train on CNN and AtBiGRU to get array of fitted values fitted_values;</p>
</list-item>
<list-item>
<p>10:&#x2003;<bold>end while</bold>
</p>
</list-item>
<list-item>
<p>11:&#x2003;Merge non_time_series_data with fitted_values, and add results to data_with_fitted_values;</p>
</list-item>
<list-item>
<p>12:&#x2003;<bold>while</bold> epoch <bold>do</bold>
</p>
</list-item>
<list-item>
<p>13:&#x2003;&#x2003;Use data_with_fitted_values to train on the DNN to get an array of predicted_values;</p>
</list-item>
<list-item>
<p>14:&#x2003;<bold>end while</bold>
</p>
</list-item>
<list-item>
<p>15:&#x2003;Output predicted_values;</p>
</list-item>
</list>
</p>
</statement>
</p>
</sec>
<sec id="s3-7">
<label>3.7</label>
<title>Model evaluation indicators</title>
<p>After each round of training, the model was evaluated on the complete test set using four indicators: mean squared error (MSE), mean absolute error (MAE), root mean square error (RMSE), and coefficient of determination <inline-formula id="inf45">
<mml:math id="m56">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> to measure the predictive capacity of the model. The formulas for calculation are shown in <xref ref-type="disp-formula" rid="e12">Equations 12</xref>&#x2013;<xref ref-type="disp-formula" rid="e15">15</xref>, where <inline-formula id="inf46">
<mml:math id="m57">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the true value in the original data, <inline-formula id="inf47">
<mml:math id="m58">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the predicted value, and <inline-formula id="inf48">
<mml:math id="m59">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mo>&#x304;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the average of the true values.<disp-formula id="e12">
<mml:math id="m60">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mover>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munder>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
<label>(12)</label>
</disp-formula>
<disp-formula id="e13">
<mml:math id="m61">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mover>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munder>
</mml:mstyle>
<mml:mfenced open="|" close="|">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(13)</label>
</disp-formula>
<disp-formula id="e14">
<mml:math id="m62">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mover>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munder>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:math>
<label>(14)</label>
</disp-formula>
<disp-formula id="e15">
<mml:math id="m63">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mover>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munder>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:mrow>
<mml:mover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mover>
</mml:mrow>
</mml:mstyle>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munder>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:mi>Y</mml:mi>
</mml:mrow>
<mml:mo>&#x304;</mml:mo>
</mml:mover>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(15)</label>
</disp-formula>
<disp-formula id="e16">
<mml:math id="m64">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(16)</label>
</disp-formula>
</p>
</sec>
</sec>
<sec sec-type="results|discussion" id="s4">
<label>4</label>
<title>Experimental results and discussion</title>
<sec id="s4-1">
<label>4.1</label>
<title>Data analysis and processing</title>
<p>The data used in this study comes from a new energy vehicle testing platform in the U.S.A.<xref ref-type="fn" rid="fn1">
<sup>1</sup>
</xref> The dataset consists of historical operation data of an EV spanning January 2017 to June 2023 (2017.1&#x2013;2023.6). The detailed parameters of this EV are shown in <xref ref-type="table" rid="T1">Table 1</xref>. The original dataset was analyzed and screened to remove records irrelevant to this study, and the retained fields are listed in <xref ref-type="table" rid="T2">Table 2</xref>.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Basic parameters of electric vehicle.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Parameters</th>
<th align="center">Numerical value</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Wheelbases (mm)</td>
<td align="center">2631</td>
</tr>
<tr>
<td align="center">Maximum output power (kW)</td>
<td align="center">85</td>
</tr>
<tr>
<td align="center">Maximum speed (km/h)</td>
<td align="center">160</td>
</tr>
<tr>
<td align="center">Maximum torque (N/m)</td>
<td align="center">270</td>
</tr>
<tr>
<td align="center">Battery capacity (kWh)</td>
<td align="center">24.2</td>
</tr>
<tr>
<td align="center">Maximum motor horsepower (PS)</td>
<td align="center">109</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Data fields and definitions.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Field name</th>
<th align="left">Field definitions</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">fuel_date</td>
<td align="left">Date of data set collection</td>
</tr>
<tr>
<td align="left">trip_distance (km)</td>
<td align="left">Distance traveled in 1&#xa0;day</td>
</tr>
<tr>
<td align="left">tire_type</td>
<td align="left">Tire type (0: Winter tire, 1: Summer tire)</td>
</tr>
<tr>
<td align="left">City</td>
<td align="left">Number of cities traveled in the day</td>
</tr>
<tr>
<td align="left">motor_way</td>
<td align="left">Highway driving</td>
</tr>
<tr>
<td align="left">country_roads</td>
<td align="left">Rural road driving</td>
</tr>
<tr>
<td align="left">driving_style</td>
<td align="left">Driving style (0: Aggressive, 1: Normal, 2: Conservative)</td>
</tr>
<tr>
<td align="left">Consumption (kWh/100&#xa0;km)</td>
<td align="left">Energy consumption per 100&#xa0;km</td>
</tr>
<tr>
<td align="left">A/C</td>
<td align="left">Air conditioning usage (0: not used, 1: Used)</td>
</tr>
<tr>
<td align="left">park_heating</td>
<td align="left">Parking heater (0: not used, 1: Used)</td>
</tr>
<tr>
<td align="left">avg_speed (km/h)</td>
<td align="left">Average speed while traveling</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>For modeling and evaluation, the dataset was split chronologically into training, validation, and test subsets as follows: January 2017 to March 2022 (2017.1&#x2013;2022.3) was used as the training set (80% of the data), April 2022 to November 2022 (2022.4&#x2013;2022.11) was used as the validation set (10%), and December 2022 to June 2023 (2022.12&#x2013;2023.6) was used as the test set (10%). This single chronological split preserves temporal order and prevents information leakage from the future into the past.</p>
<p>During the pre-processing of automobile driving data, the following measures need to be implemented to ensure the credibility and accuracy of the data: First, outliers and redundant values in the data are detected and processed through appropriate statistical methods and domain knowledge to ensure the consistency and accuracy of the data. Second, for missing data caused by reasons such as delayed and lost sensor signals, appropriate filling methods, such as linear interpolation, need to be used to maintain data continuity and integrity. In addition, the distribution of missing data values is plotted through visualization techniques, which can provide a more intuitive understanding of the pattern and degree of missingness and provide a reference basis for subsequent processing and analysis. Finally, on the premise that the missing data comprises only a minimal fraction of the entire dataset and is of relatively low importance in the automobile driving data, it is recommended that this part of the missing data be discarded to ensure the quality of the data.</p>
<p>The distribution of energy consumption recorded in the dataset by year is shown in <xref ref-type="fig" rid="F7">Figure 7</xref>. It demonstrates that the use of air conditioning and the parking heater makes energy consumption much higher in winter than in summer, with the highest energy consumption close to twice the lowest value, which corresponds to real-world circumstances. <xref ref-type="fig" rid="F8">Figure 8</xref> shows the density of the total energy consumption distribution, and it is evident that the average value of energy consumption is 14.4&#xa0;kWh/100&#xa0;km, and 50% of the data is located between 11.8<inline-formula id="inf49">
<mml:math id="m65">
<mml:mrow>
<mml:mo>&#x223c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>16.7&#xa0;kWh/100&#xa0;km.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Distribution of energy consumption.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g007.tif">
<alt-text content-type="machine-generated">Line chart shows fuel consumption trends from 2017 to 2023, with seasonal peaks near 24 and lows around 10. Consumption fluctuates annually, indicating recurring seasonal patterns.</alt-text>
</graphic>
</fig>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Density map of total energy consumption distribution.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g008.tif">
<alt-text content-type="machine-generated">Histogram with a density curve displaying energy consumption per one hundred kilometers in kilowatt hours, showing most values are between ten and fifteen kilowatt hours per one hundred kilometers, with a right-skewed distribution.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s4-2">
<label>4.2</label>
<title>Experimental environment and parameter settings</title>
<p>In this paper, a personal computer serves as the experimental platform, the CPU is Intel (R) Core (TM) i5-12500H, 2.3&#xa0;GHz, the GPU is NVIDIA RTX3050, the model compilation environment is Python, and the experiments are carried out using open-source TensorFlow libraries and Keras libraries and employing the GPU engine.</p>
<p>The model iterations are set to 100, with the final energy consumption value designated as the prediction target. Prediction experiments are conducted based on the optimization results of each parameter, and the optimization target <inline-formula id="inf50">
<mml:math id="m66">
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is established as shown in <xref ref-type="disp-formula" rid="e16">Equation 16</xref>. The smaller <inline-formula id="inf51">
<mml:math id="m67">
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> indicates that the comprehensive prediction performance is better, and the parameter setting is more reasonable. The optimization parameters of the CNN are the number of convolutional neurons and the convolutional kernel size of the two-layer convolutional neural network, and the parameter setting method of incremental increase is adopted to better extract global features. The optimization parameters for BiGRU encompass the number of neurons, while those for the DNN involve the number of neurons in the hidden layer. Taking <inline-formula id="inf52">
<mml:math id="m68">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64,1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>128</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> as an example, it implies that the CNN network has 64 convolutional neurons with a convolutional kernel size of 1, the AtBiGRU network consists of 128 neurons, and the hidden layer of the DNN network has 64 neurons. The prediction performances of different network structures are shown in <xref ref-type="table" rid="T3">Table 3</xref>, and it can be seen that the selected network structure 4 has the best performance.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Model performance under different network structures.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Index</th>
<th align="center">Network structure</th>
<th align="center">
<inline-formula id="inf53">
<mml:math id="m69">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf54">
<mml:math id="m70">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf55">
<mml:math id="m71">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf56">
<mml:math id="m72">
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">1</td>
<td align="center">
<inline-formula id="inf57">
<mml:math id="m73">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64,1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>128</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">0.5372</td>
<td align="center">0.2583</td>
<td align="center">0.9707</td>
<td align="center">1.8257</td>
</tr>
<tr>
<td align="center">2</td>
<td align="center">
<inline-formula id="inf58">
<mml:math id="m74">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">128,1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>32</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">0.5103</td>
<td align="center">0.2394</td>
<td align="center">0.9792</td>
<td align="center">1.7709</td>
</tr>
<tr>
<td align="center">3</td>
<td align="center">
<inline-formula id="inf59">
<mml:math id="m75">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64,3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>128</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">0.5011</td>
<td align="center">0.2011</td>
<td align="center">0.9779</td>
<td align="center">1.7248</td>
</tr>
<tr>
<td align="center">4</td>
<td align="center">
<inline-formula id="inf60">
<mml:math id="m76">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">128,3</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>32</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">0.4872</td>
<td align="center">0.1987</td>
<td align="center">0.9806</td>
<td align="center">1.7057</td>
</tr>
<tr>
<td align="center">5</td>
<td align="center">
<inline-formula id="inf61">
<mml:math id="m77">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64,1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">0.5501</td>
<td align="center">0.2787</td>
<td align="center">0.9673</td>
<td align="center">1.8626</td>
</tr>
<tr>
<td align="center">6</td>
<td align="center">
<inline-formula id="inf62">
<mml:math id="m78">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">128,1</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">0.5276</td>
<td align="center">0.2415</td>
<td align="center">0.9721</td>
<td align="center">1.7978</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4-3">
<label>4.3</label>
<title>Ablation study</title>
<p>This study adopts the BiGRU-DNN-based energy consumption prediction algorithm as the baseline model and sequentially incorporates two enhancement strategies: the attention mechanism and the convolutional neural network (CNN). The results of the ablation experiments are presented in <xref ref-type="table" rid="T4">Table 4</xref>. As shown in <xref ref-type="table" rid="T4">Table 4</xref>, the integration of these enhancement strategies significantly improves the accuracy of electric vehicle energy consumption prediction. Specifically, incorporating the attention mechanism into the BiGRU-DNN model alone reduces the mean absolute error (MAE) by 4.8% and the mean squared error (MSE) by 31.2%, demonstrating its capability to independently identify and emphasize key spatiotemporal features such as vehicle speed and acceleration that critically influence energy consumption. When the attention mechanism is combined with CNN, the CNN-AtBiGRU-DNN model achieves a 5.5% reduction in MAE and a 35.9% reduction in MSE compared to the baseline BiGRU-DNN model. This improvement can be attributed to the introduction of convolutional neural networks (CNN) and the attention mechanism. Specifically, CNNs can efficiently and automatically extract deep hierarchical features from the input data, while the attention mechanism uses a single weight matrix to efficiently calculate attention scores, enabling rapid focus on key information. The core lies in modeling the global information of the entire input sequence and capturing the dependencies between preceding and subsequent data, accurately identifying the core spatiotemporal features that affect energy consumption, and avoiding judgment biases caused by local information. It further refines the feature representation by adaptively assigning higher weights to significant spatiotemporal factors, thereby suppressing irrelevant information. 
The single-head attention mechanism, with its core advantages of capturing global dependencies and efficient weight allocation, not only independently achieves significant reduction in prediction errors but also forms functional complementarity with the convolutional neural network (CNN), ultimately enhancing the overall prediction performance of the model.</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Results of ablation study with different models.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">CNN</th>
<th align="center">Attention mechanism</th>
<th align="center">
<inline-formula id="inf63">
<mml:math id="m79">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf64">
<mml:math id="m80">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf65">
<mml:math id="m81">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">&#x200b;</td>
<td align="left">&#x200b;</td>
<td align="center">0.6217</td>
<td align="center">0.3102</td>
<td align="center">0.9762</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf66">
<mml:math id="m82">
<mml:mrow>
<mml:mi>&#x2713;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">&#x200b;</td>
<td align="center">0.6163</td>
<td align="center">0.2874</td>
<td align="center">0.9773</td>
</tr>
<tr>
<td align="left">&#x200b;</td>
<td align="center">
<inline-formula id="inf67">
<mml:math id="m83">
<mml:mrow>
<mml:mi>&#x2713;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">0.5918</td>
<td align="center">0.2134</td>
<td align="center">0.9798</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf68">
<mml:math id="m84">
<mml:mrow>
<mml:mi>&#x2713;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf69">
<mml:math id="m85">
<mml:mrow>
<mml:mi>&#x2713;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">0.5872</td>
<td align="center">0.1987</td>
<td align="center">0.9806</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4-4">
<label>4.4</label>
<title>Comparison of prediction results</title>
<p>In this study, four different deep learning models including BiLSTM, GRU-DNN, BiLSTM-DNN, and the proposed CNN-AtBiGRU-DNN were used for comparative experiments. To ensure a fair and rigorous comparison, all models were consistently trained with identical data splitting, preprocessing steps, and training protocols: the training process utilized all collected driving segment data in each round, with a total of 100 training rounds conducted. The MSE variation during training is presented in <xref ref-type="fig" rid="F9">Figure 9</xref>. It is observed from the MSE metric variation that the prediction error of the models gradually decreases and the degree of fitting gradually improves as training progresses. When the number of training rounds approaches 20, all models approach convergence. Among the four models, the CNN-AtBiGRU-DNN model exhibits the fastest convergence speed, with its MSE metric stabilizing earlier.</p>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>Mean squared error (MSE) comparison.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g009.tif">
<alt-text content-type="machine-generated">Line chart compares mean squared error (MSE) versus epoch over one hundred epochs for four neural network models: BiLSTM, GRU-DNN, BiLSTM-DNN, and CNN-AtBiGRU-DNN. Inset magnifies epochs ninety to one hundred, showing stability differences among models, with CNN-AtBiGRU-DNN achieving the lowest MSE values and BiLSTM the highest.</alt-text>
</graphic>
</fig>
<p>For the MAE, RMSE and <inline-formula id="inf70">
<mml:math id="m86">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> metrics in <xref ref-type="table" rid="T5">Table 5</xref>, the CNN-AtBiGRU-DNN model shows a small advantage at the end of training. Compared to the BiGRU-DNN model, the MAE and RMSE of the proposed model have decreased by 7.7% and 11.6% respectively, while the <inline-formula id="inf71">
<mml:math id="m87">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> metric has increased by approximately 0.9%.</p>
<table-wrap id="T5" position="float">
<label>TABLE 5</label>
<caption>
<p>Comparison of prediction effects of different methods.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Evaluation indicators</th>
<th align="center">
<inline-formula id="inf72">
<mml:math id="m88">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf73">
<mml:math id="m89">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf74">
<mml:math id="m90">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf75">
<mml:math id="m91">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">BiLSTM</td>
<td align="center">1.2109</td>
<td align="center">0.5372</td>
<td align="center">1.8573</td>
<td align="center">0.2583</td>
</tr>
<tr>
<td align="center">GRU-DNN</td>
<td align="center">0.7233</td>
<td align="center">0.5972</td>
<td align="center">0.8115</td>
<td align="center">0.9621</td>
</tr>
<tr>
<td align="center">BiLSTM-DNN</td>
<td align="center">0.6361</td>
<td align="center">0.3991</td>
<td align="center">0.6798</td>
<td align="center">0.9718</td>
</tr>
<tr>
<td align="center">CNN-AtBiGRU-DNN</td>
<td align="center">0.5872</td>
<td align="center">0.1987</td>
<td align="center">0.6009</td>
<td align="center">0.9806</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>As can be seen from <xref ref-type="fig" rid="F10">Figure 10</xref>, compared with the other algorithms, the predicted values of the BiGRU-DNN model augmented with the attention mechanism and combined with the CNN are closer to the true values and have the highest degree of curve closeness to the true values in the three stages of testing conducted, which demonstrates the effectiveness of the CNN-AtBiGRU-DNN model presented in this paper in predicting the energy consumption of EVs.</p>
<fig id="F10" position="float">
<label>FIGURE 10</label>
<caption>
<p>Comparison of energy consumption prediction results during different driving processes. <bold>(A)</bold> The first driving stage. <bold>(B)</bold> The second driving stage. <bold>(C)</bold> The third driving stage.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g010.tif">
<alt-text content-type="machine-generated">Three line graphs compare electricity consumption predictions from four models&#x2014;CNN-AttBiGRU-DNN, BiLSTM-DNN, GRU-DNN, and BiLSTM&#x2014;versus true values over fifteen trips for three driving stages, with each graph showing close alignment and variations among models per stage.</alt-text>
</graphic>
</fig>
<p>Based on a thorough Pearson correlation coefficient analysis of the filtered data in <xref ref-type="fig" rid="F11">Figure 11</xref>, a significant positive correlation has been observed between avg<inline-formula id="inf76">
<mml:math id="m92">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>speed, tire<inline-formula id="inf77">
<mml:math id="m93">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>type, and the energy consumption of EVs. This indicates that as these factors increase, the energy consumption of EVs will also rise proportionately. Conversely, factors like country<inline-formula id="inf78">
<mml:math id="m94">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>roads and city exhibit a weak correlation with energy consumption, suggesting they have a comparatively minor impact on the overall energy consumption of EVs.</p>
<fig id="F11" position="float">
<label>FIGURE 11</label>
<caption>
<p>Pearson correlation coefficient analysis of the filtered data.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g011.tif">
<alt-text content-type="machine-generated">Heatmap visualization displays correlation coefficients between eleven variables relevant to vehicle energy consumption. Darker red squares represent stronger correlations, with trip distance and quantity showing the highest positive correlation at zero point ninety-two.</alt-text>
</graphic>
</fig>
<p>For this purpose, input data is processed by removing avg<inline-formula id="inf79">
<mml:math id="m95">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>speed and tire<inline-formula id="inf80">
<mml:math id="m96">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>type separately, and then trained and predicted using the proposed CNN-AtBiGRU-DNN model. The MSE, MAE, RMSE, and <inline-formula id="inf81">
<mml:math id="m97">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> metrics of the trained model are shown in <xref ref-type="fig" rid="F12">Figure 12</xref>. It is evident that alterations to the input variable exert a minimal influence on the <inline-formula id="inf82">
<mml:math id="m98">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> metrics. Notably, when compared to training with the full dataset, excluding the avg<inline-formula id="inf83">
<mml:math id="m99">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>speed variable led to an increase of approximately 26.9% in the MSE index, alongside increases of 5.2% and 7.5% in the MAE and RMSE respectively. In comparison, the degree of increase in these indicators was relatively small when tire<inline-formula id="inf84">
<mml:math id="m100">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>type was removed.</p>
<fig id="F12" position="float">
<label>FIGURE 12</label>
<caption>
<p>Comparison of evaluation metrics for the CNN-AtBiGRU-DNN model trained with different input variables.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g012.tif">
<alt-text content-type="machine-generated">Bar chart compares the performance of CNN-ABiGRU-DNN, Without avg_speed, and Without tire_type models across four metrics: Mean Squared Error (MSE), Mean Absolute Error (MAE), Root Mean Squared Error (RMSE), and R squared (R&#xB2;). CNN-ABiGRU-DNN shows the lowest error rates and highest R&#xB2; value.</alt-text>
</graphic>
</fig>
<p>These results are then compared with the true values and the results obtained from training the model with the complete data set as illustrated in <xref ref-type="fig" rid="F13">Figure 13</xref>. Upon examining the prediction outcomes, it becomes evident that after removing the factors with the strongest correlation to energy consumption in time series and non-time series data respectively, the prediction results show significant differences compared to those obtained from training and predicting with the complete data set, with a decrease in accuracy. This indicates that avg<inline-formula id="inf85">
<mml:math id="m101">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>speed and tire<inline-formula id="inf86">
<mml:math id="m102">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>type have had a certain influence on the experimental results.</p>
<fig id="F13" position="float">
<label>FIGURE 13</label>
<caption>
<p>Comparison of energy consumption prediction results with different input variables.</p>
</caption>
<graphic xlink:href="fieng-04-1769776-g013.tif">
<alt-text content-type="machine-generated">Line chart comparing energy consumption in kilowatt-hours per one hundred kilometers across twenty-five trips for four scenarios: true value, CNN-ABiGRU-DNN, without average speed, and without tire type. Each scenario is represented by a distinct colored line, showing close but slightly varied trends in consumption, with values fluctuating between approximately ten and sixteen kilowatt-hours per one hundred kilometers.</alt-text>
</graphic>
</fig>
<p>Based on the experimental results presented above, the proposed CNN-AtBiGRU-DNN model demonstrates superior prediction accuracy and faster convergence compared to the baseline models, with notable improvements in the R<sup>2</sup> metric. Furthermore, the ablation study reveals that different input variables exert varying degrees of influence on the prediction performance. Nevertheless, several limitations of this study should be acknowledged. First, the integration of multiple neural network components inherently introduces a substantial number of trainable parameters, resulting in increased computational complexity and higher resource requirements during the training phase. More importantly, all experiments in this study were conducted on data collected from a single vehicle operating under specific conditions. While the model achieves satisfactory performance on the current dataset, its generalization capability to unseen scenarios remains to be further validated. In particular, factors such as variations in vehicle specifications (e.g., battery capacity, curb weight, and powertrain configuration), heterogeneous driving behaviors (e.g., aggressive versus conservative driving styles), and diverse geographical and environmental conditions (e.g., terrain topography, climatic factors, and traffic patterns) may potentially affect the model&#x2019;s predictive accuracy. Future research will prioritize extending the validation of the proposed framework to encompass multiple vehicle platforms and a broader range of real-world driving conditions, thereby providing a more comprehensive assessment of its generalization performance.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<label>5</label>
<title>Conclusion</title>
<p>This paper establishes a deep learning model CNN-AtBiGRU-DNN for EVs energy consumption prediction. Considering a series of factors related to electric vehicle energy consumption, an effective data analysis and processing method is established based on real data. Upon data analysis, it was discovered that the energy consumption values are significantly higher during winter, attributed to the utilization of heating and air conditioning. The collected data was divided into time series and non-time series for training, and the results were compared with BiLSTM, GRU-DNN, and BiLSTM-DNN. CNN-AtBiGRU-DNN has faster fitting speed and higher accuracy, which enhances its predictive ability for large batches of real-time driving data. Training the model with different input variables reveals that avg<inline-formula id="inf87">
<mml:math id="m103">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>speed and tire<inline-formula id="inf88">
<mml:math id="m104">
<mml:mrow>
<mml:mtext>_</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>type have a certain impact on the prediction results. Future work will focus on continuously improving the architecture of deep learning models to accommodate growing data sets and more complex driving scenarios while also paying particular attention to additional factors that might impact energy consumption.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec sec-type="author-contributions" id="s7">
<title>Author contributions</title>
<p>WZ: Project administration, Formal Analysis, Funding acquisition, Writing &#x2013; review and editing, Supervision, Conceptualization, Methodology. RC: Methodology, Investigation, Writing &#x2013; review and editing, Data curation, Writing &#x2013; original draft. ML: Validation, Writing &#x2013; review and editing, Software, Writing &#x2013; original draft, Visualization. YM: Project administration, Writing &#x2013; review and editing, Methodology, Conceptualization, Funding acquisition. PL: Supervision, Project administration, Writing &#x2013; review and editing, Formal Analysis. MG: Writing &#x2013; review and editing, Formal Analysis, Supervision.</p>
</sec>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s10">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Al-Wreikat</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Serrano</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Sodr&#xe9;</surname>
<given-names>J. R.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Effects of ambient temperature and trip characteristics on the energy consumption of an electric vehicle</article-title>. <source>Energy</source> <volume>238</volume>, <fpage>122028</fpage>. <pub-id pub-id-type="doi">10.1016/j.energy.2021.122028</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Lei</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Xiong</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Shen</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A novel approach to reconstruct open circuit voltage for state of charge estimation of lithium ion batteries in electric vehicles</article-title>. <source>Appl. Energy</source> <volume>255</volume>, <fpage>113758</fpage>. <pub-id pub-id-type="doi">10.1016/j.apenergy.2019.113758</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Donkers</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Viktorovi&#x107;</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Influence of driving style, infrastructure, weather and traffic on electric vehicle performance</article-title>. <source>Transp. Research Part D Transport Environment</source> <volume>88</volume>, <fpage>102569</fpage>. <pub-id pub-id-type="doi">10.1016/j.trd.2020.102569</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hai</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Dhahad</surname>
<given-names>H. A.</given-names>
</name>
<name>
<surname>Jasim</surname>
<given-names>K. F.</given-names>
</name>
<name>
<surname>Sharma</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Fouad</surname>
<given-names>H.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Deep learning-based prediction of lithium-ion batteries state of charge for electric vehicles in standard driving cycle</article-title>. <source>Sustain. Energy Technol. Assessments</source> <volume>60</volume>, <fpage>103461</fpage>. <pub-id pub-id-type="doi">10.1016/j.seta.2023.103461</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hao</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Ouyang</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Seasonal effects on electric vehicle energy consumption and driving range: a case study on personal, taxi, and ridesharing vehicles</article-title>. <source>J. Clean. Prod.</source> <volume>249</volume>, <fpage>119403</fpage>. <pub-id pub-id-type="doi">10.1016/j.jclepro.2019.119403</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>How</surname>
<given-names>D. N.</given-names>
</name>
<name>
<surname>Hannan</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Lipu</surname>
<given-names>M. S. H.</given-names>
</name>
<name>
<surname>Sahari</surname>
<given-names>K. S.</given-names>
</name>
<name>
<surname>Ker</surname>
<given-names>P. J.</given-names>
</name>
<name>
<surname>Muttaqi</surname>
<given-names>K. M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>State-of-charge estimation of li-ion battery in electric vehicles: a deep neural network approach</article-title>. <source>IEEE Trans. Industry Appl.</source> <volume>56</volume>, <fpage>5565</fpage>&#x2013;<lpage>5574</lpage>. <pub-id pub-id-type="doi">10.1109/tia.2020.3004294</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hua</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Sevegnani</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Yi</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Birnie</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>McAslan</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Fine-grained rnn with transfer learning for energy consumption estimation on evs</article-title>. <source>IEEE Trans. Industrial Inf.</source> <volume>18</volume>, <fpage>8182</fpage>&#x2013;<lpage>8190</lpage>. <pub-id pub-id-type="doi">10.1109/tii.2022.3143155</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jiaqiang</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Zeng</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wen</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>Z.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Effects analysis on active equalization control of lithium-ion batteries based on intelligent estimation of the state-of-charge</article-title>. <source>Energy</source> <volume>238</volume>, <fpage>121822</fpage>.</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kucukvar</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Onat</surname>
<given-names>N. C.</given-names>
</name>
<name>
<surname>Kutty</surname>
<given-names>A. A.</given-names>
</name>
<name>
<surname>Abdella</surname>
<given-names>G. M.</given-names>
</name>
<name>
<surname>Bulak</surname>
<given-names>M. E.</given-names>
</name>
<name>
<surname>Ansari</surname>
<given-names>F.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Environmental efficiency of electric vehicles in Europe under various electricity production mix scenarios</article-title>. <source>J. Clean. Prod.</source> <volume>335</volume>, <fpage>130291</fpage>. <pub-id pub-id-type="doi">10.1016/j.jclepro.2021.130291</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Lim</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Park</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Energy consumption evaluation of passenger electric vehicle based on ambient temperature under real-world driving conditions</article-title>. <source>Energy Convers. Manag.</source> <volume>306</volume>, <fpage>118289</fpage>. <pub-id pub-id-type="doi">10.1016/j.enconman.2024.118289</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Mao</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Hua</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Remaining useful life prediction for lithium-ion batteries with a hybrid model based on TCN-GRU-DNN and dual attention mechanism</article-title>. <source>IEEE Trans. Transp. Electrification</source>. <volume>9.3</volume>, <fpage>4726</fpage>&#x2013;<lpage>4740</lpage>.</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yamamoto</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Morikawa</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Exploring the interactive effects of ambient temperature and vehicle auxiliary loads on electric vehicle energy consumption</article-title>. <source>Appl. Energy</source> <volume>227</volume>, <fpage>324</fpage>&#x2013;<lpage>331</lpage>. <pub-id pub-id-type="doi">10.1016/j.apenergy.2017.08.074</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>An extended kalman filter based data-driven method for state of charge estimation of li-ion batteries</article-title>. <source>J. Energy Storage</source> <volume>40</volume>, <fpage>102655</fpage>. <pub-id pub-id-type="doi">10.1016/j.est.2021.102655</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Self-discharge prediction method for lithium-ion batteries based on improved support vector machine</article-title>. <source>J. Energy Storage</source> <volume>55</volume>, <fpage>105571</fpage>. <pub-id pub-id-type="doi">10.1016/j.est.2022.105571</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ma</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Miao</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Examining influential factors on the energy consumption of electric and diesel buses: a data-driven analysis of large-scale public transit network in beijing</article-title>. <source>Energy</source> <volume>216</volume>, <fpage>119196</fpage>. <pub-id pub-id-type="doi">10.1016/j.energy.2020.119196</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Needell</surname>
<given-names>Z. A.</given-names>
</name>
<name>
<surname>McNerney</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>M. T.</given-names>
</name>
<name>
<surname>Trancik</surname>
<given-names>J. E.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Potential for widespread electrification of personal vehicle travel in the United States</article-title>. <source>Nat. Energy</source> <volume>1</volume>, <fpage>1</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1038/nenergy.2016.112</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Rathore</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Meena</surname>
<given-names>H. K.</given-names>
</name>
<name>
<surname>Jain</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2023</year>). &#x201c;<article-title>Prediction of ev energy consumption using random forest and xgboost</article-title>,&#x201d; in <source>2023 international conference on power electronics and energy (ICPEE)</source> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shen</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Ahn</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Lamantia</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Personalized velocity and energy prediction for electric vehicles with road features in consideration</article-title>. <source>IEEE Trans. Transp. Electrification</source>. <volume>9.3</volume>, <fpage>3958</fpage>&#x2013;<lpage>3969</lpage>.</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shrivastava</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Soon</surname>
<given-names>T. K.</given-names>
</name>
<name>
<surname>Idris</surname>
<given-names>M. Y. I. B.</given-names>
</name>
<name>
<surname>Mekhilef</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Adnan</surname>
<given-names>S. B. R. S.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Combined state of charge and state of energy estimation of lithium-ion battery using dual forgetting factor-based adaptive extended kalman filter for electric vehicle applications</article-title>. <source>IEEE Trans. Veh. Technol.</source> <volume>70</volume>, <fpage>1200</fpage>&#x2013;<lpage>1215</lpage>. <pub-id pub-id-type="doi">10.1109/tvt.2021.3051655</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Tao</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Si</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Data-driven reinforcement-learning-based hierarchical energy management strategy for fuel cell/battery/ultracapacitor hybrid electric vehicles</article-title>. <source>J. Power Sources</source> <volume>455</volume>, <fpage>227964</fpage>. <pub-id pub-id-type="doi">10.1016/j.jpowsour.2020.227964</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>An energy management strategy for plug-in hybrid electric vehicles based on deep learning and improved model predictive control</article-title>. <source>Energy</source> <volume>269</volume>, <fpage>126772</fpage>. <pub-id pub-id-type="doi">10.1016/j.energy.2023.126772</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wager</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>McHenry</surname>
<given-names>M. P.</given-names>
</name>
<name>
<surname>Whale</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Br&#xe4;unl</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Testing energy efficiency and driving range of electric vehicles in relation to gear selection</article-title>. <source>Renew. Energy</source> <volume>62</volume>, <fpage>303</fpage>&#x2013;<lpage>312</lpage>. <pub-id pub-id-type="doi">10.1016/j.renene.2013.07.029</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wager</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Whale</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Braunl</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Driving electric vehicles at highway speeds: the effect of higher driving speeds on energy consumption and driving range for electric vehicles in Australia</article-title>. <source>Renew. Sustainable Energy Reviews</source> <volume>63</volume>, <fpage>158</fpage>&#x2013;<lpage>165</lpage>. <pub-id pub-id-type="doi">10.1016/j.rser.2016.05.060</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Lyu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Physics-based battery SOC estimation methods: recent advances and future perspectives</article-title>. <source>J. Energy Chem.</source> <volume>89</volume>, <fpage>27</fpage>&#x2013;<lpage>40</lpage>.</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xiao</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Jia</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A novel estimation of state of charge for the lithium-ion battery in electric vehicle without open circuit voltage experiment</article-title>. <source>Energy</source> <volume>243</volume>, <fpage>123072</fpage>. <pub-id pub-id-type="doi">10.1016/j.energy.2021.123072</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Electric vehicle&#x2019;s electricity consumption on a road with different slope</article-title>. <source>Phys. A Stat. Mech. Its Appl.</source> <volume>402</volume>, <fpage>41</fpage>&#x2013;<lpage>48</lpage>.</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Data-driven analysis of battery electric vehicle energy consumption under real-world temperature conditions</article-title>. <source>J. Energy Storage</source> <volume>72</volume>, <fpage>108590</fpage>. <pub-id pub-id-type="doi">10.1016/j.est.2023.108590</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ye</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>State-of-charge estimation with adaptive extended Kalman filter and extended stochastic gradient algorithm for lithium-ion batteries</article-title>. <source>J. Energy Storage</source> <volume>47</volume>, <fpage>103611</fpage>. <pub-id pub-id-type="doi">10.1016/j.est.2021.103611</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Yao</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Eco-driving at signalised intersections for electric vehicles</article-title>. <source>IET Intell. Transp. Syst.</source> <volume>9</volume>, <fpage>488</fpage>&#x2013;<lpage>497</lpage>. <pub-id pub-id-type="doi">10.1049/iet-its.2014.0145</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zraibi</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Okar</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Chaoui</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Mansouri</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Remaining useful life assessment for lithium-ion batteries using CNN-LSTM-DNN hybrid method</article-title>. <source>IEEE Trans. Veh. Technol.</source> <volume>70</volume>, <fpage>4252</fpage>&#x2013;<lpage>4261</lpage>. <pub-id pub-id-type="doi">10.1109/tvt.2021.3071622</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/903832/overview">Muhammad Sultan</ext-link>, Bahauddin Zakariya University, Pakistan</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1937184/overview">Ildiko Tulbure</ext-link>, 1 Decembrie 1918 University, Romania</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3335963/overview">Omer Can TOLUN</ext-link>, Adana Alparslan Turkes Science and Technology University, T&#xfc;rkiye</p>
</fn>
</fn-group>
<fn-group>
<fn id="fn1">
<label>1</label>
<p>The data used in this paper can be obtained from <ext-link ext-link-type="uri" xlink:href="https://www.spritmonitor.de/en/detail/786327.html">https://www.spritmonitor.de/en/detail/786327.html</ext-link>
</p>
</fn>
</fn-group>
</back>
</article>