<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Earth Sci.</journal-id>
<journal-title>Frontiers in Earth Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Earth Sci.</abbrev-journal-title>
<issn pub-type="epub">2296-6463</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1473325</article-id>
<article-id pub-id-type="doi">10.3389/feart.2024.1473325</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Earth Science</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Advanced machine learning artificial neural network classifier for lithology identification using Bayesian optimization</article-title>
<alt-title alt-title-type="left-running-head">Soulaimani et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/feart.2024.1473325">10.3389/feart.2024.1473325</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Soulaimani</surname>
<given-names>Sa&#xe2;d</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2805197/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Soulaimani</surname>
<given-names>Ayoub</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2806440/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Abdelrahman</surname>
<given-names>Kamal</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1321026/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Miftah</surname>
<given-names>Abdelhalim</given-names>
</name>
<xref ref-type="aff" rid="aff5">
<sup>5</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1458964/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Fnais</surname>
<given-names>Mohammed S.</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Mondal</surname>
<given-names>Biraj Kanti</given-names>
</name>
<xref ref-type="aff" rid="aff6">
<sup>6</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2279885/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Resources Valorization, Environment and Sustainable Development Research Team (RVESD)</institution>, <institution>Department of Mines</institution>, <institution>Mines School of Rabat</institution>, <addr-line>Rabat</addr-line>, <country>Morocco</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Geology and Sustainable Mining Institute</institution>, <institution>Mohammed VI Polytechnic University</institution>, <addr-line>Ben Guerir</addr-line>, <country>Morocco</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Natural Resources and Sustainable Development Laboratory</institution>, <institution>Department of Earth Sciences</institution>, <institution>Faculty of Sciences</institution>, <institution>Ibn Tofa&#xef;l University</institution>, <addr-line>K&#xe9;nitra</addr-line>, <country>Morocco</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Department of Geology and Geophysics</institution>, <institution>College of Science</institution>, <institution>King Saud University</institution>, <addr-line>Riyadh</addr-line>, <country>Saudi Arabia</country>
</aff>
<aff id="aff5">
<sup>5</sup>
<institution>Laboratory Physico-Chemistry of Processes and Materials, Faculty of Sciences and Techniques, Hassan First University of Settat</institution>, <addr-line>Settat</addr-line>, <country>Morocco</country>
</aff>
<aff id="aff6">
<sup>6</sup>
<institution>Department of Geography</institution>, <institution>Netaji Subhas Open University</institution>, <addr-line>Kolkata</addr-line>, <country>India</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1340678/overview">Sheng Nie</ext-link>, Chinese Academy of Sciences (CAS), China</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1921662/overview">Fengchao Xiong</ext-link>, Nanjing University of Science and Technology, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2561982/overview">Zhiwen Xue</ext-link>, University of Chinese Academy of Sciences, China</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Sa&#xe2;d Soulaimani, <email>soulaimani@enim.ac.ma</email>; Kamal Abdelrahman, <email>khassanein@ksu.edu.sa</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>20</day>
<month>11</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>12</volume>
<elocation-id>1473325</elocation-id>
<history>
<date date-type="received">
<day>30</day>
<month>07</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>25</day>
<month>10</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Soulaimani, Soulaimani, Abdelrahman, Miftah, Fnais and Mondal.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Soulaimani, Soulaimani, Abdelrahman, Miftah, Fnais and Mondal</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Identifying lithology is crucial for geological exploration, and the adoption of artificial intelligence is progressively becoming a refined approach to automate this process. A key feature of this strategy is leveraging population search algorithms to fine-tune hyperparameters, thus boosting prediction accuracy. Notably, Bayesian optimization has been applied for the first time to select the most effective learning parameters for artificial neural network classifiers used for lithology identification. This technique utilizes the capability of Bayesian optimization to utilize past classification outcomes to enhance the lithology models performance based on physical parameters calculated from well log data. In a comparison of artificial neural network architectures, the Bayesian-optimized artificial neural network (BOANN) demonstrably achieved the superior classification accuracy in validation and significantly outperformed a non-optimized wide, bilayer, and tri-layer network configurations, indicating that incorporating Bayesian optimization can significantly advance lithofacies recognition, thus offering a more accurate and intelligent solution for identifying lithology.</p>
</abstract>
<kwd-group>
<kwd>geology</kwd>
<kwd>lithology identification</kwd>
<kwd>machine learning</kwd>
<kwd>neural network</kwd>
<kwd>Bayesian optimization</kwd>
</kwd-group>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Geoinformatics</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>Identifying lithology is a critical operation in the oil and gas sector, providing essential insights for exploration and production processes. Lithology identification has historically relied on labor-intensive and error-prone manual analysis of geological data (<xref ref-type="bibr" rid="B28">Lui et al., 2022</xref>; <xref ref-type="bibr" rid="B30">McCormick and Heaven, 2023</xref>; <xref ref-type="bibr" rid="B9">Bonali et al., 2024</xref>). The recent surge in artificial intelligence (AI) technologies, particularly advancements in machine learning and neural networks (<xref ref-type="bibr" rid="B3">Alf&#xe9;rez et al., 2021</xref>; <xref ref-type="bibr" rid="B10">Chen et al., 2024</xref>), offers a compelling solution for automating and streamlining lithology identification. However, accurately identifying lithological formations in complex subsurface environments is challenging due to the high-dimensional and non-linear nature of geological data. Traditional methods often struggle to handle the complexity and variability of such data, leading to suboptimal classification results. In recent years, machine learning, particularly Artificial Neural Networks (ANNs), has shown promise in lithology identification but still faces challenges related to model optimization and scalability.</p>
<p>Artificial neural networks (ANNs) demonstrate remarkable capability in replicating complex geological structures and identifying subtle lithological features from well log data. The successful application of ANNs in this domain hinges on the optimal configuration of their hyperparameters (<xref ref-type="bibr" rid="B46">Tilahun and Korus, 2023</xref>). An ANN&#x2019;s performance is highly dependent on the configuration of critical settings (hyperparameters). These hyperparameters encompass network architecture, learning rates (<xref ref-type="bibr" rid="B14">Dutta et al., 2010</xref>) and activation functions (<xref ref-type="bibr" rid="B19">Hastie et al., 2009</xref>). Hyperparameter optimization has historically been associated with high computational cost and unpredictability (<xref ref-type="bibr" rid="B21">Houshmand et al., 2022</xref>; <xref ref-type="bibr" rid="B13">Djimadoumngar, 2023</xref>). This iterative trial-and-error approach necessitates significant computational resources and lacks deterministic convergence towards the optimal network configuration. This approach often yielded suboptimal artificial neural network (ANN) configurations, hindering the model&#x2019;s ability to achieve peak performance.</p>
<p>A strong substitute is provided by Bayesian optimization (<xref ref-type="bibr" rid="B5">Asante-Okyere et al., 2022</xref>), which guides the hyperparameter search using a probabilistic model. This method learns which regions of the hyperparameter space are most likely to produce good results by updating the probability model based on the findings of earlier assessments. By using this strategy, the networks&#x2019; predictive power is increased while the computational cost of doing comprehensive hyperparameter testing is decreased.</p>
<p>This paper introduces a novel approach that integrates Artificial Neural Networks (ANNs) with Bayesian optimization to address the challenges of hyperparameter tuning in lithology identification. Unlike conventional methods, this approach leverages Bayesian optimization to efficiently explore the hyperparameter space, significantly improving classification accuracy and computational efficiency in high-dimensional geological data. The use of well log data from the Athabasca Oil Sands Area serves as a case study to demonstrate the effectiveness of the proposed method. The study highlights how Bayesian optimization, by constructing a probabilistic model of the objective function, outperforms traditional optimization techniques in terms of precision, scalability, and resource management. Additionally, this work offers a comprehensive comparison with state-of-the-art methods, illustrating its superiority in handling complex geological datasets, thus providing a scalable solution for various geophysical exploration settings.</p>
<p>Therefore, this work expands on employing Bayesian optimization (<xref ref-type="bibr" rid="B5">Asante-Okyere et al., 2022</xref>) to enhance the performance of artificial neural networks for lithology identification. To assess the effectiveness of Bayesian optimization in this domain, the BOANN&#x2019;s performance is comparatively evaluated against established ANN architectures encompassing various layer configurations (single-hidden layer, double-hidden layer, and triple-hidden layer networks). This rigorous benchmarking aims to quantify the benefits of utilizing Bayesian optimization for hyperparameter tuning within the context of lithology identification. The BOANN model utilizes well log data (RW: water resistivity at formation temperature, Depth: depth of an interval in meters from the Kelly Bushing elevation, SW: water saturation, PHI: density, W_Tar: mass percent bitumen, and VSH: volume of shale) to deliver improved accuracy and dependability in lithofacies classification (<xref ref-type="bibr" rid="B1">Adeniran et al., 2019</xref>; <xref ref-type="bibr" rid="B5">Asante-Okyere et al., 2022</xref>; <xref ref-type="bibr" rid="B35">Ntibahanana et al., 2022</xref>; <xref ref-type="bibr" rid="B2">Albarr&#xe1;n-Ord&#xe1;s and Zosseder, 2023</xref>).</p>
<p>Our extensive testing and validation indicate that the Bayesian-optimized model substantially surpasses traditional approaches. This progress not only confirms that Bayesian optimization can refine neural network classifiers for geological applications but also indicates a significant shift towards smarter and more effective methodologies in the oil and gas industry.</p>
</sec>
<sec sec-type="materials|methods" id="s2">
<title>2 Material and methods</title>
<sec id="s2-1">
<title>2.1 Artificial neural networks</title>
<p>Artificial Neural Networks (ANNs) are fundamental to modern computational geoscience, particularly adept at analyzing complex datasets, like those obtained from well logs for lithology identification. These ANNs comprise interconnected processing elements, termed nodes or neurons, organized into distinct layers. Each neuron executes specific mathematical operations, and the collective activity across these layers empowers the network to learn intricate relationships within data (<xref ref-type="bibr" rid="B24">LeCun et al., 2015</xref>). This architecture allows them to capture and model the nonlinear and intricate relationships found in geological data, making them ideal for predictive tasks where conventional statistical approaches may struggle (<xref ref-type="bibr" rid="B20">Heaton, 2018</xref>) (<xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Example of neural network architecture.</p>
</caption>
<graphic xlink:href="feart-12-1473325-g001.tif"/>
</fig>
<p>From the perspective of lithology recognition, a typical ANN (<xref ref-type="bibr" rid="B50">Xiong et al., 2020</xref>; <xref ref-type="bibr" rid="B51">Xiong et al., 2022</xref>; <xref ref-type="bibr" rid="B26">Liu et al., 2024</xref>) involves an input layer, numerous hidden layers, and an output layer. The input layer collects raw data such as depth of an interval in meters from the Kelly Bushing elevation, water saturation, density, mass percent bitumen, water resistivity at formation temperature, and volume of shale, which reflect various rock properties. The hidden layers, filled with numerous neurons that possess adjustable weights and biases, process this data. Activation functions like ReLU or sigmoid are used by these neurons to introduce non-linearity, helping the network detect complex relationships and interactions within the data (<xref ref-type="bibr" rid="B17">Glorot et al., 2011</xref>).</p>
<p>Training an ANN involves adjusting the weights and biases to reduce differences between actual and predicted outputs through a method called backpropagation. During this process, the network minimizes a predefined loss function (<xref ref-type="bibr" rid="B34">Ng and Jahanbani Ghahfarokhi, 2022</xref>) using optimization algorithms like stochastic gradient descent, refining the prediction accuracy (<xref ref-type="bibr" rid="B29">Manouchehrian et al., 2012</xref>) with each iteration (<xref ref-type="bibr" rid="B12">Diederik and Jimmy, 2014</xref>). Consequently, ANNs have become effective at distinguishing lithological units by detecting subtle differences and characteristics in well log data, serving as an essential resource for geologists and engineers in oil and gas exploration and development.</p>
<p>Integrating Bayesian optimization (<xref ref-type="bibr" rid="B42">Shahriari et al., 2016</xref>; <xref ref-type="bibr" rid="B52">Zhang et al., 2020</xref>; <xref ref-type="bibr" rid="B5">Asante-Okyere et al., 2022</xref>) into ANNs further enhances their performance by methodically adjusting hyperparameters (<xref ref-type="bibr" rid="B5">Asante-Okyere et al., 2022</xref>; <xref ref-type="bibr" rid="B38">Olmos-de-Aguilera et al., 2023</xref>; <xref ref-type="bibr" rid="B37">Nwaila et al., 2024</xref>) such as the number of hidden layers, neurons per layer, and learning rates, based on a feedback loop of performance data. This integration not only tailors the network architecture to the unique features of the geological data but also ensures it is optimized for the most accurate lithology predictions, significantly surpassing traditional, non-optimized methods (<xref ref-type="bibr" rid="B43">Snoek et al., 2012</xref>).</p>
</sec>
<sec id="s2-2">
<title>2.2 Bayesian optimization</title>
<p>The acquisition function is used in Bayesian optimization to find the global minimum of the hypothesis function (HF) <inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>. Assume that the acquisition function (AF) <inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> reaches its highest value at the current point <inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:msup>
<mml:mi>x</mml:mi>
<mml:mo>&#x2b;</mml:mo>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>. The PI function (probability improvement function) is defined as (<xref ref-type="bibr" rid="B42">Shahriari et al., 2016</xref>) (<xref ref-type="disp-formula" rid="e1">Equation 1</xref>):<disp-formula id="e1">
<mml:math id="m4">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">&#x3a1;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2265;</mml:mo>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msup>
<mml:mi>x</mml:mi>
<mml:mo>&#x2b;</mml:mo>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">&#x3be;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>
<list list-type="simple">
<list-item>
<p>&#x2022; <inline-formula id="inf4">
<mml:math id="m5">
<mml:mrow>
<mml:mi>z</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msup>
<mml:mi>x</mml:mi>
<mml:mo>&#x2b;</mml:mo>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</inline-formula>,</p>
</list-item>
<list-item>
<p>&#x2022; <inline-formula id="inf5">
<mml:math id="m6">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mi>I</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi mathvariant="normal">&#x3be;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>: the cumulative distribution function (CDF).</p>
</list-item>
<list-item>
<p>&#x2022; <inline-formula id="inf6">
<mml:math id="m7">
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf7">
<mml:math id="m8">
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>: the standard deviation and mean of the prediction, respectively.</p>
</list-item>
</list>
</p>
<p>The <inline-formula id="inf8">
<mml:math id="m9">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>E</mml:mi>
<mml:mi>I</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> function (EI: expected improvement) can be defined by (<xref ref-type="disp-formula" rid="e2">Equation 2</xref>):<disp-formula id="e2">
<mml:math id="m10">
<mml:mrow>
<mml:mi>E</mml:mi>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="{" close="" separators="|">
<mml:mrow>
<mml:mtable columnalign="left">
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msup>
<mml:mi>x</mml:mi>
<mml:mo>&#x2b;</mml:mo>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi mathvariant="normal">&#x3be;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi>f</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3e;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mrow>
<mml:mn>0</mml:mn>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi>f</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>
<list list-type="simple">
<list-item>
<p>&#x2022; <inline-formula id="inf9">
<mml:math id="m11">
<mml:mrow>
<mml:mi>&#x3c4;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>: the probability density function (PDF) of the normal distribution. <xref ref-type="disp-formula" rid="e3">Equation 3</xref> and <xref ref-type="disp-formula" rid="e4">Equation 4</xref> present the upper and lower confidence bounds:</p>
</list-item>
</list>
<disp-formula id="e3">
<mml:math id="m12">
<mml:mrow>
<mml:mi>U</mml:mi>
<mml:mi>C</mml:mi>
<mml:mi>B</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>&#x3b2;</mml:mi>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>
<disp-formula id="e4">
<mml:math id="m13">
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mi>C</mml:mi>
<mml:mi>B</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>&#x3b2;</mml:mi>
<mml:mi>&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>
<list list-type="simple">
<list-item>
<p>&#x2022; <inline-formula id="inf10">
<mml:math id="m14">
<mml:mrow>
<mml:mi>&#x3b2;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the control parameter that defines how much the acquisition function explores or exploits the search space (<xref ref-type="bibr" rid="B52">Zhang et al., 2020</xref>).</p>
</list-item>
</list>
</p>
</sec>
<sec id="s2-3">
<title>2.3 Suitability of Bayesian optimization for lithology identification</title>
<p>Bayesian optimization is particularly effective in high-dimensional spaces, which is common in geological datasets where numerous features (F. <xref ref-type="bibr" rid="B51">Xiong et al., 2022</xref>) (e.g., well log measurements) must be considered. Traditional methods like grid search are inefficient and often impractical in such contexts, as they require exhaustive evaluations across all combinations of hyperparameters. It uses a probabilistic model to identify areas of the hyperparameter space that are more likely to yield better results. This approach allows it to focus on promising regions rather than blindly exploring the entire space, leading to faster convergence on optimal hyperparameters.</p>
<p>Unlike deterministic optimization methods, Bayesian optimization explicitly accounts for uncertainty in the model predictions. This is critical in lithology identification, where geological data can be noisy and complex. By modeling uncertainty, it allows for more informed decisions about which hyperparameters to test next. The balance between exploration (trying new hyperparameter values) and exploitation (refining known good values) is inherently managed in Bayesian optimization. This is crucial in lithology identification, where understanding the underlying geology often requires exploring various parameter combinations without committing to suboptimal settings.</p>
<p>Geological data often contains noise and outliers. Bayesian optimization&#x2019;s ability to incorporate uncertainty helps mitigate the impact of such noise on the optimization process (F. <xref ref-type="bibr" rid="B51">Xiong et al., 2022</xref>). This resilience allows for more reliable model performance in identifying lithology, particularly in heterogeneous formations. The relationships between input features and lithology classes are often non-linear and complex. Bayesian optimization is well-suited to optimizing models like neural networks that can capture these complexities, allowing for a more nuanced understanding of lithological characteristics, and allows for the customization of the objective function, enabling the incorporation of specific metrics relevant to lithology identification (e.g., accuracy, precision, recall). This customization ensures that the optimization process aligns closely with the project goals, enhancing overall model performance.</p>
</sec>
<sec id="s2-4">
<title>2.4 Bayesian optimization of ANNs for advanced lithology identification</title>
<p>In addressing the multifaceted challenge of lithology identification, this study employs Bayesian optimization for fine-tuning the hyperparameters of artificial neural networks (ANNs), using its robust probabilistic framework to significantly enhance classification accuracy (<xref ref-type="bibr" rid="B27">Lozano et al., 2011</xref>). Well log data encompassing measurements such as depth of an interval in meters from the Kelly Bushing elevation, water saturation, density, mass percent bitumen, water resistivity at formation temperature, and volume of shale are critical inputs for this analysis, reflecting the diverse physical properties of subterranean materials (<xref ref-type="bibr" rid="B22">Jiang et al., 2021</xref>). Given the high dimensionality and variability inherent in such data, conventional neural network (<xref ref-type="bibr" rid="B48">Wu and Zhou, 1993</xref>) setups without optimization often struggle to achieve optimal performance, underscoring the need for sophisticated tuning methods (<xref ref-type="bibr" rid="B25">Lee et al., 2021</xref>).</p>
<p>Bayesian optimization serves as a pivotal advancement in this context, applying a Gaussian process to model the relationship between hyperparameter configurations and their corresponding predictive accuracies (<xref ref-type="bibr" rid="B14">Dutta et al., 2010</xref>). This probabilistic approach not only aids in identifying the most effective neural network architecture such as determining the ideal number of layers and neurons per layer but also in fine tuning other critical parameters like learning rates and batch sizes (<xref ref-type="bibr" rid="B43">Snoek et al., 2012</xref>). The chosen method utilizes the Expected Improvement (EI) acquisition function, which systematically guides the selection process towards hyperparameter values that are likely to yield improvements over previously tested configurations (<xref ref-type="bibr" rid="B7">Bischl et al., 2023</xref>).</p>
<p>Empirical validation of the optimized models on the test dataset reveals that Bayesian-optimized ANNs significantly outperform traditional, non-optimized counterparts, achieving up to a 96.69% (Validation)/97.21% (Test) accuracy in lithology classification (<xref ref-type="bibr" rid="B5">Asante-Okyere et al., 2022</xref>). This score not only underscores the efficacy of the Bayesian approach but also highlights its potential to refine predictive modelling in geoscience applications. Further, statistical analysis, using a confusion matrix, confirms the significance of these improvements, reinforcing Bayesian optimization (<xref ref-type="bibr" rid="B52">Zhang et al., 2020</xref>) as a crucial tool for enhancing the reliability and accuracy of lithological predictions from well log data (<xref ref-type="bibr" rid="B49">Xie et al., 2023</xref>).</p>
<p>The implications of these findings are profound, suggesting that Bayesian optimization (<xref ref-type="bibr" rid="B34">Ng and Jahanbani Ghahfarokhi, 2022</xref>) can transform the landscape of geological data analysis by enabling more accurate, efficient, and reliable lithology identification. This advancement promises to reduce the costs and time associated with traditional geological surveys, offering a more streamlined approach that could revolutionize resource exploration and management practices (<xref ref-type="bibr" rid="B36">Nuzzo, 2017</xref>; <xref ref-type="bibr" rid="B47">van de Schoot et al., 2021</xref>).</p>
</sec>
<sec id="s2-5">
<title>2.5 Data description</title>
<p>The Geological Survey of Alberta started a mapping project of the McMurray Formation, and the overlying Wabiskaw Member of the Clearwater Formation, in the Athabasca Oil Sands Area. The Alberta Geological Survey data report is one of the most substantial results of the project and is expected to assist future development of the oil sands. The main purpose was to provide a database with data from 2,193 wells of the Athabasca Oil Sands Area, including around 750 boreholes with core analyses. The current investigation was conducted on 60 probing wells that are included in the digital data bank described in the File Reports of the Alberta Geological Survey.</p>
<p>The study uses a comprehensive dataset composed of well log data, sourced from multiple oil and gas fields known for their complex geological settings. This dataset includes several key geophysical logging measurements critical for lithology identification: SitID: a number used within AGS to identify wells; Depth: depth of an interval in meters from the Kelly Bushing elevation; SW: water saturation; PHI: porosity; W_Tar: mass percent bitumen; RW: water resistivity at formation temperature; and VSH: volume of shale. Each of these measurements provides insights into different rock properties that are indicative of specific lithological characteristics, like water resistivity logs that help in identifying fluid content and porosity (<xref ref-type="bibr" rid="B6">Asquith et al., 2004</xref>; <xref ref-type="bibr" rid="B15">Ellis and Singer, 2007</xref>).</p>
<p>The dataset was carefully curated from a series of drilled wells, each providing a continuous depth-registered record of measured attributes. Preprocessing involved cleaning the data by removing outlier values, interpolating missing data points using statistical techniques, and normalizing the features to a consistent scale to facilitate effective machine learning analysis (<xref ref-type="bibr" rid="B31">Md Abul Ehsan et al., 2019</xref>). The final dataset includes 18,847 individual log measurements, categorized into several lithological classes based on a combination of core sample analyses and expert geological interpretation. These classes include Cemented Sand, Coal, Sand, Sandy Shale, Shale and Shaly Sand (<xref ref-type="table" rid="T1">Table 1</xref>).</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Database Lithology outline category.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Lithology</th>
<th align="center">Sample dimension</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Cemented Sand</td>
<td align="center">64</td>
</tr>
<tr>
<td align="left">Coal</td>
<td align="center">4335</td>
</tr>
<tr>
<td align="left">Sand</td>
<td align="center">5,846</td>
</tr>
<tr>
<td align="left">Sandy Shale</td>
<td align="center">343</td>
</tr>
<tr>
<td align="left">Shale</td>
<td align="center">4402</td>
</tr>
<tr>
<td align="left">Shaly Sand</td>
<td align="center">3,817</td>
</tr>
<tr>
<td align="left">Undefined</td>
<td align="center">40</td>
</tr>
<tr>
<td align="left">Total</td>
<td align="center">18,847</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The database was randomly partitioned into training (60%), validation (20%), and test (20%) sets. This partitioning ensures that the model is trained on a representative data sample, validated to tune the model settings without overfitting, and finally tested on unseen data to objectively assess its predictive performance. Such a structured approach to data handling is crucial for developing robust artificial neural network models capable of accurately classifying complex lithological formations (<xref ref-type="bibr" rid="B8">Bishop, 2006</xref>).</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<title>3 Results</title>
<sec id="s3-1">
<title>3.1 Neural networks</title>
<p>The optimized and non-optimized artificial neural networks were developed for lithology prediction, employing the data described in the previous sections as inputs with MATLAB R2024a. The comprehensive analysis of the ANN models (2.0: optimized, 3.3: wide, 3.4: bilayer, and 3.5: tri-layer) (<xref ref-type="bibr" rid="B48">Wu and Zhou, 1993</xref>) offers a rich illustration of how ANN architecture and optimization strategies shape the performance of machine learning models (<xref ref-type="bibr" rid="B29">Manouchehrian et al., 2012</xref>) in terms of accuracy, cost, and error rates across both validation and test datasets. This detailed exploration reveals the subtleties of model behavior, offering critical insights that can guide the selection of optimal configurations for specific applications, emphasizing the interplay between model complexity, learning strategies, and performance outcomes (<xref ref-type="table" rid="T2">Table 2</xref>).</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Summary results table of the trained Neural Networks.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Model</th>
<th align="center">Preset</th>
<th align="center">Accuracy % (validation)</th>
<th align="center">Total cost (validation)</th>
<th align="center">Accuracy % (test)</th>
<th align="center">Total cost (test)</th>
<th align="center">Error rate % (validation)</th>
<th align="center">Error rate % (test)</th>
<th align="center">Hyperparameters</th>
<th align="center">Selected features</th>
<th align="center">Optimizer options</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">2</td>
<td align="center">Custom Neural Network</td>
<td align="center">96.69</td>
<td align="center">499</td>
<td align="center">97.21</td>
<td align="center">105</td>
<td align="center">3.31</td>
<td align="center">2.79</td>
<td align="center">Iteration limit: 1,000; Optimized Hyperparameters; Number of fully connected layers: 3; Activation: Sigmoid; Regularization strength (Lambda): 6.377e-08; Standardize data: Yes; First layer size: 232; Second layer size: 20; Third layer size: 88; Hyperparameter Search Range; Number of fully connected layers: 1&#x2013;3; Activation: ReLU, Tanh, Sigmoid, None; Standardize data: Yes, No; Regularization strength (Lambda): 6.6322e-10&#x2013;6.6322; First layer size: 1&#x2013;300; Second layer size: 1&#x2013;300; Third layer size: 1&#x2013;300</td>
<td align="center">6/6</td>
<td align="center">Optimizer: Bayesian optimization; Acquisition function: Expected improvement per second plus; Iterations: 30; Training time limit: false</td>
</tr>
<tr>
<td align="center">3.3</td>
<td align="center">Wide Neural Network</td>
<td align="center">96.45</td>
<td align="center">535</td>
<td align="center">96.98</td>
<td align="center">114</td>
<td align="center">3.55</td>
<td align="center">3.02</td>
<td align="center">Number of fully connected layers: 1; First layer size: 100; Activation: ReLU; Iteration limit: 1,000; Regularization strength (Lambda): 0; Standardize data: Yes</td>
<td align="center">6/6</td>
<td align="center">Not applicable</td>
</tr>
<tr>
<td align="center">3.4</td>
<td align="center">Bilayered Neural Network</td>
<td align="center">96.60</td>
<td align="center">513</td>
<td align="center">97.24</td>
<td align="center">104</td>
<td align="center">3.40</td>
<td align="center">2.76</td>
<td align="center">Number of fully connected layers: 2; First layer size: 10; Second layer size: 10; Activation: ReLU; Iteration limit: 1,000; Regularization strength (Lambda): 0; Standardize data: Yes</td>
<td align="center">6/6</td>
<td align="center">Not applicable</td>
</tr>
<tr>
<td align="center">3.5</td>
<td align="center">Trilayered Neural Network</td>
<td align="center">96.34</td>
<td align="center">552</td>
<td align="center">97.05</td>
<td align="center">111</td>
<td align="center">3.66</td>
<td align="center">2.95</td>
<td align="center">Number of fully connected layers: 3; First layer size: 10; Second layer size: 10; Third layer size: 10; Activation: ReLU; Iteration limit: 1,000; Regularization strength (Lambda): 0; Standardize data: Yes</td>
<td align="center">6/6</td>
<td align="center">Not applicable</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Model 2.0 stands out with its custom neural network architecture (<xref ref-type="bibr" rid="B18">Hallam et al., 2022</xref>; <xref ref-type="bibr" rid="B16">Ganer&#xf8;d et al., 2023</xref>; <xref ref-type="bibr" rid="B33">Neelakantan et al., 2024</xref>) and advanced Bayesian optimization (<xref ref-type="bibr" rid="B41">Pavlov et al., 2024</xref>). It achieves the highest validation accuracy of 96.69% and maintains substantial effectiveness in the test scenario with an accuracy of 97.21%. Notably, this model also exhibits the lowest validation cost (499) and a low test cost (105), as well as the lowest validation error rate (3.31%) and a low test error rate (2.79%). The superior performance of Model 2.0 can be attributed to its advanced optimization technique, which efficiently balances the trade-offs between complexity and performance (<xref ref-type="fig" rid="F2">Figures 2A</xref>, <xref ref-type="fig" rid="F3">3A</xref>).</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>BOANN confusion matrices for: <bold>(A)</bold>. Optimizable Neural Network (Model 2) <bold>(B)</bold>. Wide Neural Network (Model 3.3) <bold>(C)</bold>. Bilayered Neural Network (Model 3.4) <bold>(D)</bold>. Trilayered Neural Network (Model 3.5).</p>
</caption>
<graphic xlink:href="feart-12-1473325-g002.tif"/>
</fig>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>BOANN validation ROC Curves for: <bold>(A)</bold>. Optimizable Neural Network (Model 2) <bold>(B)</bold>. Wide Neural Network (Model 3.3). <bold>(C)</bold>. Bilayered Neural Network (Model 3.4) <bold>(D)</bold>. Trilayered Neural Network (Model 3.5).</p>
</caption>
<graphic xlink:href="feart-12-1473325-g003.tif"/>
</fig>
<p>Bayesian optimization, acknowledged for its efficiency in exploring parameter spaces, and optimizing performance objectives, improves the model&#x2019;s capability to generalize across different datasets, thus minimizing overfitting and ensuring robustness. This model reveals how advanced optimization strategies can considerably boost the efficiency of neural networks, making it an ideal choice for applications requiring high accuracy with constrained resource usage.</p>
<p>Model 3.3, described as a Wide Neural Network (<xref ref-type="bibr" rid="B44">Soltanmohammadi and Faroughi, 2023</xref>), exhibits somewhat lower performance metrics compared to the other models. With validation and test accuracies of 96.45% and 96.98%, respectively, and higher costs in both scenarios (535 in validation and 114 in testing), it reflects potential limitations in its architectural design and the absence of a specialized optimization approach. The higher error rates of 3.55% in validation and 3.02% in testing further suggest that this model may struggle with efficiency and generalization, potentially due to its wide structure not being complemented by an effective learning strategy. This emphasizes the importance of incorporating advanced optimization techniques for performance improvement of the neural networks, particularly in complex predictive tasks, where precision and cost-efficiency are crucial (<xref ref-type="fig" rid="F2">Figures 2B</xref>, <xref ref-type="fig" rid="F3">3B</xref>).</p>
<p>Model 3.4, with its Bilayered Neural Network structure, excels particularly in the test dataset, showcasing the highest accuracy of 97.24% and the lowest test cost of 104. This model&#x2019;s configuration appears to offer an optimal balance, providing sufficient complexity to effectively capture and model intricate data patterns without incurring excessive computational overhead. The low-test error rate of 2.76% underscores its capacity for excellent generalization, suggesting that the bilayered approach is particularly effective in environments where predictive accuracy is paramount, the performance outcomes indicate a potentially well-tuned setup that maximizes efficiency and minimizes costs in operational settings (<xref ref-type="fig" rid="F2">Figures 2C</xref>, <xref ref-type="fig" rid="F3">3C</xref>).</p>
<p>Model 3.5 represents the most complex network (<xref ref-type="bibr" rid="B40">Ozkaya and Al-Fahmi, 2022</xref>; <xref ref-type="bibr" rid="B39">Ommi and Hashemi, 2024</xref>) in this analysis, featuring a Trilayered Neural Network. It shows the lowest validation accuracy (96.34%) and the highest validation cost (552), which might indicate a tendency towards overfitting on the validation set due to its deeper network architecture. Nevertheless, it performs well in the test stage, achieving an accuracy of 97.05% at a cost of 111. This suggests that while deeper networks can effectively handle complex datasets, they require careful tuning and possibly more sophisticated optimization strategies to prevent overfitting and manage computational costs effectively. The higher validation error rate (3.66%) further points to the challenges associated with managing more complex models, emphasizing the need for precise model calibration and optimization (<xref ref-type="fig" rid="F2">Figures 2D</xref>, <xref ref-type="fig" rid="F3">3D</xref>).</p>
<p>In summary, this analysis and comparison delves deep into how different neural network architectures and their associated optimization strategies (<xref ref-type="bibr" rid="B4">Alyaev et al., 2021</xref>; <xref ref-type="bibr" rid="B45">Thomas et al., 2023</xref>) can dramatically affect machine learning outcomes (<xref ref-type="bibr" rid="B37">Nwaila et al., 2024</xref>). The comparison between these models elucidates a spectrum of behaviors and outcomes, from the highly efficient and robust performance of Model 2.0 with its advanced Bayesian optimization to the nuanced challenges faced by the deeper, more complex Model 3.5. Models 2.0 and 3.4 emerge as particularly effective, suggesting that a balanced approach to network design and optimization can yield superior results. This analysis not only provides a detailed understanding of each model&#x2019;s strengths and weaknesses, but also offers useful guidance for designing neural networks that are tailored to meet specific operational needs and performance criteria (<xref ref-type="bibr" rid="B23">Lawley et al., 2022</xref>; <xref ref-type="bibr" rid="B32">Nakamura, 2023</xref>). It offers a comprehensive blueprint for leveraging architectural and strategic optimizations to enhance the predictability, efficiency, and cost-effectiveness of neural network models in varied application scenarios.</p>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>4 Discussion</title>
<p>Artificial intelligence (AI), particularly using machine learning algorithms like artificial neural networks (ANNs) and optimization techniques such as Bayesian optimization, has transformed various industries, including geological exploration. This part explores the inferences drawn from recent research on AI-driven lithology identification, examines the limitations encountered, and proposes future directions to advance this field of study.</p>
<sec id="s4-1">
<title>4.1 Inferences drawn from research</title>
<p>The application of AI in lithology identification has yielded several key inferences.</p>
<sec id="s4-1-1">
<title>4.1.1 Effectiveness of AI in lithology identification</title>
<p>Recent studies, including those utilizing Bayesian optimization to optimize ANN architectures, have consistently demonstrated the effectiveness of AI in accurately identifying lithological formations. Models enhanced with Bayesian optimization (e.g., Model 2.0) have shown substantial advances in predictive accuracy compared to traditional methods. This underscores the potential of AI to automate and enhance the efficiency of geological exploration processes.</p>
</sec>
<sec id="s4-1-2">
<title>4.1.2 Optimization strategies and model performance</title>
<p>The conducted research highlights the crucial role of optimization strategies, such as Bayesian optimization, in enhancing model performance. By iteratively fine-tuning hyperparameters based on past performance, Bayesian optimization enables ANNs to achieve higher accuracy levels while mitigating overfitting risks. Models like the bilayered neural network (Model 3.4) exemplify how a balanced approach to architecture design and optimization can optimize predictive capabilities without compromising computational efficiency.</p>
</sec>
<sec id="s4-1-3">
<title>4.1.3 Generalization and transferability</title>
<p>AI models trained on specific datasets have shown varying degrees of generalization across different geological settings. While models like Model 2.0 demonstrated robust performance in validation and test datasets within the study&#x2019;s scope, challenges remain in extrapolating these findings to diverse geological terrains with unique lithological characteristics. Upcoming research should focus on improving model generalization through multi-modal data incorporation and transfer learning techniques.</p>
</sec>
<sec id="s4-1-4">
<title>4.1.4 Impact of computational resources</title>
<p>The study also underscores the impact of computational resources on AI model deployment and scalability. Deeper neural network architectures (e.g., Model 3.5) exhibited potential for higher accuracy but required significant computational power and time-intensive training processes. This limitation highlights the need for optimizing computational efficiency while maintaining model robustness, particularly in real-time or resource-constrained exploration environments.</p>
</sec>
</sec>
<sec id="s4-2">
<title>4.2 Limits encountered</title>
<p>Despite the promising findings, several limitations were encountered during the research.</p>
<sec id="s4-2-1">
<title>4.2.1 Dataset specificity and bias</title>
<p>The analysis relied on specific lithological datasets, potentially limiting the generalizability of findings to broader geological contexts. Dataset bias, inherent in geological data collection processes, can impact model performance and validity across different geological formations and exploration scenarios. Addressing dataset diversity and bias is essential for improving the relevance, robustness and applicability of AI models in real-world applications.</p>
</sec>
<sec id="s4-2-2">
<title>4.2.2 Computational complexity and resource constraints</title>
<p>Complex neural network architectures, although beneficial for capturing intricate lithological patterns, posed challenges in terms of computational complexity and resource-intensive training requirements. Balancing model complexity with computational efficiency remains a significant hurdle in deploying AI solutions for large-scale geological exploration and resource management tasks.</p>
</sec>
<sec id="s4-2-3">
<title>4.2.3 Model interpretability and transparency</title>
<p>The essential complexity of AI models, often results in limited interpretability and transparency in decision-making processes. Understanding how AI-derived predictions align with geological domain knowledge and expert insights is essential for building trust and confidence in AI-driven solutions within the geosciences community. Future research should prioritize developing interpretable AI models that facilitate meaningful collaboration between AI algorithms and domain experts.</p>
</sec>
</sec>
<sec id="s4-3">
<title>4.3 Future directions</title>
<p>Building upon the insights gained and addressing the identified limitations, several promising future directions for research in AI-driven lithology identification include the following.</p>
<sec id="s4-3-1">
<title>4.3.1 Advanced optimization techniques</title>
<p>Further exploration of advanced optimization techniques beyond Bayesian methods, such as evolutionary algorithms, reinforcement learning, or hybrid approaches, to enhance the efficiency and adaptability of AI models in geological exploration and resource management. These techniques can optimize not only hyperparameters but also model architectures and training strategies to improve performance across diverse geological settings.</p>
</sec>
<sec id="s4-3-2">
<title>4.3.2 Integration of multi-modal data sources</title>
<p>Integration of varied data sources, comprising remote sensing data, geological images and geochemical analyses, to supplement the feature space and advance the robustness of AI models for lithology identification. Multi-modal integration can enhance predictive accuracy, facilitate comprehensive geological insights, and mitigate the impact of dataset bias on model performance.</p>
</sec>
<sec id="s4-3-3">
<title>4.3.3 Transfer learning and domain adaptation</title>
<p>Application of transfer learning strategies to leverage pre-trained models for lithology identification tasks across different geological terrains. By transferring knowledge and models learned from one dataset to another, transfer learning can enhance generalization capabilities, speed up model training, and improve the scalability of AI solutions in diverse exploration scenarios.</p>
</sec>
<sec id="s4-3-4">
<title>4.3.4 Real-time application and deployment strategies</title>
<p>Development of real-time AI applications and deployment strategies tailored for operational use in geological exploration and resource management. Highlighting scalability, adaptability and reliability to dynamic environmental conditions will be critical in incorporating AI-driven technologies into decision-making processes and operational systems within the geosciences.</p>
</sec>
<sec id="s4-3-5">
<title>4.3.5 Collaborative research initiatives</title>
<p>Cooperation with academic institutions, industry partners and government agencies to validate AI model performance in real-world exploration situations. Integrating domain expertise and feedback from geological professionals can enhance model robustness, address practical challenges, and foster innovation in AI-driven technologies for sustainable resource exploration and management.</p>
</sec>
</sec>
<sec id="s4-4">
<title>4.4 Summary</title>
<p>The inferences drawn from recent research in AI-driven lithology identification underscore the transformative potential of AI technologies in revolutionizing geological exploration and resource management practices. Despite encountered limitations related to dataset specificity, computational complexity, and model interpretability, the field continues to evolve with promising advancements in optimization techniques, data integration strategies, and real-time deployment solutions. By addressing these challenges and pursuing innovative research paths, the geosciences community can harness the full potential of AI to realize more sustainable, efficient and accurate exploration outcomes in the future.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<title>5 Conclusion</title>
<p>In this study, we have conducted a thorough assessment of four distinct neural network models (2.0, 3.3, 3.4, and 3.5), analyzing their performance across critical metrics such as accuracy, cost, and error rates within validation and test scenarios. This comparative study has not only illuminated the influence of varying neural network architectures and optimization strategies on model efficiency but also underscored the importance of strategic optimization in achieving superior machine learning outcomes (<xref ref-type="bibr" rid="B11">Costa et al., 2023</xref>).</p>
<p>Model 2.0, featuring a custom neural network architecture optimized through Bayesian techniques, emerged as the most efficient model, showcasing high accuracy and minimal operational costs, coupled with the lowest error rates across both datasets. This model&#x2019;s success highlighted the effectiveness of sophisticated optimization strategies, which fine-tune model parameters to enhance generalization capabilities and prevent overfitting. In contrast, Model 3.3, a Wide Neural Network without specific optimization enhancements, demonstrated the limitations of increasing network size without commensurate advancements in learning strategies, resulting in higher costs and reduced performance. Model 3.4, a Bilayered Neural Network, excelled in test conditions, affirming that an optimal balance of model complexity and computational efficiency can yield significant benefits, particularly in predictive accuracy and cost management. Finally, Model 3.5, with its Trilayered Neural Network, illustrated the challenges and potential of deeper networks, which, despite their complexity, can be effective with proper tuning and optimization, especially in handling complex datasets.</p>
<p>Overall, this study reinforces the critical role of matching network architecture with robust optimization techniques to maximize the performance of neural networks. The conclusions from this study provide valuable insights for both practitioners and researchers, offering guidance on designing neural networks that are not only powerful but also efficient and adaptable to various applications. This work contributes significantly to machine learning in geosciences, highlighting the need for continuous innovation in network design and optimization strategies to enhance the predictability, efficiency, and practical utility of neural network models.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found below: <ext-link ext-link-type="uri" xlink:href="https://drive.google.com/drive/folders/16hTgxLc7SygD9IjnjK5IvWuP_oZXP8d9?usp=sharing">https://drive.google.com/drive/folders/16hTgxLc7SygD9IjnjK5IvWuP_oZXP8d9?usp&#x3d;sharing</ext-link>.</p>
</sec>
<sec sec-type="author-contributions" id="s7">
<title>Author contributions</title>
<p>SS: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Software, Resources, Supervision, Validation, Visualization, Writing&#x2013;original draft, Writing&#x2013;review and editing. AS: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing&#x2013;original draft, Writing&#x2013;review and editing. KA: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing&#x2013;original draft, Writing&#x2013;review and editing. AM: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing&#x2013;review and editing. MF: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing&#x2013;review and editing. BM: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing&#x2013;review and editing.</p>
</sec>
<sec sec-type="funding-information" id="s8">
<title>Funding</title>
<p>The author(s) declares that financial support was received for the research, authorship, and/or publication of this article. This research was funded by the Researchers Supporting Project Number (RSP2024R249), King Saud University, Riyadh, Saudi Arabia.</p>
</sec>
<ack>
<p>We would like to thank MathWorks and Datamine software for their assistance during the development of the work. Deep thanks and gratitude to the Researchers Supporting Project Number (RSP2024R249), King Saud University, Riyadh, Saudi Arabia, for funding this research article.</p>
</ack>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Adeniran</surname>
<given-names>A. A.</given-names>
</name>
<name>
<surname>Adebayo</surname>
<given-names>A. R.</given-names>
</name>
<name>
<surname>Salami</surname>
<given-names>H. O.</given-names>
</name>
<name>
<surname>Yahaya</surname>
<given-names>M. O.</given-names>
</name>
<name>
<surname>Abdulraheem</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A competitive ensemble model for permeability prediction in heterogeneous oil and gas reservoirs</article-title>. <source>Appl. Comput. Geosciences</source> <volume>1</volume>, <fpage>100004</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2019.100004</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Albarr&#xe1;n-Ord&#xe1;s</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Zosseder</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Uncertainties in 3-D stochastic geological modeling of fictive grain size distributions in detrital systems</article-title>. <source>Appl. Comput. Geosciences</source> <volume>19</volume>, <fpage>100127</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2023.100127</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Alf&#xe9;rez</surname>
<given-names>G. H.</given-names>
</name>
<name>
<surname>V&#xe1;zquez</surname>
<given-names>E. L.</given-names>
</name>
<name>
<surname>Mart&#xed;nez Ardila</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Clausen</surname>
<given-names>B. L.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Automatic classification of plutonic rocks with deep learning</article-title>. <source>Appl. Comput. Geosciences</source> <volume>10</volume>, <fpage>100061</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2021.100061</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Alyaev</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ivanova</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Holsaeter</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bratvold</surname>
<given-names>R. B.</given-names>
</name>
<name>
<surname>Bendiksen</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>An interactive sequential-decision benchmark from geosteering</article-title>. <source>Appl. Comput. Geosciences</source> <volume>12</volume>, <fpage>100072</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2021.100072</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Asante-Okyere</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Shen</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Osei</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Enhanced machine learning tree classifiers for lithology identification using Bayesian optimization</article-title>. <source>Appl. Comput. Geosciences</source> <volume>16</volume>, <fpage>100100</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2022.100100</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Asquith</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Krygowski</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Henderson</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Hurley</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2004</year>). <source>Basic well log analysis</source>. <publisher-name>American Association of Petroleum Geologists</publisher-name>. <pub-id pub-id-type="doi">10.1306/Mth16823</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bischl</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Binder</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lang</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pielok</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Richter</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Coors</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Hyperparameter optimization: foundations, algorithms, best practices, and open challenges</article-title>. <source>WIREs Data Min. Knowl. Discov.</source> <volume>13</volume>, <fpage>e1484</fpage>. <pub-id pub-id-type="doi">10.1002/widm.1484</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Bishop</surname>
<given-names>C. M.</given-names>
</name>
</person-group> (<year>2006</year>). <source>Pattern recognition and machine learning</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>Springer</publisher-name>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="http://archive.org/details/patternrecogniti0000bish">http://archive.org/details/patternrecogniti0000bish</ext-link> (Accessed April 18, 2024)</comment>.</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bonali</surname>
<given-names>F. L.</given-names>
</name>
<name>
<surname>Vitello</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Kearl</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Tibaldi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Whitworth</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Antoniou</surname>
<given-names>V.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>GeaVR: an open-source tools package for geological-structural exploration and data collection using immersive virtual reality</article-title>. <source>Appl. Comput. Geosciences</source> <volume>21</volume>, <fpage>100156</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2024.100156</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Zheng</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>A novel few-shot learning framework for rock images dually driven by data and knowledge</article-title>. <source>Appl. Comput. Geosciences</source> <volume>21</volume>, <fpage>100155</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2024.100155</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Costa</surname>
<given-names>F. R.</given-names>
</name>
<name>
<surname>Carneiro</surname>
<given-names>C. de C.</given-names>
</name>
<name>
<surname>Ulsen</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Imputation of gold recovery data from low grade gold ore using artificial neural network</article-title>. <source>Minerals</source> <volume>13</volume>, <fpage>340</fpage>. <pub-id pub-id-type="doi">10.3390/min13030340</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Diederik</surname>
<given-names>P. K.</given-names>
</name>
<name>
<surname>Jimmy</surname>
<given-names>Ba</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Adam: a method for stochastic optimization</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="http://archive.org/details/arxiv-1412.6980">http://archive.org/details/arxiv-1412.6980</ext-link> (Accessed April 18, 2024)</comment>.</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Djimadoumngar</surname>
<given-names>K.-N.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Parallel investigations of remote sensing and ground-truth Lake Chad&#x2019;s level data using statistical and machine learning methods</article-title>. <source>Appl. Comput. Geosciences</source> <volume>20</volume>, <fpage>100135</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2023.100135</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dutta</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bandopadhyay</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ganguli</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Misra</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Machine learning algorithms and their application to ore reserve estimation of sparse and imprecise data</article-title>. <source>J. Intelligent Learn. Syst. Appl. 02</source> <volume>02</volume>, <fpage>86</fpage>&#x2013;<lpage>96</lpage>. <pub-id pub-id-type="doi">10.4236/jilsa.2010.22012</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="book">
<person-group person-group-type="editor">
<name>
<surname>Ellis</surname>
<given-names>D. V.</given-names>
</name>
<name>
<surname>Singer</surname>
<given-names>J. M.</given-names>
</name>
</person-group> (<year>2007</year>). <source>Well logging for earth scientists</source> (<publisher-loc>Dordrecht</publisher-loc>: <publisher-name>Springer Netherlands</publisher-name>). <pub-id pub-id-type="doi">10.1007/978-1-4020-4602-5</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ganer&#xf8;d</surname>
<given-names>A. J.</given-names>
</name>
<name>
<surname>Bakkestuen</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Calovi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Fredin</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>R&#xf8;d</surname>
<given-names>J. K.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Where are the outcrops? Automatic delineation of bedrock from sediments using Deep-Learning techniques</article-title>. <source>Appl. Comput. Geosciences</source> <volume>18</volume>, <fpage>100119</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2023.100119</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Glorot</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Bordes</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bengio</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2011</year>). &#x201c;<article-title>Deep sparse rectifier neural networks</article-title>,&#x201d; in <source>Proceedings of the fourteenth international conference on artificial intelligence and statistics</source> (<publisher-name>JMLR Workshop and Conference Proceedings</publisher-name>), <fpage>315</fpage>&#x2013;<lpage>323</lpage>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://proceedings.mlr.press/v15/glorot11a.html">https://proceedings.mlr.press/v15/glorot11a.html</ext-link> (Accessed April 18, 2024)</comment>.</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hallam</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mukherjee</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Chassagne</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Multivariate imputation via chained equations for elastic well log imputation and prediction</article-title>. <source>Appl. Comput. Geosciences</source> <volume>14</volume>, <fpage>100083</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2022.100083</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hastie</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Tibshirani</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Friedman</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>The elements of statistical learning</article-title>. <pub-id pub-id-type="doi">10.1007/978-0-387-84858-7</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Heaton</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Ian goodfellow, yoshua bengio, and aaron courville: deep learning</article-title>. <source>Genet. Program Evolvable Mach.</source> <volume>19</volume>, <fpage>305</fpage>&#x2013;<lpage>307</lpage>. <pub-id pub-id-type="doi">10.1007/s10710-017-9314-z</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Houshmand</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>GoodFellow</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Esmaeili</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ord&#xf3;&#xf1;ez Calder&#xf3;n</surname>
<given-names>J. C.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Rock type classification based on petrophysical, geochemical, and core imaging data using machine and deep learning techniques</article-title>. <source>Appl. Comput. Geosciences</source> <volume>16</volume>, <fpage>100104</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2022.100104</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jiang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Lithology identification from well-log curves via neural networks with additional geologic constraint</article-title>. <source>GEOPHYSICS</source> <volume>86</volume>, <fpage>IM85</fpage>&#x2013;<lpage>IM100</lpage>. <pub-id pub-id-type="doi">10.1190/geo2020-0676.1</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lawley</surname>
<given-names>C. J. M.</given-names>
</name>
<name>
<surname>Raimondo</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Brin</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zakharov</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kur</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Geoscience language models and their intrinsic evaluation</article-title>. <source>Appl. Comput. Geosciences</source> <volume>14</volume>, <fpage>100084</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2022.100084</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>LeCun</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Bengio</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Hinton</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Deep learning</article-title>. <source>Nature</source> <volume>521</volume>, <fpage>436</fpage>&#x2013;<lpage>444</lpage>. <pub-id pub-id-type="doi">10.1038/nature14539</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>J. H.</given-names>
</name>
<name>
<surname>Han</surname>
<given-names>M.-K.</given-names>
</name>
<name>
<surname>Ko</surname>
<given-names>D. W.</given-names>
</name>
<name>
<surname>Suh</surname>
<given-names>I. H.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>From big to small: multi-scale local planar guidance for monocular depth estimation</article-title>. <pub-id pub-id-type="doi">10.48550/arXiv.1907.10326</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Guan</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ye</surname>
<given-names>Q.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>RemoteCLIP: a vision language foundation model for remote sensing</article-title>. <source>IEEE Trans. Geosci. Remote Sens.</source> <volume>62</volume>, <fpage>1</fpage>&#x2013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1109/tgrs.2024.3390838</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lozano</surname>
<given-names>A. C.</given-names>
</name>
<name>
<surname>&#x15a;wirszcz</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Abe</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Group orthogonal matching pursuit for logistic regression</article-title>. <source>J. Mach. Learn. Res.</source> <volume>15</volume>, <fpage>452</fpage>&#x2013;<lpage>460</lpage>.</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lui</surname>
<given-names>T. C. C.</given-names>
</name>
<name>
<surname>Gregory</surname>
<given-names>D. D.</given-names>
</name>
<name>
<surname>Anderson</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>W.-S.</given-names>
</name>
<name>
<surname>Cowling</surname>
<given-names>S. A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Applying machine learning methods to predict geology using soil sample geochemistry</article-title>. <source>Appl. Comput. Geosciences</source> <volume>16</volume>, <fpage>100094</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2022.100094</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Manouchehrian</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sharifzadeh</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Moghadam</surname>
<given-names>R. H.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Application of artificial neural networks and multivariate statistics to estimate UCS using textural characteristics</article-title>. <source>Int. J. Min. Sci. Technol.</source> <volume>22</volume>, <fpage>229</fpage>&#x2013;<lpage>236</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijmst.2011.08.013</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>McCormick</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Heaven</surname>
<given-names>R. E.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The British Geological Survey Rock Classification Scheme, its representation as linked data, and a comparison with some other lithology vocabularies</article-title>. <source>Appl. Comput. Geosciences</source> <volume>20</volume>, <fpage>100140</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2023.100140</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Md Abul Ehsan</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Begum</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Ilham</surname>
<given-names>S. J.</given-names>
</name>
<name>
<surname>Khan</surname>
<given-names>R. S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Advanced wind speed prediction using convective weather variables through machine learning application</article-title>. <source>Appl. Comput. Geosciences</source> <volume>1</volume>, <fpage>100002</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2019.100002</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nakamura</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A practical approach for discriminating tectonic settings of basaltic rocks using machine learning</article-title>. <source>Appl. Comput. Geosciences</source> <volume>19</volume>, <fpage>100132</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2023.100132</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Neelakantan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Norell</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Hansson</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>L&#xe4;ngkvist</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Loutfi</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Neural network approach for shape-based euhedral pyrite identification in X-ray CT data with adversarial unsupervised domain adaptation</article-title>. <source>Appl. Comput. Geosciences</source> <volume>21</volume>, <fpage>100153</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2023.100153</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ng</surname>
<given-names>C. S. W.</given-names>
</name>
<name>
<surname>Jahanbani Ghahfarokhi</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Adaptive proxy-based robust production optimization with multilayer perceptron</article-title>. <source>Appl. Comput. Geosciences</source> <volume>16</volume>, <fpage>100103</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2022.100103</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ntibahanana</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Luemba</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Tondozi</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Enhancing reservoir porosity prediction from acoustic impedance and lithofacies using a weighted ensemble deep learning approach</article-title>. <source>Appl. Comput. Geosciences</source> <volume>16</volume>, <fpage>100106</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2022.100106</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nuzzo</surname>
<given-names>R. L.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>An introduction to bayesian data analysis for correlations</article-title>. <source>PM&#x26;R</source> <volume>9</volume>, <fpage>1278</fpage>&#x2013;<lpage>1282</lpage>. <pub-id pub-id-type="doi">10.1016/j.pmrj.2017.11.003</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Nwaila</surname>
<given-names>G. T.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>S. E.</given-names>
</name>
<name>
<surname>Bourdeau</surname>
<given-names>J. E.</given-names>
</name>
<name>
<surname>Frimmel</surname>
<given-names>H. E.</given-names>
</name>
<name>
<surname>Ghorbani</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2024</year>). <source>Spatial interpolation using machine learning: from patterns and regularities to block models</source>. <publisher-name>Springer US</publisher-name>. <pub-id pub-id-type="doi">10.1007/s11053-023-10280-7</pub-id>
</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Olmos-de-Aguilera</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Campos</surname>
<given-names>P. G.</given-names>
</name>
<name>
<surname>Risso</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Error reduction in long-term mine planning estimates using deep learning models</article-title>. <source>Expert Syst. Appl.</source> <volume>217</volume>, <fpage>119487</fpage>. <pub-id pub-id-type="doi">10.1016/j.eswa.2022.119487</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ommi</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Hashemi</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Machine learning technique in the north zagros earthquake prediction</article-title>. <source>Appl. Comput. Geosciences</source> <volume>22</volume>, <fpage>100163</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2024.100163</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ozkaya</surname>
<given-names>S. I.</given-names>
</name>
<name>
<surname>Al-Fahmi</surname>
<given-names>M. M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Estimating size of finite fracture networks in layered reservoirs</article-title>. <source>Appl. Comput. Geosciences</source> <volume>15</volume>, <fpage>100089</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2022.100089</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pavlov</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Peshkov</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Katterbauer</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Alshehri</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Geosteering based on resistivity data and evolutionary optimization algorithm</article-title>. <source>Appl. Comput. Geosciences</source> <volume>22</volume>, <fpage>100162</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2024.100162</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shahriari</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Swersky</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Adams</surname>
<given-names>R. P.</given-names>
</name>
<name>
<surname>de Freitas</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Taking the human out of the loop: a review of bayesian optimization</article-title>. <source>Proc. IEEE</source> <volume>104</volume>, <fpage>148</fpage>&#x2013;<lpage>175</lpage>. <pub-id pub-id-type="doi">10.1109/JPROC.2015.2494218</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Snoek</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Larochelle</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Adams</surname>
<given-names>R. P.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Practical Bayesian optimization of machine learning algorithms</article-title>. <pub-id pub-id-type="doi">10.48550/arXiv.1206.2944</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Soltanmohammadi</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Faroughi</surname>
<given-names>S. A.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A comparative analysis of super-resolution techniques for enhancing micro-CT images of carbonate rocks</article-title>. <source>Appl. Comput. Geosciences</source> <volume>20</volume>, <fpage>100143</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2023.100143</pub-id>
</citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Thomas</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Sharma</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Kumar Gupta</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Use of AI tools to understand and model surface-interaction based EOR processes</article-title>. <source>Appl. Comput. Geosciences</source> <volume>17</volume>, <fpage>100111</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2022.100111</pub-id>
</citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tilahun</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Korus</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>3D hydrostratigraphic and hydraulic conductivity modelling using supervised machine learning</article-title>. <source>Appl. Comput. Geosciences</source> <volume>19</volume>, <fpage>100122</fpage>. <pub-id pub-id-type="doi">10.1016/j.acags.2023.100122</pub-id>
</citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>van de Schoot</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Depaoli</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>King</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Kramer</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>M&#xe4;rtens</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Tadesse</surname>
<given-names>M. G.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Bayesian statistics and modelling</article-title>. <source>Nat. Rev. Methods Prim.</source> <volume>1</volume>, <fpage>1</fpage>&#x2013;<lpage>26</lpage>. <pub-id pub-id-type="doi">10.1038/s43586-020-00001-2</pub-id>
</citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>1993</year>). <article-title>Reserve estimation using neural network techniques</article-title>. <source>Comput. Geosciences</source> <volume>19</volume>, <fpage>567</fpage>&#x2013;<lpage>575</lpage>. <pub-id pub-id-type="doi">10.1016/0098-3004(93)90082-G</pub-id>
</citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xie</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Jin</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A semi-supervised coarse-to-fine approach with Bayesian optimization for lithology identification</article-title>. <source>Earth Sci. Inf.</source> <volume>16</volume>, <fpage>2285</fpage>&#x2013;<lpage>2305</lpage>. <pub-id pub-id-type="doi">10.1007/s12145-023-01014-7</pub-id>
</citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xiong</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Qian</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Material based object tracking in hyperspectral videos</article-title>. <source>Trans. Img. Proc.</source> <volume>29</volume>, <fpage>3719</fpage>&#x2013;<lpage>3733</lpage>. <pub-id pub-id-type="doi">10.1109/TIP.2020.2965302</pub-id>
</citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xiong</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Qian</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>MAC-net: model-aided nonlocal neural network for hyperspectral image denoising</article-title>. <source>IEEE Trans. Geoscience Remote Sens.</source> <volume>60</volume>, <fpage>1</fpage>&#x2013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1109/TGRS.2021.3131878</pub-id>
</citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Apley</surname>
<given-names>D. W.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Bayesian optimization for materials design with mixed quantitative and qualitative variables</article-title>. <source>Sci. Rep.</source> <volume>10</volume>, <fpage>4924</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-020-60652-9</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>