<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<article article-type="methods-article" dtd-version="2.3" xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Nucl. Eng.</journal-id>
<journal-title>Frontiers in Nuclear Engineering</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Nucl. Eng.</abbrev-journal-title>
<issn pub-type="epub">2813-3412</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1083164</article-id>
<article-id pub-id-type="doi">10.3389/fnuen.2022.1083164</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Nuclear Engineering</subject>
<subj-group>
<subject>Methods</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>A probabilistic inverse prediction method for predicting plutonium processing conditions</article-title>
<alt-title alt-title-type="left-running-head">Ausdemore et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fnuen.2022.1083164">10.3389/fnuen.2022.1083164</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Ausdemore</surname>
<given-names>Madeline A.</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1982273/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>McCombs</surname>
<given-names>Audrey</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2112087/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ries</surname>
<given-names>Daniel</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Adah</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Shuler</surname>
<given-names>Kurtis</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Tucker</surname>
<given-names>J. Derek</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2131361/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Goode</surname>
<given-names>Katherine</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Huerta</surname>
<given-names>J. Gabriel</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Los Alamos National Laboratory (DOE)</institution>, <addr-line>Los Alamos</addr-line>, <addr-line>NM</addr-line>, <country>United States</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Sandia National Laboratories (DOE)</institution>, <addr-line>Albuquerque</addr-line>, <addr-line>NM</addr-line>, <country>United States</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1602037/overview">Robin Taylor</ext-link>, National Nuclear Laboratory, United Kingdom</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/775155/overview">Mavrik Zavarin</ext-link>, Lawrence Livermore National Laboratory (DOE), United States</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2105050/overview">Christian Ekberg</ext-link>, Chalmers University of Technology, Sweden</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Madeline A. Ausdemore, <email>mausdemore@lanl.gov</email>
</corresp>
<fn fn-type="other">
<p>This article was submitted to Nuclear Materials, a section of the journal Frontiers in Nuclear Engineering</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>21</day>
<month>12</month>
<year>2022</year>
</pub-date>
<pub-date pub-type="collection">
<year>2022</year>
</pub-date>
<volume>1</volume>
<elocation-id>1083164</elocation-id>
<history>
<date date-type="received">
<day>28</day>
<month>10</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>07</day>
<month>12</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2022 Ausdemore, McCombs, Ries, Zhang, Shuler, Tucker, Goode and Huerta.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Ausdemore, McCombs, Ries, Zhang, Shuler, Tucker, Goode and Huerta</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>In the past decade, nuclear chemists and physicists have been conducting studies to investigate the signatures associated with the production of special nuclear material (SNM). In particular, these studies aim to determine how various processing parameters impact the physical, chemical, and morphological properties of the resulting special nuclear material. By better understanding how these properties relate to the processing parameters, scientists can better contribute to nuclear forensics investigations by quantifying their results and ultimately shortening the forensic timeline. This paper aims to statistically analyze and quantify the relationships that exist between the processing conditions used in these experiments and the various properties of the nuclear end-product by invoking inverse methods. In particular, these methods make use of Bayesian Adaptive Spline Surface models in conjunction with Bayesian model calibration techniques to probabilistically determine processing conditions as an inverse function of morphological characteristics. Not only does the model presented in this paper allow for providing point estimates of a sample of special nuclear material, but it also incorporates uncertainty into these predictions. This model proves sufficient for predicting processing conditions within a standard deviation of the observed processing conditions, on average, provides a solid foundation for future work in predicting processing conditions of particles of special nuclear material using only their observed morphological characteristics, and is generalizable to the field of chemometrics for applicability across different materials.</p>
</abstract>
<kwd-group>
<kwd>Bayesian analysis</kwd>
<kwd>inverse prediction methods</kwd>
<kwd>nuclear forensics</kwd>
<kwd>nuclear engineering</kwd>
<kwd>uncertainty quantification</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction to nuclear forensics</title>
<p>In 2011, the Department of Homeland Security (DHS) National Technical Nuclear Forensic Center (NTNFC) hosted a panel of Plutonium (Pu) experts to develop a plan for advancing forensic analysis of Pu materials. During this conversation, the experts concluded that different production processes produce final products with different characteristics. They hypothesized that these different characteristics, or &#x201c;signatures&#x201d;, on the final product could potentially allow a forensic analyst to determine which processes were used to produce the Special Nuclear Material (SNM), and, in turn, make inferences on where the material originated.</p>
<p>As a result of these discussions, scientists at Pacific Northwest National Laboratory (PNNL) conducted an experiment designed by statisticians at Los Alamos National Laboratory (LANL) and Sandia National Laboratories (SNL) to replicate historical and modern Pu processing methodologies and conditions. This experiment consisted of 76 runs, where each run considered the same set of nine processing parameters, whose values intentionally varied from run to run. For each run, the resulting SNM was imaged with a scanning electron microscope (SEM) to generate images of the various particles of Pu. These images were then segmented using LANL&#x2019;s Morphological Analysis for Material Attribution (MAMA) software (<xref ref-type="bibr" rid="B18">Porter et al., 2016</xref>). This post-processing segmentation extracts the different particles that are present in each SEM image, and generates measurements based on the physical and morphological characteristics of the particles. Such measurements include particle areas, aspect ratios, convexities, circularities, gradients, and shadings (<xref ref-type="bibr" rid="B22">Zhang et al., 2021</xref>).</p>
<p>This paper serves to statistically relate the processing conditions of the different runs to the morphological and physical characteristics of the resulting Pu particles. In particular, we consider Pu particles that were processed using a solid oxalate feed, and particles that were processed in a 0.9&#xa0;M oxalate solution. We relate the observed MAMA characteristics (which include, but are not limited to, those listed above) to the processing conditions used to produce the samples of Pu (which include, but are not limited to, temperature, nitric acid concentration, and Pu concentration) using inverse prediction methods. We consider Bayesian methodologies to quantify our predictions on the processing conditions, and to naturally incorporate uncertainty into these predictions.</p>
<p>We discuss these statistical methodologies in <xref ref-type="sec" rid="s2">Section 2</xref>; in <xref ref-type="sec" rid="s3">Section 3</xref>, we outline the specifics of the problem from a statistical perspective; in <xref ref-type="sec" rid="s4">Section 4</xref>, we apply the methodologies to a dataset obtained from studying actual Pu particles generated under intentionally varied processing conditions within a design of experiments framework (rather than a simulated dataset); and in <xref ref-type="sec" rid="s5">Section 5</xref>, we discuss the implications of our results.</p>
</sec>
<sec id="s2">
<title>2 An inverse prediction framework</title>
<p>To study the relationship between the processing parameters and the particle characteristics, we consider an inverse prediction framework. The traditional regression problem involves making predictions about responses, given a set of explanatory or input predictors. As a simple example, we may be interested in determining the weight of an individual, given their height, sex, and nationality. Conversely, the inverse prediction framework reverses the problem and so considers making predictions about the explanatory predictors, given a set of responses. Following the simple example above, we would now be interested in determining the height, sex, and nationality of an individual, based on their weight. Inverse prediction is used across a variety of disciplines, and, in particular, has been used in forensic science and nuclear forensics [see, for example, <xref ref-type="bibr" rid="B14">Lewis et al. (2018)</xref>, <xref ref-type="bibr" rid="B19">Ries et al. (2018)</xref>, and <xref ref-type="bibr" rid="B20">Ries et al. (2022)</xref>].</p>
<p>Mathematically, consider a <italic>q</italic>-dimensional response vector, <inline-formula id="inf1">
<mml:math id="m1">
<mml:mi mathvariant="bold-italic">y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2032;</mml:mo>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula>, and a <italic>p</italic>-dimensional input vector <inline-formula id="inf2">
<mml:math id="m2">
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2032;</mml:mo>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula>. We express the relationship between <bold>
<italic>x</italic>
</bold> and <bold>
<italic>y</italic>
</bold> as<disp-formula id="e1">
<mml:math id="m3">
<mml:mi mathvariant="bold-italic">y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>g</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b8;</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>&#x3b5;</mml:mi>
<mml:mo>,</mml:mo>
</mml:math>
<label>(1)</label>
</disp-formula>where <inline-formula id="inf3">
<mml:math id="m4">
<mml:mi>g</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x22c5;</mml:mo>
</mml:mrow>
</mml:mfenced>
</mml:math>
</inline-formula> is the true underlying physical phenomenon that is responsible for producing the results, which are typically represented by an emulator or surrogate that mimics the outputs in terms of the inputs, <bold>
<italic>&#x3b8;</italic>
</bold> is a vector of model parameters, and <italic>&#x25b;</italic> is a random vector that captures noise present in the observed data.</p>
<p>As mentioned above, the goal of the inverse prediction framework is to predict the value of the input variables <bold>
<italic>x</italic>
</bold>&#x2032; that produced a new observation <bold>
<italic>y</italic>
</bold>&#x2032;. There are two approaches by which we can learn about <bold>
<italic>x</italic>
</bold>&#x2032;: We can either 1) construct a model that directly predicts <bold>
<italic>x</italic>
</bold>&#x2032; as a function of <bold>
<italic>y</italic>
</bold>&#x2032;, or we can 2) invert a traditional forward model that predicts <bold>
<italic>y</italic>
</bold>&#x2032; as a function of <bold>
<italic>x</italic>
</bold>&#x2032;. We will refer to the first approach as the &#x201c;direct&#x201d; model, and the second approach as the &#x201c;inverse&#x201d; model. It should be noted that, while it is more convenient to construct the &#x201c;direct&#x201d; model, this approach may violate regression assumptions. For example, standard linear regression models assume that the input variables are measured with negligible error. By constructing a model that directly predicts <bold>
<italic>x</italic>
</bold>&#x2032; as a function of <bold>
<italic>y</italic>
</bold>&#x2032;, we are treating <bold>
<italic>x</italic>
</bold> as the output variable and <bold>
<italic>y</italic>
</bold> as the input variable and so this assumption no longer holds. Additionally, the &#x201c;direct&#x201d; model may not be well-suited for optimal design of experiments, since the vast majority of literature in design of experiments does not consider the inverse problem in its formulation (<xref ref-type="bibr" rid="B1">Anderson-Cook et al., 2015</xref>; <xref ref-type="bibr" rid="B2">Anderson-Cook et al., 2016</xref>).</p>
<p>In this paper, we use Bayesian Model Calibration (<xref ref-type="bibr" rid="B11">Kennedy and O&#x2019;Hagan, 2001</xref>; <xref ref-type="bibr" rid="B10">Higdon et al., 2004</xref>, <xref ref-type="bibr" rid="B9">2008</xref>; <xref ref-type="bibr" rid="B21">Walters et al., 2018</xref>; <xref ref-type="bibr" rid="B13">Lee et al., 2019</xref>; <xref ref-type="bibr" rid="B16">Nguyen et al., 2021</xref>) to approach the inverse prediction framework. Model calibration is a process to estimate, or calibrate, model parameters in the context of an input to output relationship, and falls under the second method (the &#x201c;inverse&#x201d; model) discussed above. More specifically, we are interested in using a calibration approach to make predictions about the processing conditions that were used to produce samples of Pu. Each of these samples consists of several particles whose morphological characteristics contain information pertaining to the conditions under which they were produced. By studying the information contained in these particles, we can infer the associated processing conditions.</p>
<p>In this paper, we use a fully Bayesian adaptive spline surfaces (BASS) framework to model or build an emulator to approximate the underlying relationship between the inputs and outputs (<xref ref-type="bibr" rid="B6">Francom et al., 2018</xref>; <xref ref-type="bibr" rid="B5">Francom et al., 2019</xref>; <xref ref-type="bibr" rid="B4">Francom and Sans&#xf3;, 2020</xref>)<xref ref-type="fn" rid="fn1">
<sup>1</sup>
</xref>. Suppose <bold>
<italic>y</italic>
</bold> is a vector of morphological characteristics associated with a sample of Pu, and <bold>
<italic>x</italic>
</bold> is a vector of processing conditions used to produce a sample of Pu. Without loss of generality, suppose that each <italic>x</italic>
<sub>1</sub>, <italic>x</italic>
<sub>2</sub>, <italic>&#x2026;</italic>, <italic>x</italic>
<sub>
<italic>p</italic>
</sub> &#x2208; [0, 1]. The input to output relationship is modeled using<disp-formula id="e2">
<mml:math id="m5">
<mml:mi mathvariant="bold-italic">y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:munderover accentunder="false" accent="true">
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>M</mml:mi>
</mml:mrow>
</mml:munderover>
<mml:msub>
<mml:mrow>
<mml:mi>a</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">x</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b5;</mml:mi>
<mml:mo>,</mml:mo>
</mml:math>
<label>(2)</label>
</disp-formula>where <italic>a</italic>
<sub>0</sub> is the intercept, <italic>B</italic>
<sub>
<italic>m</italic>
</sub>(<bold>
<italic>x</italic>
</bold>) is a basis function on the input variables <bold>
<italic>x</italic>
</bold>, <italic>a</italic>
<sub>
<italic>m</italic>
</sub> is the coefficient for the <italic>mth</italic> basis function, and <bold>
<italic>&#x25b;</italic>
</bold> &#x223c; <italic>MVN</italic>(<bold>0</bold>, <bold>&#x3a3;</bold>) (<xref ref-type="bibr" rid="B4">Francom and Sans&#xf3;, 2020</xref>). We consider priors for each of the parameters, <bold>
<italic>a</italic>
</bold>, <italic>&#x3c3;</italic>
<sup>2</sup>, and <italic>M</italic>. Note that, because we define a prior over <italic>M</italic>, the number of basis functions is not fixed, and varies throughout the sampling process. These parameters are sampled using a Markov Chain Monte Carlo (MCMC) process. The samples obtained from this process constitute the posterior distribution of the input parameters, given the observed output parameters.</p>
<p>Upon defining the appropriate BASS model, we perform Bayesian Model Calibration, which allows us to estimate the input parameters that make the model best match the data provided by the output parameters. In this instance, we do not include a discrepancy term. Ideally, a discrepancy term would capture model bias error (i.e., how well a physical model approximates reality), which, in our case, is challenging to handle. Future work will include incorporating this discrepancy term, as well as exploring alternative emulators that are able to better capture the relationship between the input and the output parameters. For more details about Bayesian Model Calibration, see <xref ref-type="bibr" rid="B11">Kennedy and O&#x2019;Hagan (2001)</xref>, <xref ref-type="bibr" rid="B10">Higdon et al. (2004)</xref>, <xref ref-type="bibr" rid="B9">Higdon et al. (2008)</xref>, <xref ref-type="bibr" rid="B21">Walters et al. (2018)</xref>, and <xref ref-type="bibr" rid="B16">Nguyen et al. (2021)</xref>.</p>
</sec>
<sec id="s3">
<title>3 Inverse prediction for Pu processing conditions</title>
<p>Suppose that run <inline-formula id="inf4">
<mml:math id="m6">
<mml:mi>r</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:mfenced open="{" close="}">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:math>
</inline-formula> results in <italic>n</italic>
<sub>
<italic>r</italic>
</sub> images and <italic>L</italic>
<sub>
<italic>r</italic>
</sub> particles. The number of images varies between runs, and the number of particles varies between images, so that the number of particles per run is not consistent. For our data, <italic>R</italic> &#x3d; 76. Each of the runs in our experiment yielded between 5 and 176 images per run, and between 81 and 4,643 particles per run. By using information contained in these runs, we can make predictions about the processing conditions of a new run. In this scenario, our input parameters include processing conditions such as temperature, nitric acid, and Pu concentrations, and responses include morphological characteristics such as particle shape and size.</p>
<p>
<xref ref-type="fig" rid="F1">Figure 1</xref> demonstrates two different particles from two different runs, each with different processing conditions. Note that, while size and shape are useful for determining processing conditions of a sample, far more information is required to accurately predict the processing conditions of these two particles. For example, we can see that, while these particles are produced under entirely different processing conditions, they do maintain physical similarities, alluding to the difficulty of this problem. While both particles are made up of flat sheets, we can see that the arrangement of and the angles between these sheets do differ from one another. This information is captured by segmenting the particles using LANL&#x2019;s MAMA software, which allows for capturing information about the morphological characteristics, such as area, diameter, and aspect ratio. By including the information from the MAMA software in our model, we can better distinguish between particles from different runs.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Examples of particle textures resulting from two different runs, with two different sets of processing conditions.</p>
</caption>
<graphic xlink:href="fnuen-01-1083164-g001.tif"/>
</fig>
<p>From <xref ref-type="sec" rid="s2">Section 2</xref>, we have that <bold>
<italic>y</italic>
</bold> is a <italic>p</italic>-vector of morphological characteristics associated with a sample of Pu, and <bold>
<italic>x</italic>
</bold> is the <italic>q</italic>-vector of processing conditions used to produce that sample of Pu. Given our sample of <italic>L</italic>
<sub>
<italic>r</italic>
</sub> particles for each of our 76 runs, <bold>
<italic>Y</italic>
</bold>
<sub>
<italic>r</italic>
</sub> is the <italic>L</italic>
<sub>
<italic>r</italic>
</sub> &#xd7; <italic>p</italic> matrix of morphological characteristics, where each row of <bold>
<italic>Y</italic>
</bold>
<sub>
<italic>r</italic>
</sub> corresponds to the morphological characteristics of the <italic>L</italic>
<sub>
<italic>r</italic>
</sub> particles associated with run <italic>r</italic> &#x2208; {1, <italic>&#x2026;</italic>, 76}. Our full matrix of morphological characteristics is then given by the <inline-formula id="inf5">
<mml:math id="m7">
<mml:msubsup>
<mml:mrow>
<mml:mo movablelimits="false" form="prefix">&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>76</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:msub>
<mml:mrow>
<mml:mi>L</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#xd7;</mml:mo>
<mml:mi>p</mml:mi>
</mml:math>
</inline-formula> matrix <bold>
<italic>Y</italic>
</bold>, such that<disp-formula id="equ1">
<mml:math id="m8">
<mml:mi mathvariant="bold-italic">Y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mtable class="matrix">
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mo>&#x22ee;</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
<mml:mspace width="0.3333em"/>
<mml:mspace width="0.3333em"/>
<mml:mspace width="0.3333em"/>
<mml:mspace width="0.3333em"/>
<mml:mtext>where&#x2009;each&#x2009;</mml:mtext>
<mml:mspace width="0.3333em"/>
<mml:mspace width="0.3333em"/>
<mml:mspace width="0.3333em"/>
<mml:mspace width="0.3333em"/>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">Y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mtable class="matrix">
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mo>&#x22ee;</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>L</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mtable class="matrix">
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mn>11</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mn>12</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x22ef;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mn>1</mml:mn>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mn>21</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mn>22</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x22ef;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mn>2</mml:mn>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mo>&#x22ee;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x22ee;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x22f1;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x22ee;</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>L</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>L</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x22ef;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>L</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:math>
</disp-formula>As an example, consider row <bold>
<italic>y</italic>
</bold>
<sub>
<italic>r</italic>2</sub> in the matrix <bold>
<italic>Y</italic>
</bold>
<sub>
<italic>r</italic>
</sub>. This vector corresponds to the <italic>p</italic> observed morphological characteristics associated with the second particle in run <italic>r</italic>, so that <italic>y</italic>
<sub>
<italic>r</italic>21</sub> corresponds to the observed value for the first morphological characteristic for particle 2 in run <italic>r</italic>, <italic>y</italic>
<sub>
<italic>r</italic>22</sub> corresponds to the observed value for the second morphological characteristic for particle 2 in run <italic>r</italic>, and so on.</p>
<p>The matrix <bold>
<italic>X</italic>
</bold> is analogously defined for the processing conditions, where <bold>
<italic>X</italic>
</bold>
<sub>
<italic>r</italic>
</sub> is the <italic>L</italic>
<sub>
<italic>r</italic>
</sub> &#xd7; <italic>q</italic> matrix of processing conditions, where each row of <bold>
<italic>X</italic>
</bold>
<sub>
<italic>r</italic>
</sub> is the <italic>q</italic>-vector of processing conditions associated with the corresponding run <italic>r</italic>. For dimensional consistency, we consider <bold>
<italic>X</italic>
</bold>
<sub>
<italic>r</italic>
</sub> as the <italic>L</italic>
<sub>
<italic>r</italic>
</sub> &#xd7; <italic>q</italic> matrix, where <bold>
<italic>x</italic>
</bold>
<sub>
<italic>r</italic>
</sub> is merely repeated for each of the <italic>L</italic>
<sub>
<italic>r</italic>
</sub> rows in <bold>
<italic>X</italic>
</bold>
<sub>
<italic>r</italic>
</sub>, since all particles in run <italic>r</italic> are produced under the same set of processing conditions.</p>
<p>Before proceeding with calibration, we must first define our emulator. We choose to fit a BASS model using the associated BASS package in R (<xref ref-type="bibr" rid="B4">Francom and Sans&#xf3;, 2020</xref>). We then perform model calibration to obtain the best set of processing conditions that are associated with a new set of observed morphological characteristics, captured by the matrix <bold>
<italic>Y</italic>
</bold>
<sub>
<italic>R</italic>&#x2b;1</sub>, that results from observing a set of particles from a new run.</p>
<p>Since run <italic>R</italic>&#x2b;1 results in <italic>L</italic>
<sub>
<italic>R</italic>&#x2b;1</sub> particles, we calibrate on each of the <italic>L</italic>
<sub>
<italic>R</italic>&#x2b;1</sub> vectors of morphological characteristics. Each of these calibrations results in <italic>N</italic>
<sub>
<italic>MCMC</italic>
</sub> samples for each of the <italic>q</italic> processing conditions. That is, for a set of <italic>L</italic>
<sub>
<italic>R</italic>&#x2b;1</sub> particles, we obtain <italic>L</italic>
<sub>
<italic>R</italic>&#x2b;1</sub> <italic>N</italic>
<sub>
<italic>MCMC</italic>
</sub> &#xd7; <italic>q</italic> matrices of predicted processing conditions, <inline-formula id="inf6">
<mml:math id="m9">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>11</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
<mml:msub>
<mml:mrow>
<mml:mi>L</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula>. We can use the results from this set of <italic>L</italic>
<sub>
<italic>R</italic>&#x2b;1</sub> matrices to determine a point estimate and highest posterior density intervals for the <italic>q</italic> processing conditions. This process is outlined in <xref ref-type="statement" rid="Algorithm_1">Algorithm 1</xref>.</p>
<p>
<statement content-type="algorithm" id="Algorithm_1">
<label>Algorithm 1.</label>
<p>Methodology for predicting processing conditions of newly observed particles.<list list-type="simple">
<list-item>
<p>
<bold>Data:</bold> The matrix <bold>
<italic>X</italic>
</bold> of processing conditions for the set of observed particles; The matrix <bold>
<italic>Y</italic>
</bold> of morphological characteristics associated with each particle in the set of observed particles; A set of <italic>L</italic>
<sub>
<italic>R</italic>&#x2b;1</sub> newly observed particles; The matrix <bold>
<italic>Y</italic>
</bold>
<sub>
<italic>R</italic>&#x2b;1</sub> of morphological characteristics associated with each of the newly observed particles.</p>
</list-item>
<list-item>
<p>
<bold>Result:</bold> An <italic>L</italic>
<sub>
<italic>R</italic>&#x2b;1</sub> &#xd7; <italic>q</italic> matrix of predicted processing conditions</p>
</list-item>
<list-item>
<p>1. Fit a BASS model that predicts <bold>
<italic>X</italic>
</bold> as a function of <bold>
<italic>Y</italic>
</bold>
</p>
</list-item>
<list-item>
<p>2. <bold>for</bold> each particle <italic>l</italic>
<sub>
<italic>R</italic>&#x2b;1</sub> &#x2208; 1 : <italic>L</italic>
<sub>
<italic>R</italic>&#x2b;1</sub> <bold>do</bold>
</p>
<list list-type="simple">
<list-item>
<p>a. Perform model calibration to predict the processing conditions <inline-formula id="inf7">
<mml:math id="m10">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">x</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>l</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> based on the morphological characteristics <inline-formula id="inf8">
<mml:math id="m11">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="bold-italic">y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>l</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> observed for particle <italic>l</italic>
<sub>
<italic>R</italic>&#x2b;1</sub>
</p>
</list-item>
<list-item>
<p>b. Return the <italic>N</italic>
<sub>
<italic>MCMC</italic>
</sub> &#xd7; <italic>q</italic> matrix of posterior samples associated with the <italic>q</italic> processing conditions</p>
</list-item>
<list-item>
<p>c. Determine the mode of the distribution of posterior samples for each of the <italic>q</italic> processing conditions</p>
</list-item>
</list>
</list-item>
<list-item>
<p>
<bold>end</bold>
</p>
</list-item>
<list-item>
<p>&#x2a; To obtain a point estimate for each of the <italic>q</italic> processing conditions, consider the mean, median, or mode of the resulting empirical distributions of posterior modes.</p>
</list-item>
</list>
</p>
</statement>
</p>
</sec>
<sec id="s4">
<title>4 Application</title>
<p>In this section, we apply the above methodology to our dataset. The data considered in this experiment consists of runs that were processed using two types of oxalate feed. Out of the 76 total runs, 24 of these runs were processed using a solid oxalate feed, and 52 were processed in 0.9&#xa0;M oxalate solution. We analyze these two sets separately (<xref ref-type="sec" rid="s4-1">Sections 4.1</xref>, <xref ref-type="sec" rid="s4-2">4.2</xref>), and jointly (<xref ref-type="sec" rid="s4-3">Section 4.3</xref>). Considering the different types of runs separately and jointly allows us to study the effects of training models on data that are either a) more representative of an interdicted sample (i.e., when we consider the different types of oxalate feeds separately and train separate models for the different types of oxalate feeds), or b) trained on more data, and thus able to learn more (i.e., when we consider the different types of oxalate feeds together and train the model on the joint data). For example, classification techniques may allow us to distinguish between solid and solution runs. In this case, given a test run that we determine to be either a solid or solution run, we can train our model on runs that are more representative of the run we are studying.</p>
<sec id="s4-1">
<title>4.1 Solid runs</title>
<p>In this section, we consider the 24 solid runs separately from the 76 total runs. We apply <xref ref-type="statement" rid="Algorithm_1">Algorithm 1</xref> <italic>via</italic> a leave-one-out cross-validation (LOO-CV) process (<xref ref-type="bibr" rid="B12">Lachenbruch and Mickey, 1968</xref>; <xref ref-type="bibr" rid="B15">Luntz and Brailovsky, 1969</xref>; <xref ref-type="bibr" rid="B8">Gareth et al., 2013</xref>), in which we withhold all particles associated with a given run. That is, we treat the particles associated with a given run as our newly observed particles on which we wish to perform calibration.</p>
<p>To quantify the ability of the algorithm to successfully predict the processing conditions of a given run, we consider the Root Mean Square Error (RMSE) of the predicted value of the processing conditions compared to the observed value of the processing conditions. The RMSE is a useful quantity to consider, since it is robust to distribution specification, given that it is based on a point estimate, and has a nice interpretation, especially when the data being considered is normalized. As such, before training the models, we center and scale the values so that we can interpret the RMSEs in the context of standard deviations of the processing conditions. For example, RMSEs of less than one indicate that the predicted values are within one standard deviation of the true values, RMSEs of less than two indicate that the predicted values are within two standard deviations of the true values, and so on. <xref ref-type="fig" rid="F2">Figures 2</xref>, <xref ref-type="fig" rid="F3">3</xref> demonstrate the ability of this method to make predictions when we train our algorithm on solid runs, to predict processing conditions of particles produced by solid runs.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Violin plots of RMSEs for processing conditions 1 through 8 for solid runs. Square points represent the means of these distributions; triangular points represent the medians of these distributions, and circular points represent the modes of these distributions.</p>
</caption>
<graphic xlink:href="fnuen-01-1083164-g002.tif"/>
</fig>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Breakdown of RMSE by processing condition across solid runs. The height of a given column relays the overall RMSE for the associated run. Each color corresponds to the proportion of this RMSE that can be attributed to each processing condition.</p>
</caption>
<graphic xlink:href="fnuen-01-1083164-g003.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="F2">Figure 2</xref> shows the violin plots of RMSEs when the medians of the resulting empirical distributions of posterior modes is used as a point estimate. From a comprehensive study of the solid runs, we were able to determine that the median of the empirical distributions of the posterior modes served as the best point estimate of the true processing condition. This is also true for the studies completed in <xref ref-type="sec" rid="s4-2">Sections 4.2</xref>, <xref ref-type="sec" rid="s4-3">4.3</xref>.</p>
<p>The results in <xref ref-type="fig" rid="F2">Figure 2</xref> indicate that the algorithm is better at predicting some processing conditions (which include conditions such as temperature, nitric acid concentration and Pu concentration) than others. For example, we see that, aside from processing conditions 3 and 5, this method is, on average, capable of predicting the processing conditions of a sample of Pu within a single standard deviation. In fact, the method is particularly effective at predicting processing conditions 1, 2 and 8, as is indicated by the modes of these distributions, which demonstrates that more often than not, our prediction for these processing conditions is within 0.5 standard deviations of the true value. We note that it is not particularly surprising that, in some instances, the RMSE can extend beyond a single standard deviation. Given that the production of a family of material types (in this case, Pu) requires a chemical expert to precisely execute a series of involved tasks, it is not particularly surprising that using only the morphological characteristics of the end product can result in uncertainties on the predicted values of the processing conditions that extend past 3 RMSE. Whether an RMSE below 0.5 is acceptable is not a straightforward determination. Much like the level of significance, alpha, used in hypothesis testing, the threshold at which uncertainty is acceptable to the user is dependent on the objective at hand. If the uncertainty is deemed to be inadequate, then a reflection on how uncertainty can be decreased would likely be useful. This exercise provides insight not only for forensic purposes, but also into biases and deviations from standard processes.</p>
<p>
<xref ref-type="fig" rid="F3">Figure 3</xref> shows the proportional breakdown of RMSE by processing condition for each run. For each run, we see the proportion of the overall RMSE that can be attributed to each processing condition. The greater the difference in the widths of the colored bars, the more disparate their individual RMSEs are from one another, and from the overall RMSE. This helps us to determine which processing conditions are well predicted for a given run, and which processing conditions are poorly predicted for a given run. From this figure, we can see that the predictions within processing conditions are relatively consistent between runs.</p>
<p>As an example, consider run 22. We see that the RMSE across all processing conditions for run 22 is approximately 1.7 (the overall height of the stacked bar). Within this bar, we can see how each individual processing condition contributes to the root average of 1.7 (the individual colored bars). For run 22, we see that the individual colored bars are made up of several different heights, indicating that the overall RMSE for run 22 is not representative of each processing condition&#x2019;s individual RMSE. Since processing conditions 1 and 8 are represented by very thin bars, their individual RMSEs are much smaller than 1.7 (and, in fact, are actually close to zero). On the other hand, we see that processing condition 6 is represented by a much taller bar, indicating that its individual RMSE is much larger than 1.7. This large difference is offset by the much smaller values of RMSE for processing conditions 1 and 8 when we take the root mean of all squared errors to obtain the overall RMSE.</p>
</sec>
<sec id="s4-2">
<title>4.2 Solution runs</title>
<p>In this section, we consider the 52 solution runs separately from the 76 total runs. As before, we apply <xref ref-type="statement" rid="Algorithm_1">Algorithm 1</xref> <italic>via</italic> an LOO-CV process, and consider RMSEs to quantify the performance of our models. <xref ref-type="fig" rid="F4">Figures 4</xref>, <xref ref-type="fig" rid="F5">5</xref> demonstrate the ability of this method to make predictions when we train our algorithm on solution runs, to predict processing conditions of particles produced by solution runs.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Violin plots of RMSEs for processing conditions 1 through 8 for solution runs. Square points represent the means of these distributions; triangular points represent the medians of these distributions, and circular points represent the modes of these distributions.</p>
</caption>
<graphic xlink:href="fnuen-01-1083164-g004.tif"/>
</fig>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Breakdown of RMSE by processing condition across solution runs. The height of a given column relays the overall RMSE for the associated run. Each color corresponds to the proportion of this RMSE that can be attributed to each processing condition.</p>
</caption>
<graphic xlink:href="fnuen-01-1083164-g005.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="F4">Figure 4</xref> shows the violin plots of RMSE&#x2019;s when the medians of the resulting empirical distributions of posterior modes is used as a point estimate. These results indicate that the algorithm is better at predicting some processing conditions than others. For example, we see that, aside from processing conditions 3 and 8, this method is, on average, capable of predicting the processing conditions of a sample of Pu within a single standard deviation. In fact, the method is particularly effective at predicting processing conditions 1 and 3, as is indicated by the modes of these distributions, which demonstrates that more often than not, our prediction for these processing conditions is within 0.5 standard deviations of the true value.</p>
<p>
<xref ref-type="fig" rid="F5">Figure 5</xref> shows the proportional breakdown of RMSE by processing condition for each run. This figure demonstrates that the predictions within processing conditions are relatively consistent between runs.</p>
</sec>
<sec id="s4-3">
<title>4.3 All runs</title>
<p>In this section, we consider all 76 runs together. That is, the algorithm is trained on runs that were processed using a solid oxalate feed, as well as those that were processed in solution, regardless of whether the test run is a solid run or a solution run. As before, we apply <xref ref-type="statement" rid="Algorithm_1">Algorithm 1</xref> <italic>via</italic> an LOO-CV process, and consider RMSEs to quantify the performance of our models. <xref ref-type="fig" rid="F6">Figures 6</xref>, <xref ref-type="fig" rid="F7">7</xref> demonstrate the ability of this method to make predictions when we train our algorithm on all runs, to predict processing conditions of particles produced by either a solid or solution run.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Distributions of predictions for processing conditions 1 through 8 for all runs. Square points represent the means of these distributions; triangular points represent the medians of these distributions, and circular points represent the modes of these distributions.</p>
</caption>
<graphic xlink:href="fnuen-01-1083164-g006.tif"/>
</fig>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Breakdown of RMSE by processing condition across all runs. The height of a given column relays the overall RMSE for the associated run. Each color corresponds to the proportion of this RMSE that can be attributed to each processing condition.</p>
</caption>
<graphic xlink:href="fnuen-01-1083164-g007.tif"/>
</fig>
<p>
<xref ref-type="fig" rid="F6">Figure 6</xref> shows the violin plots of RMSE&#x2019;s when the medians of the resulting empirical distributions of posterior modes is used as a point estimate. Note that the shapes of these distributions are similar to those depicted in <xref ref-type="fig" rid="F2">Figures 2</xref>, <xref ref-type="fig" rid="F4">4</xref>, indicating consistent prediction ability when we train on solid and solution runs together, <italic>versus</italic> training on just solid or just solution runs. We do note, however, that there are longer tails associated with these distributions, and that the overall RMSEs are larger when we train the algorithm on both solid and solution runs.</p>
<p>The results in <xref ref-type="fig" rid="F6">Figure 6</xref> indicate that the algorithm is better at predicting some processing conditions than others. For example, we see that, aside from processing conditions 3, 7 and 8, this method is, on average, capable of predicting the processing conditions of a sample of Pu within a single standard deviation. However, when we consider the mode as our point estimate, we see that the method is particularly effective at predicting processing conditions 1, 2, 4 and 8, as is indicated by the modes of these distributions, which demonstrates that more often than not, our prediction for these processing conditions is within 0.5 standard deviations of the true value.</p>
<p>
<xref ref-type="fig" rid="F7">Figure 7</xref> shows the proportional breakdown of RMSE by processing condition for each run. This figure demonstrates that the predictions within processing conditions are relatively consistent between runs. Again, we see that the model is particularly apt at making predictions for processing conditions 1, 2, 4 and 8.</p>
<p>We also consider a direct comparison of the broken down RMSE by run when we train on the solution and solid runs separately and jointly. <xref ref-type="fig" rid="F8">Figures 8</xref>, <xref ref-type="fig" rid="F9">9</xref> show these relationships. By comparing these figures with <xref ref-type="fig" rid="F3">Figures 3</xref>, <xref ref-type="fig" rid="F5">5</xref>, we can see how training on the solid and solution data jointly results in different predictions from those that result from training separate solid and solution models. In some instances, we see that the RMSE decreases, while in others, we see that it is increased, indicating that training a joint model is not necessarily worse than training two separate models. Furthermore, we see that the individual contribution of each processing condition to the overall RMSE for each run remains relatively consistent, although we do see slightly larger RMSEs for processing conditions 1 and 8 when we train jointly. In addition, we see that runs that are better predicted by the individually trained models are also better predicted by the jointly trained models for both solid and solution runs.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Breakdown of RMSE by processing condition across solid runs, when the model is trained on solid and solution data, jointly. The height of a given column relays the overall RMSE for the associated run. Each color corresponds to the proportion of this RMSE that can be attributed to each processing condition.</p>
</caption>
<graphic xlink:href="fnuen-01-1083164-g008.tif"/>
</fig>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>Breakdown of RMSE by processing condition across solution runs, when the model is trained on solid and solution data, jointly. The height of a given column relays the overall RMSE for the associated run. Each color corresponds to the proportion of this RMSE that can be attributed to each processing condition.</p>
</caption>
<graphic xlink:href="fnuen-01-1083164-g009.tif"/>
</fig>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<title>5 Conclusion</title>
<p>In this paper, we looked at the ability of a BASS model to predict the processing conditions of particles of Pu, given a set of morphological characteristics, by Bayesian calibration <italic>via</italic> a Bayesian Adaptive Spline Surface model. Not only does this model allow for providing point estimates of the processing conditions of a sample of SNM, but it also incorporates uncertainty into these predictions. This model proved to be sufficient at predicting processing conditions within a standard deviation of the observed processing conditions on average, when applied to a dataset of particles of Pu. In particular, we found that this model is best able to predict processing conditions 1, 2, and 4, and struggles most with predicting processing conditions 3, 7, and 8. By comparing <xref ref-type="fig" rid="F3">Figures 3</xref>, <xref ref-type="fig" rid="F5">5</xref>, <xref ref-type="fig" rid="F7">7</xref>, we see that using both solid and solution runs to train the model (<italic>versus</italic> analyzing solid and solution runs separately) does not affect the ability of the model to predict the processing conditions of a new run, indicating that the BASS model is flexible enough to incorporate this information into its predictions.</p>
<p>By predicting the processing conditions of samples of SNM, we can begin to understand where material may have been produced, or by whom. While it remains that the results of this method should not be used as the sole evidence for reaching a forensic conclusion, this analysis demonstrates that statistical models can aid the nuclear forensic community. In particular, these models help provide insight into the various sources of uncertainty and bias in the chemical processes. In addition, they allow forensic decision makers to numerically bound their observations, and quantitatively support their inferences and predictions. Future studies will aim to improve these results by considering the effects of particle sizes on the predictions, and will incorporate functional shape and texture data. Additionally, we would like to determine whether incorporating the discrepancy term in our calibration step would allow us to further decrease the RMSE of our predictions. Nevertheless, this methodology provides a solid foundation for future work in predicting processing conditions of particles of SNM using only their morphological characteristics.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary materials, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="s7">
<title>Author contributions</title>
<p>MA developed models, analyzed the data and compiled the manuscript. AM, DR, AZ, KS, KG, JT, and JH assisted with data analysis and manuscript review.</p>
</sec>
<sec id="s8">
<title>Funding</title>
<p>We acknowledge the Department of Homeland Security (DHS) Countering Weapons of Mass Destruction (CWMD) National Technical Forensics Center (NTNF), as well as the Office of Nuclear Detonation Detection (NA-222) within the Defense Nuclear Non-proliferation Research and Development of the US Department of Energy/National Nuclear Security Administration for funding this work at Los Alamos National Laboratory and at Sandia National Laboratories. This paper describes objective technical results and analysis.</p>
</sec>
<ack>
<p>We acknowledge the Department of Homeland Security (DHS) Countering Weapons of Mass Destruction (CWMD) National Technical Forensics Center (NTNF), as well as the Office of Nuclear Detonation Detection (NA-222) within the Defense Nuclear Non-proliferation Research and Development of the US Department of Energy/National Nuclear Security Administration.</p>
</ack>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors, and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s11">
<title>Author disclaimer</title>
<p>Any subjective views or opinions that might be expressed in the paper do not necessarily represent the views of the US Department of Energy or the United States Government. Los Alamos National Laboratory strongly supports academic freedom and a researcher&#x2019;s right to publish; as an institution, however, the Laboratory does not endorse the viewpoint of a publication or guarantee its technical correctness. Sandia National Laboratories is a multimission laboratory managed and operated by National Technology and Engineering Solutions of Sandia, LLC, a wholly owned subsidiary of Honeywell International, Inc., for the US Department of Energy&#x2019;s National Nuclear Security Administration under contract DE-NA0003525. Approved for public release: LA-UR-21-25447.</p>
</sec>
<fn-group>
<fn id="fn1">
<label>1</label>
<p>The BASS framework is similar to the Bayesian multivariate adaptive regression splines (BMARS) framework developed by <xref ref-type="bibr" rid="B3">Denison et al. (1998)</xref> [see also <xref ref-type="bibr" rid="B7">Friedman (1991)</xref>], but with added features that promote efficiency in the sampling processes to allow for a more efficient model estimation (e.g., Reversible Jump Markov Chain Monte Carlo (RJMCMC) <italic>via</italic> <xref ref-type="bibr" rid="B17">Nott et al. (2005)</xref>, parallel tempering). Like the BMARS framework, the BASS framework uses the input data to learn a set of basis functions that provide an approximate to <inline-formula id="inf9">
<mml:math id="m12">
<mml:mi>g</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">x</mml:mi>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b8;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:math>
</inline-formula> that, when considered together, give the best predictions of the output data. Like its BMARS counterpart, BASS is particularly well-suited for capturing non-linear relationships between the input and output variables (<xref ref-type="bibr" rid="B4">Francom and Sans&#xf3;, 2020</xref>).</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Anderson-Cook</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Burr</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hamada</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ruggiero</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Thomas</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Design of experiments and data analysis challenges in calibration for forensics applications</article-title>. <source>Chemom. Intelligent Laboratory Syst.</source> <volume>149</volume> (<issue>B</issue>), <fpage>107</fpage>&#x2013;<lpage>117</lpage>. <pub-id pub-id-type="doi">10.1016/j.chemolab.2015.07.008</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Anderson-Cook</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Hamada</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Burr</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>The impact of response measurement error on the analysis of designed experiments</article-title>. <source>Qual. Reliab. Eng. Int.</source> <volume>32</volume> (<issue>7</issue>), <fpage>2415</fpage>&#x2013;<lpage>2433</lpage>. <pub-id pub-id-type="doi">10.1002/qre.1945</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Denison</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Mallick</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Smith</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Bayesian Mars</article-title>. <source>Statistics Comput.</source> <volume>8</volume>, <fpage>337</fpage>&#x2013;<lpage>346</lpage>. <pub-id pub-id-type="doi">10.1023/a:1008824606259</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Francom</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Sans&#xf3;</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Bass: An r package for fitting and performing sensitivity analysis of bayesian adaptive spline surfaces</article-title>. <source>J. Stat. Softw.</source> <volume>94</volume> (<issue>8</issue>), <fpage>1</fpage>&#x2013;<lpage>36</lpage>. <pub-id pub-id-type="doi">10.18637/jss.v094.i08</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Francom</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Sans&#xf2;</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Bulaevskaya</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Lucas</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Simpson</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Inferring atmospheric release characteristics in a large computer experiment using bayesian adaptive splines</article-title>. <source>J. Am. Stat. Assoc.</source> <volume>114</volume> (<issue>528</issue>), <fpage>1450</fpage>&#x2013;<lpage>1465</lpage>. <pub-id pub-id-type="doi">10.1080/01621459.2018.1562933</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Francom</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Sans&#xf3;</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Kupresanin</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Johannesson</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Sensitivity analysis and emulation for functional data using bayesian adaptive splines</article-title>. <source>Stat. Sin.</source> <volume>28</volume> (<issue>2</issue>), <fpage>791</fpage>&#x2013;<lpage>816</lpage>. <pub-id pub-id-type="doi">10.5705/ss.202016.0130</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Friedman</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1991</year>). <article-title>Multivariate adaptive regression splines</article-title>. <source>Ann. Statistics</source> <volume>19</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1214/aos/1176347963</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Gareth</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Witten</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Hastie</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Tibshirani</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2013</year>). <source>An introduction to statistical learning: With applications in R</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer Texts in Statistics. Springer</publisher-name>.</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Higdon</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Gattiker</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Williams</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Rightley</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Computer model calibration using high-dimensional output</article-title>. <source>J. Am. Stat. Assoc.</source> <volume>103</volume> (<issue>482</issue>), <fpage>570</fpage>&#x2013;<lpage>583</lpage>. <pub-id pub-id-type="doi">10.1198/016214507000000888</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Higdon</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kennedy</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Cavendish</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cafeo</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ryne</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Combining field data and computer simulations for calibration and prediction</article-title>. <source>SIAM J. Sci. Comput.</source> <volume>26</volume> (<issue>1</issue>), <fpage>448</fpage>&#x2013;<lpage>466</lpage>. <pub-id pub-id-type="doi">10.1137/s1064827503426693</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kennedy</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>O&#x2019;Hagan</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>Bayesian calibration of computer models</article-title>. <source>J. R. Stat. Soc. Ser. B Stat. Methodol.</source> <volume>63</volume> (<issue>3</issue>), <fpage>425</fpage>&#x2013;<lpage>464</lpage>. <pub-id pub-id-type="doi">10.1111/1467-9868.00294</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lachenbruch</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Mickey</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>1968</year>). <article-title>Estimation of error rates in discriminant analysis</article-title>. <source>Technometrics</source> <volume>10</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1080/00401706.1968.10490530</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Oh</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Youn</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Review of statistical model calibration and validation - from the perspective of uncertainty structures</article-title>. <source>Struct. Multidiscip. Optim.</source> <volume>60</volume>, <fpage>1619</fpage>&#x2013;<lpage>1644</lpage>. <pub-id pub-id-type="doi">10.1007/s00158-019-02270-2</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lewis</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Anderson-Cook</surname>
<given-names>C. M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Comparing multiple statistical methods for inverse prediction in nuclear forensics applications</article-title>. <source>Chemom. Intelligent Laboratory Syst.</source> <volume>175</volume>, <fpage>116</fpage>&#x2013;<lpage>129</lpage>. <pub-id pub-id-type="doi">10.1016/j.chemolab.2017.10.010</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Luntz</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Brailovsky</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>1969</year>). <article-title>On estimation of characters obtained in statistical procedure of recognition</article-title>. <source>Tech. Kibern.</source> <volume>3</volume>, <fpage>69</fpage>.</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nguyen</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Francom</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Luscher</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wilkerson</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Bayesian calibration of a physics-based crystal plasticity and damage model</article-title>. <source>J. Mech. Phys. Solids</source> <volume>149</volume>, <fpage>104284</fpage>. <pub-id pub-id-type="doi">10.1016/j.jmps.2020.104284</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nott</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kuk</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Duc</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Efficient sampling schemes for Bayesian MARS models with many predictors</article-title>. <source>Statistics Comput.</source> <volume>15</volume> (<issue>2</issue>), <fpage>93</fpage>&#x2013;<lpage>101</lpage>. <pub-id pub-id-type="doi">10.1007/s11222-005-6201-x</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Porter</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Ruggiero</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Harvey</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Kelly</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Tandon</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Wilkerson</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>MAMA user guide v 1.2 technical report</article-title>. <comment>Technical report</comment>. <publisher-name>Los Alamos National Laboratory</publisher-name>.</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ries</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Lewis</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Anderson-Cook</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wilkerson</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wagner</surname>
<given-names>G.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Utilizing distributional measurements of material characteristics from sem images for inverse prediction</article-title>. <source>J. Nucl. Mater. Manag.</source> <volume>47</volume> (<issue>1</issue>), <fpage>37</fpage>&#x2013;<lpage>46</lpage>.</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ries</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Tucker</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Shuler</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ausdemore</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A framework for inverse prediction using functional response data</article-title>. <source>J. Comput. Inf. Sci. Eng.</source> <volume>23</volume>, <fpage>4053752</fpage>. <pub-id pub-id-type="doi">10.1115/1.4053752</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Walters</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Biswas</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Lawrence</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Francom</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Luscher</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Fredenburg</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Bayesian calibration of strength parameters using hydrocode simulations of symmetric impact shock experiments of al-5083</article-title>. <source>J. Appl. Phys.</source> <volume>124</volume> (<issue>20</issue>), <fpage>205105</fpage>. <pub-id pub-id-type="doi">10.1063/1.5051442</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Anderson-Cook</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ries</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Tucker</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Shuler</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ausdemore</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Statistical analysis for signatures of plutonium (III) oxalate experiment</article-title>. <comment>Technical Report SAND2021-7765</comment>. <publisher-name>Sandia National Laboratories</publisher-name>.</citation>
</ref>
</ref-list>
</back>
</article>