<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Vet. Sci.</journal-id>
<journal-title>Frontiers in Veterinary Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Vet. Sci.</abbrev-journal-title>
<issn pub-type="epub">2297-1769</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fvets.2024.1374890</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Veterinary Science</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Deep learning models for interpretation of point of care ultrasound in military working dogs</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Hernandez Torres</surname> <given-names>Sofia I.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2355383/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Holland</surname> <given-names>Lawrence</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2746225/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Edwards</surname> <given-names>Thomas H.</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/998566/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Venn</surname> <given-names>Emilee C.</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2655170/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Snider</surname> <given-names>Eric J.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2352993/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Organ Support and Automation Technologies Group, U.S. Army Institute of Surgical Research, JBSA Fort Sam Houston</institution>, <addr-line>San Antonio, TX</addr-line>, <country>United States</country></aff>
<aff id="aff2"><sup>2</sup><institution>Hemorrhage Control and Vascular Dysfunction Group, U.S. Army Institute of Surgical Research, JBSA Fort Sam Houston</institution>, <addr-line>San Antonio, TX</addr-line>, <country>United States</country></aff>
<aff id="aff3"><sup>3</sup><institution>Texas A&#x0026;M University, School of Veterinary Medicine</institution>, <addr-line>College Station, TX</addr-line>, <country>United States</country></aff>
<aff id="aff4"><sup>4</sup><institution>Veterinary Support Group, U.S. Army Institute of Surgical Research, JBSA Fort Sam Houston</institution>, <addr-line>San Antonio, TX</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0001">
<p>Edited by: Blaz Cugmas, University of Latvia, Latvia</p>
</fn>
<fn fn-type="edited-by" id="fn0002">
<p>Reviewed by: Juan Claudio Gutierrez, University of California, Davis, United States</p>
<p>Martin Ceballos, University of Buenos Aires, Argentina</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Eric J. Snider, <email>eric.j.snider3.civ@health.mil</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>06</day>
<month>06</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>11</volume>
<elocation-id>1374890</elocation-id>
<history>
<date date-type="received">
<day>23</day>
<month>01</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>20</day>
<month>05</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2024 Hernandez Torres, Holland, Edwards, Venn and Snider.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Hernandez Torres, Holland, Edwards, Venn and Snider</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec id="sec1">
<title>Introduction</title>
<p>Military working dogs (MWDs) are essential for military operations in a wide range of missions. With this pivotal role, MWDs can become casualties requiring specialized veterinary care that may not always be available far forward on the battlefield. Some injuries such as pneumothorax, hemothorax, or abdominal hemorrhage can be diagnosed using point of care ultrasound (POCUS) such as the Global FAST&#x00AE; exam. This presents a unique opportunity for artificial intelligence (AI) to aid in the interpretation of ultrasound images. In this article, deep learning classification neural networks were developed for POCUS assessment in MWDs.</p>
</sec>
<sec id="sec2">
<title>Methods</title>
<p>Images were collected in five MWDs under general anesthesia or deep sedation for all scan points in the Global FAST&#x00AE; exam. For representative injuries, a cadaver model was used from which positive and negative injury images were captured. A total of 327 ultrasound clips were captured and split across scan points for training three different AI network architectures: MobileNetV2, DarkNet-19, and ShrapML. Gradient class activation mapping (GradCAM) overlays were generated for representative images to better explain AI predictions.</p>
</sec>
<sec id="sec3">
<title>Results</title>
<p>Performance of AI models reached over 82% accuracy for all scan points. The model with the highest performance was trained with the MobileNetV2 network for the cystocolic scan point achieving 99.8% accuracy. Across all trained networks the diaphragmatic hepatorenal scan point had the best overall performance. However, GradCAM overlays showed that the models with highest accuracy, like MobileNetV2, were not always identifying relevant features. Conversely, the GradCAM heatmaps for ShrapML show general agreement with regions most indicative of fluid accumulation.</p>
</sec>
<sec id="sec4">
<title>Discussion</title>
<p>Overall, the AI models developed can automate POCUS predictions in MWDs. Preliminarily, ShrapML had the strongest performance and prediction rate paired with accurately tracking fluid accumulation sites, making it the most suitable option for eventual real-time deployment with ultrasound systems. Further integration of this technology with imaging technologies will expand use of POCUS-based triage of MWDs.</p>
</sec>
</abstract>
<kwd-group>
<kwd>ultrasound imaging</kwd>
<kwd>military medicine</kwd>
<kwd>canine</kwd>
<kwd>deep learning</kwd>
<kwd>triage</kwd>
<kwd>abdominal hemorrhage</kwd>
<kwd>pneumothorax</kwd>
<kwd>hemothorax</kwd>
</kwd-group>
<counts>
<fig-count count="3"/>
<table-count count="5"/>
<equation-count count="5"/>
<ref-count count="34"/>
<page-count count="10"/>
<word-count count="5640"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Veterinary Imaging</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec5">
<label>1</label>
<title>Introduction</title>
<p>Ultrasound is commonly used in canines with suspected abdominal or thoracic injuries following trauma, to identify free fluid which may require surgical intervention. Different standardized exams are used in veterinary medicine such as the abdominal focused assessment with sonography for trauma (AFAST&#x00AE;), thoracic FAST (TFAST&#x00AE;), or the Veterinary Bedside Lung Ultrasound Exam (Vet BLUE&#x00AE;) (<xref ref-type="bibr" rid="ref1 ref2 ref3">1&#x2013;3</xref>). These are often performed together and referred to as GlobalFAST&#x00AE; which can be used for civilian trauma cases, but also for working dog casualties (<xref ref-type="bibr" rid="ref4">4</xref>). Working dogs cover a wide range of occupations including military working dogs (MWDs) which go anywhere soldiers are deployed and aid with a wide range of tasks (<xref ref-type="bibr" rid="ref5">5</xref>). The increasingly high-risk missions that MWDs share with their handlers put them at risk for similar injuries as their Service member counterparts (<xref ref-type="bibr" rid="ref6">6</xref>, <xref ref-type="bibr" rid="ref7">7</xref>). Unfortunately, in the early roles of care, where MWD casualties are first managed, veterinary expertise may not be present to properly acquire ultrasound images and to interpret them, making GlobalFAST&#x00AE; inaccessible for treatment of MWDs at these early stages of care (<xref ref-type="bibr" rid="ref8">8</xref>).</p>
<p>This is further complicated on the future battlefield where medical evacuation will be limited and more medical care and triage will need to be provided in theater, at early roles of care (<xref ref-type="bibr" rid="ref9">9</xref>). In fact, this is already being experienced with the Ukraine-Russia conflict, where limited medical evacuation opportunities arise due to challenged airspace, which is requiring far forward surgical teams to treat and manage a larger number of casualties for up to 72&#x2009;h in theater (<xref ref-type="bibr" rid="ref10">10</xref>). This is further complicated by precise long-range weaponry minimizing the relative safety of CASEVAC even at distances above 500&#x2009;km away from enemy lines. In addition, more than 70% of Ukraine casualties stem from more advanced rocket or artillery injuries, which often result in complex polytrauma to multiple organ systems (<xref ref-type="bibr" rid="ref10">10</xref>). Thus, as we look towards the future battlefield, it is even more imperative to have accurate triage procedures for prioritizing injured warfighters for access to limited evacuation opportunities.</p>
<p>Towards addressing this critical capability gap for canine and human casualties on the future battlefield, artificial intelligence (AI) can be utilized to automate medical triage image interpretation (<xref ref-type="bibr" rid="ref11">11</xref>, <xref ref-type="bibr" rid="ref12">12</xref>). AI for image interpretation often relies on deep convolutional neural network models containing millions of trainable parameters to extract features from images for making categorical predictions (<xref ref-type="bibr" rid="ref13">13</xref>, <xref ref-type="bibr" rid="ref14">14</xref>). For medical applications, AI has been widely used for tumor detection (<xref ref-type="bibr" rid="ref15">15</xref>, <xref ref-type="bibr" rid="ref16">16</xref>), COVID-19 diagnosis (<xref ref-type="bibr" rid="ref17">17</xref>, <xref ref-type="bibr" rid="ref18">18</xref>), and obstetric ultrasound applications (<xref ref-type="bibr" rid="ref19">19</xref>, <xref ref-type="bibr" rid="ref20">20</xref>). In addition, AI has been applied to interpret radiographs in thoracic (<xref ref-type="bibr" rid="ref21">21</xref>, <xref ref-type="bibr" rid="ref22">22</xref>), cardiac (<xref ref-type="bibr" rid="ref23">23</xref>, <xref ref-type="bibr" rid="ref24">24</xref>), and orthopedic (<xref ref-type="bibr" rid="ref25">25</xref>) settings. Our research team has previously developed an ultrasound image AI interpretation model for detecting shrapnel in tissue, termed ShrapML (<xref ref-type="bibr" rid="ref26">26</xref>, <xref ref-type="bibr" rid="ref27">27</xref>). We have recently expanded this work to the enhanced FAST (eFAST) exam commonly used for human emergency triage applications (<xref ref-type="bibr" rid="ref28">28</xref>). This application resulted in different AI models for detecting pneumothorax, hemothorax, and abdominal hemorrhage injuries in tissue phantom image sets. 
In this presented work, we hypothesize that if AI image interpretation models are trained on canine image datasets, they will be able to automatically identify injuries at each POCUS scan point. By doing so, the skill threshold for POCUS interpretation will be lowered so that this critical triage task can be available at early echelons of care where emergency intervention is most needed for MWDs.</p>
</sec>
<sec sec-type="materials|methods" id="sec6">
<label>2</label>
<title>Materials and methods</title>
<sec id="sec7">
<label>2.1</label>
<title>Imaging protocol</title>
<p>Research was conducted in compliance with the Animal Welfare Act, implementing Animal Welfare regulations, and the principles of the Guide for the Care and Use of Laboratory Animals. The Institutional Animal Care and Use Committee at the Department of Defense Military Working Dog Veterinary Services approved all research conducted in this study. The facility where this research was conducted is fully accredited by the AAALAC International. The POCUS protocol used mirrored the GlobalFAST&#x00AE; procedure in a total of five (1.5 to 10&#x2009;years old) healthy canine subjects (20 to 55 kg weight) under general anesthesia or deep sedation for other medical procedures, as prescribed by the attending veterinarian. Ultrasound (US) clips were collected in 8 scan points (<xref ref-type="table" rid="tab1">Table 1</xref>) using a C11 transducer (Fujifilm, Bothell, WA, United States) with a Sonosite Edge ultrasound system (Fujifilm, Bothell, WA, United States). The subject was positioned in right lateral, left lateral, sternal or dorsal recumbency for ease of access to each scan point. A minimum of three 15&#x2009;s clips were collected at each scan point with the probe orientation held in the coronal plane for the first 6&#x2009;s and then rotated to the transverse plane for the remainder of each clip. All clips collected from the live subjects were used as baseline (negative for injury) data. The same scanning protocol was used to obtain US imaging data from a cadaver canine model. A total of five frozen cadavers (Skulls Unlimited, Oklahoma City, OK, United States) were received and stored at &#x2212;20&#x00B0;C until ready for use. Once thawed, an endotracheal tube (McKesson Medical-Surgical, Irving, TX, United States) was placed into the trachea of each subject and secured to a bag valve mask (EMS Safety Services, Eugene, OR, United States) for ventilation. 
At this time thoracic and abdominal CT scans (Toshiba Aquilion CT Scanner, Cannon Medical Systems, Tustin, CA, United States) were collected to identify any pre-existing injuries. Then, data was collected at each scan point, using the same protocol as the live subjects. After collecting the first round of data, if the subject was positive for any injury, e.g., a pneumothorax, a needle decompression was performed to remove air and obtain a negative scan. Another round of data was collected with the scan points that were negative for injury. Next, controlled injuries were performed by adding blood or saline to the pleural space (up to 300&#x2009;mL) or the abdomen (up to 400&#x2009;mL) for a final round of positive injury image collection in the cadaver subjects.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Scan point description for the POCUS imaging protocol.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Scan point</th>
<th align="left" valign="top">Abbreviation</th>
<th align="left" valign="top">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Bilateral Chest Tube Site</td>
<td align="left" valign="top">CTS</td>
<td align="left" valign="top">Longitudinal plane on both sides of the chest perpendicular to the ribs at the 7th to 9th intercostal space.</td>
</tr>
<tr>
<td align="left" valign="top">Bilateral Pericardial Site</td>
<td align="left" valign="top">PCS</td>
<td align="left" valign="top">Longitudinal and transverse planes on each side of the chest between the 5th and 6th intercostal spaces over the heart.</td>
</tr>
<tr>
<td align="left" valign="top">Diaphragmatic Hepatic</td>
<td align="left" valign="top">DH</td>
<td align="left" valign="top">Subxiphoid view for visualization of the pleural and pericardial spaces beyond the diaphragm to evaluate hepatodiaphragmatic interface, gallbladder region, and pericardial sac.</td>
</tr>
<tr>
<td align="left" valign="top">Splenorenal</td>
<td align="left" valign="top">SR</td>
<td align="left" valign="top">Left flank view to assess the splenorenal interface and areas between the spleen and body wall</td>
</tr>
<tr>
<td align="left" valign="top">Cystocolic</td>
<td align="left" valign="top">CC</td>
<td align="left" valign="top">Midline view to assess the apex of the bladder</td>
</tr>
<tr>
<td align="left" valign="top">Hepatorenal</td>
<td align="left" valign="top">HR</td>
<td align="left" valign="top">Right flank view to assess the hepatorenal interface and areas between the liver and body wall</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec8">
<label>2.2</label>
<title>Preprocessing images</title>
<p>All clips were exported from the US machine as MP4 format and then renamed to reflect the scan point, subject ID, and recumbency of each subject. Frames were extracted from each clip using the ffmpeg tool via a Ruby script, and then sorted by positive or negative for injury by scan point. Each frame was then cropped to remove the user interface information from the US system and the images were resized to 512&#x2009;&#x00D7;&#x2009;512 pixels. Additional steps were taken with images collected at the chest tube site, to recreate M-mode images. Briefly, clips were processed to extract a pixel-wide image over time for visualizing the lung-pleura interface movement. These custom-M-mode images were then cropped and resized to 512&#x2009;&#x00D7;&#x2009;512 as well.</p>
<p>Before images were ready for training, they were augmented to prevent model overfitting and improve performance. While data augmentation is useful to prevent overfitting, it can result in poor model performance and more computationally intensive training if not set up optimally for the application (<xref ref-type="bibr" rid="ref29">29</xref>). A representative image was chosen from each scan point, including M-mode reconstructions, to match histogram values across all the other images using the &#x201C;imhistmatch&#x201D; function in MATLAB (MathWorks, Natick, MA, United States). Then, contrast and brightness were randomly adjusted by &#x00B1;20% to add training noise using the &#x201C;jitterColorHSV&#x201D; function in MATLAB. Both MATLAB functions were applied to all images for every scan point using Image Batch Processor on MATLAB. Augmented US images were imported at a 512 &#x00D7; 512 &#x00D7; 3 image size and were randomly assigned to training, validation or testing datasets at a 70:15:15 ratio. Image sets were set up so that an even number of positive or negative images were selected in each dataset for each split. Next, training images were augmented randomly by affine transformations: random scaling, random X and Y reflections, random rotation, random X and Y shear, and random X and Y translation. However, for the CTS M-mode scan point only X reflection and translation affine transformations were applied given how these images were constructed. Due to DH scan point images being unable to train with all augmentations (data not shown), only reflection and translation augmentations were applied for both the X and Y direction.</p>
</sec>
<sec id="sec9">
<label>2.3</label>
<title>Training AI models</title>
<p>Three different AI models were evaluated for this application that have previously been used for ultrasound image interpretation successfully &#x2013; MobileNetV2 (<xref ref-type="bibr" rid="ref30">30</xref>), DarkNet-19 (<xref ref-type="bibr" rid="ref31">31</xref>), and ShrapML (<xref ref-type="bibr" rid="ref26">26</xref>). MobileNetV2 has 53 convolutional layers, 3.5 million parameters, and was optimized for use on mobile devices. We have previously shown this architecture to perform at the highest accuracy for identifying shrapnel in a custom tissue phantom. The second-best performing architecture, DarkNet-19, has 19 convolutional layers, 20.8 million parameters, and utilizes global average pooling for making predictions. The last model used, ShrapML, was purpose built and Bayesian optimized for identifying shrapnel in ultrasound images with high accuracy and much more rapidly than conventional models. In addition, we have shown it to be successful at identifying pneumothorax, hemothorax, and abdominal hemorrhage injuries in eFAST images captured in human tissue phantom models (<xref ref-type="bibr" rid="ref28">28</xref>). ShrapML consists of 8 convolutional layers with only 430,000 trainable parameters.</p>
<p>Training for all scan points consisted of a learning rate of 0.001 with a batch size of 32 images and RMSprop (root mean squared propagation) as the optimizer. A maximum of 100 epochs was allowed for training with a validation patience of 5 epochs if the overall validation loss did not improve. The model with the lowest validation loss was selected for use with blind predictions. All training was performed using MATLAB R2022b run on a Microsoft Windows workstation with a NVIDIA GeForce RTX 3090 Ti 24Gb VRAM graphics card, Intel i9-12900k and 64&#x2009;GB RAM.</p>
</sec>
<sec id="sec10">
<label>2.4</label>
<title>Performance metrics</title>
<p>Testing image sets were used to assess blind performance in multiple ways. First, confusion matrices were generated to categorize prediction as either true positive (TP), true negative (TN), false positive (FP), or false negative (FN) results. These results were used to generate performance metrics for accuracy <xref ref-type="disp-formula" rid="EQ1">Eq. 1</xref>, precision <xref ref-type="disp-formula" rid="EQ2">Eq. 2</xref>, recall <xref ref-type="disp-formula" rid="EQ3">Eq. 3</xref>, specificity <xref ref-type="disp-formula" rid="EQ4">Eq. 4</xref>, and F1 scores <xref ref-type="disp-formula" rid="EQ5">Eq. 5</xref> using commonly used formulas for each.</p>
<disp-formula id="EQ1">
<label>(1)</label>
<mml:math id="M1">
<mml:mi mathvariant="italic">Accuracy</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="EQ2">
<label>(2)</label>
<mml:math id="M2">
<mml:mi mathvariant="italic">Precision</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="EQ3">
<label>(3)</label>
<mml:math id="M3">
<mml:mi mathvariant="italic">Recall</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>P</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="EQ4">
<label>(4)</label>
<mml:math id="M4">
<mml:mi mathvariant="italic">Specificity</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mi>N</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi>F</mml:mi>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<disp-formula id="EQ5">
<label>(5)</label>
<mml:math id="M5">
<mml:mi>F</mml:mi>
<mml:mn>1</mml:mn>
<mml:mspace width="0.25em"/>
<mml:mi mathvariant="italic">score</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>&#x00D7;</mml:mo>
<mml:mi mathvariant="italic">Precision</mml:mi>
<mml:mo>&#x00D7;</mml:mo>
<mml:mi mathvariant="italic">Recall</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">Precision</mml:mi>
<mml:mo>+</mml:mo>
<mml:mi mathvariant="italic">Recall</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<p>Then, we constructed receiver operating characteristic (ROC) plots to further classify performance for a number of confidence thresholds for the predictions. ROC plots were used to calculate the area under the ROC curve or AUROC, which indicates how well the model differentiates between categories. Next, inference times for test image predictions were quantified for each trained model to assess differences in computational efficiency of the three different AI models used. Lastly, Gradient-weighted Class Activation Mapping (GradCAM) overlays were generated for test predictions to highlight the regions of images where the AI predictions were focused (<xref ref-type="bibr" rid="ref32">32</xref>). These were used as an explainable-AI methodology to verify the AI models were accurately tracking the image regions where injury differences were present (<xref ref-type="bibr" rid="ref16">16</xref>, <xref ref-type="bibr" rid="ref33">33</xref>, <xref ref-type="bibr" rid="ref34">34</xref>).</p>
</sec>
</sec>
<sec sec-type="results" id="sec11">
<label>3</label>
<title>Results</title>
<sec id="sec12">
<label>3.1</label>
<title>MobileNetV2</title>
<p>MobileNetV2 was successfully trained for each POCUS scan point, with an average accuracy across all locations of 98.8% (<xref ref-type="table" rid="tab2">Table 2</xref>). In addition, strong performance was evident for other conventional metrics across each POCUS scan point. However, upon closer inspection using GradCAM mask overlays, the MobileNetV2 trained model was not always properly tracking the injury site, but instead was focused on image artifacts that will likely not be consistent for additional canine subjects not included in the current datasets (<xref ref-type="fig" rid="fig1">Figure 1</xref>). While CTS scan sites for both M- and B-mode were accurately tracking injuries, other scan sites such as HR, DH, and SR were not tracking correctly. The average inference time across all MobileNetV2 scan site models was 6.21&#x2009;ms per prediction.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Summary of performance metrics for MobileNetV2.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Metric</th>
<th align="center" valign="top">CTS</th>
<th align="center" valign="top">CTS M-mode</th>
<th align="center" valign="top">PCS</th>
<th align="center" valign="top">DH</th>
<th align="center" valign="top">SR</th>
<th align="center" valign="top">CC</th>
<th align="center" valign="top">HR</th>
<th align="center" valign="top">Average</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Accuracy</td>
<td align="center" valign="middle">0.987</td>
<td align="center" valign="middle">0.997</td>
<td align="center" valign="middle">0.985</td>
<td align="center" valign="middle">0.986</td>
<td align="center" valign="middle">0.979</td>
<td align="center" valign="middle">0.998</td>
<td align="center" valign="middle">0.987</td>
<td align="center" valign="middle">0.988</td>
</tr>
<tr>
<td align="left" valign="middle">Precision</td>
<td align="center" valign="middle">0.986</td>
<td align="center" valign="middle">0.994</td>
<td align="center" valign="middle">0.995</td>
<td align="center" valign="middle">0.998</td>
<td align="center" valign="middle">0.999</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">0.982</td>
<td align="center" valign="middle">0.995</td>
</tr>
<tr>
<td align="left" valign="middle">Recall</td>
<td align="center" valign="middle">0.987</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">0.976</td>
<td align="center" valign="middle">0.973</td>
<td align="center" valign="middle">0.960</td>
<td align="center" valign="middle">0.996</td>
<td align="center" valign="middle">0.992</td>
<td align="center" valign="middle">0.980</td>
</tr>
<tr>
<td align="left" valign="middle">Specificity</td>
<td align="center" valign="middle">0.986</td>
<td align="center" valign="middle">0.994</td>
<td align="center" valign="middle">0.995</td>
<td align="center" valign="middle">0.998</td>
<td align="center" valign="middle">0.999</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">0.982</td>
<td align="center" valign="middle">0.995</td>
</tr>
<tr>
<td align="left" valign="middle">F1 Score</td>
<td align="center" valign="middle">0.987</td>
<td align="center" valign="middle">0.997</td>
<td align="center" valign="middle">0.985</td>
<td align="center" valign="middle">0.985</td>
<td align="center" valign="middle">0.979</td>
<td align="center" valign="middle">0.998</td>
<td align="center" valign="middle">0.987</td>
<td align="center" valign="middle">0.987</td>
</tr>
<tr>
<td align="left" valign="middle">AUROC</td>
<td align="center" valign="middle">0.999</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">0.999</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">0.999</td>
<td align="center" valign="middle">1.000</td>
</tr>
<tr>
<td align="left" valign="middle">Inference Time (ms/image)</td>
<td align="center" valign="middle">6.22</td>
<td align="center" valign="middle">7.67</td>
<td align="center" valign="middle">5.59</td>
<td align="center" valign="middle">5.58</td>
<td align="center" valign="middle">6.64</td>
<td align="center" valign="middle">6.06</td>
<td align="center" valign="middle">6.57</td>
<td align="center" valign="middle">6.21</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Prediction results by scan point for MobileNetV2. Results for each scan site showing (column 1) confusion matrix test prediction results, (column 2&#x2013;3) negative and (column 4&#x2013;5) positive representative images without and with the GradCAM overlay. Regions in the images with high relevance to model predictions have red-yellow overlays, while those of lower relevance have blue-green overlays.</p>
</caption>
<graphic xlink:href="fvets-11-1374890-g001.tif"/>
</fig>
</sec>
<sec id="sec13">
<label>3.2</label>
<title>DarkNet-19</title>
<p>The DarkNet-19 models had similar inference speeds compared to MobileNetV2 at 5.93&#x2009;ms per prediction, but overall performance was reduced for a number of the scan sites, resulting in an average accuracy across all scan points of 86.4% (<xref ref-type="table" rid="tab3">Table 3</xref>). Certain scan points like chest-tube M-mode images resulted only in predictions of negative (TN or FN) and the GradCAM overlays identified no obvious tracked features in the image (<xref ref-type="fig" rid="fig2">Figure 2</xref>). While this was the worst performing dataset trained against, the Cystocolic scan site was also only at 69.2% accuracy. While performance was reduced compared to MobileNetV2 across nearly all metrics, the GradCAM overlays were more accurately tracking image features consistent with locations where free fluid was or could be identified. These results indicated that while performance was overall reduced for DarkNet-19, the predictions were more often tracking the proper image features. More images and subject variability may improve on training performance.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Summary of performance metrics for DarkNet-19.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Metric</th>
<th align="center" valign="top">CTS</th>
<th align="center" valign="top">CTS M-mode</th>
<th align="center" valign="top">PCS</th>
<th align="center" valign="top">DH</th>
<th align="center" valign="top">SR</th>
<th align="center" valign="top">CC</th>
<th align="center" valign="top">HR</th>
<th align="center" valign="top">Average</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Accuracy</td>
<td align="center" valign="middle">0.933</td>
<td align="center" valign="middle">0.500</td>
<td align="center" valign="middle">0.930</td>
<td align="center" valign="middle">0.967</td>
<td align="center" valign="middle">0.878</td>
<td align="center" valign="middle">0.692</td>
<td align="center" valign="middle">0.919</td>
<td align="center" valign="middle">0.864</td>
</tr>
<tr>
<td align="left" valign="middle">Precision</td>
<td align="center" valign="middle">0.954</td>
<td/>
<td align="center" valign="middle">0.993</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">0.865</td>
<td align="center" valign="middle">0.636</td>
<td align="center" valign="middle">0.873</td>
<td align="center" valign="middle">0.844</td>
</tr>
<tr>
<td align="left" valign="middle">Recall</td>
<td align="center" valign="middle">0.911</td>
<td align="center" valign="middle">0.000</td>
<td align="center" valign="middle">0.867</td>
<td align="center" valign="middle">0.933</td>
<td align="center" valign="middle">0.896</td>
<td align="center" valign="middle">0.895</td>
<td align="center" valign="middle">0.979</td>
<td align="center" valign="middle">0.926</td>
</tr>
<tr>
<td align="left" valign="middle">Specificity</td>
<td align="center" valign="middle">0.956</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">0.994</td>
<td align="center" valign="middle">1.000</td>
<td align="center" valign="middle">0.860</td>
<td align="center" valign="middle">0.488</td>
<td align="center" valign="middle">0.858</td>
<td align="center" valign="middle">0.801</td>
</tr>
<tr>
<td align="left" valign="middle">F1 Score</td>
<td align="center" valign="middle">0.932</td>
<td/>
<td align="center" valign="middle">0.926</td>
<td align="center" valign="middle">0.966</td>
<td align="center" valign="middle">0.880</td>
<td align="center" valign="middle">0.744</td>
<td align="center" valign="middle">0.923</td>
<td align="center" valign="middle">0.878</td>
</tr>
<tr>
<td align="left" valign="middle">AUROC</td>
<td align="center" valign="middle">0.984</td>
<td align="center" valign="middle">0.575</td>
<td align="center" valign="middle">0.992</td>
<td align="center" valign="middle">0.999</td>
<td align="center" valign="middle">0.953</td>
<td align="center" valign="middle">0.737</td>
<td align="center" valign="middle">0.988</td>
<td align="center" valign="middle">0.920</td>
</tr>
<tr>
<td align="left" valign="middle">Inference Time (ms/image)</td>
<td align="center" valign="middle">6.32</td>
<td align="center" valign="middle">8.73</td>
<td align="center" valign="middle">5.53</td>
<td align="center" valign="middle">5.61</td>
<td align="center" valign="middle">5.86</td>
<td align="center" valign="middle">6.17</td>
<td align="center" valign="middle">6.07</td>
<td align="center" valign="middle">5.93</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Prediction results by scan point for DarkNet-19. Results for each scan site showing (column 1) confusion matrix test prediction results, (column 2&#x2013;3) negative and (column 4&#x2013;5) positive representative images without and with the GradCAM overlay. Regions in the images with high relevance to model predictions have red-yellow overlays, while those of lower relevance have blue-green overlays.</p>
</caption>
<graphic xlink:href="fvets-11-1374890-g002.tif"/>
</fig>
</sec>
<sec id="sec14">
<label>3.3</label>
<title>ShrapML</title>
<p>The last model evaluated was ShrapML, which resulted in an accuracy across all scan sites of 93.4% (<xref ref-type="table" rid="tab4">Table 4</xref>). Unlike DarkNet-19, no trained model resulted in an instance of 100% positive or negative guesses. However, performance metrics were consistently worse than MobileNetV2. Given the smaller model size of ShrapML, the inference times were much quicker compared to the other models with prediction rates at an average of 3.43&#x2009;ms per image. GradCAM overlays more closely resembled DarkNet-19 in that many of the heat map intensity points were focused on regions where free fluid was likely to be found or near organs present in the ultrasound scan (<xref ref-type="fig" rid="fig3">Figure 3</xref>), except for the HR site. Overall, ShrapML was successful at performing similarly well to these large network structures for this GlobalFAST application, model overfitting was less evident in the results, and overall prediction speed outperformed the other models tested.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Summary of performance metrics for ShrapML.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Metric</th>
<th align="center" valign="top">CTS</th>
<th align="center" valign="top">CTS M-mode</th>
<th align="center" valign="top">PCS</th>
<th align="center" valign="top">DH</th>
<th align="center" valign="top">SR</th>
<th align="center" valign="top">CC</th>
<th align="center" valign="top">HR</th>
<th align="center" valign="top">Average</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Accuracy</td>
<td align="center" valign="middle">0.900</td>
<td align="center" valign="middle">0.966</td>
<td align="center" valign="middle">0.908</td>
<td align="center" valign="middle">0.989</td>
<td align="center" valign="middle">0.861</td>
<td align="center" valign="middle">0.965</td>
<td align="center" valign="middle">0.950</td>
<td align="center" valign="middle">0.934</td>
</tr>
<tr>
<td align="left" valign="middle">Precision</td>
<td align="center" valign="middle">0.901</td>
<td align="center" valign="middle">0.994</td>
<td align="center" valign="middle">0.917</td>
<td align="center" valign="middle">0.993</td>
<td align="center" valign="middle">0.806</td>
<td align="center" valign="middle">0.967</td>
<td align="center" valign="middle">0.977</td>
<td align="center" valign="middle">0.936</td>
</tr>
<tr>
<td align="left" valign="middle">Recall</td>
<td align="center" valign="middle">0.898</td>
<td align="center" valign="middle">0.938</td>
<td align="center" valign="middle">0.897</td>
<td align="center" valign="middle">0.984</td>
<td align="center" valign="middle">0.950</td>
<td align="center" valign="middle">0.963</td>
<td align="center" valign="middle">0.921</td>
<td align="center" valign="middle">0.936</td>
</tr>
<tr>
<td align="left" valign="middle">Specificity</td>
<td align="center" valign="middle">0.901</td>
<td align="center" valign="middle">0.994</td>
<td align="center" valign="middle">0.919</td>
<td align="center" valign="middle">0.993</td>
<td align="center" valign="middle">0.772</td>
<td align="center" valign="middle">0.967</td>
<td align="center" valign="middle">0.978</td>
<td align="center" valign="middle">0.932</td>
</tr>
<tr>
<td align="left" valign="middle">F1 Score</td>
<td align="center" valign="middle">0.900</td>
<td align="center" valign="middle">0.965</td>
<td align="center" valign="middle">0.907</td>
<td align="center" valign="middle">0.988</td>
<td align="center" valign="middle">0.872</td>
<td align="center" valign="middle">0.965</td>
<td align="center" valign="middle">0.948</td>
<td align="center" valign="middle">0.935</td>
</tr>
<tr>
<td align="left" valign="middle">AUROC</td>
<td align="center" valign="middle">0.961</td>
<td align="center" valign="middle">0.998</td>
<td align="center" valign="middle">0.970</td>
<td align="center" valign="middle">0.999</td>
<td align="center" valign="middle">0.928</td>
<td align="center" valign="middle">0.995</td>
<td align="center" valign="middle">0.988</td>
<td align="center" valign="middle">0.977</td>
</tr>
<tr>
<td align="left" valign="middle">Inference Time (ms/image)</td>
<td align="center" valign="middle">5.72</td>
<td align="center" valign="middle">3.78</td>
<td align="center" valign="middle">2.63</td>
<td align="center" valign="middle">2.68</td>
<td align="center" valign="middle">3.31</td>
<td align="center" valign="middle">2.83</td>
<td align="center" valign="middle">3.05</td>
<td align="center" valign="middle">3.43</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Prediction results by scan point for ShrapML. Results for each scan site showing (column 1) confusion matrix test prediction results, (column 2&#x2013;3) negative and (column 4&#x2013;5) positive representative images without and with the GradCAM overlay. Regions in the images with high relevance to model predictions have red-yellow overlays, while those of lower relevance have blue-green overlays.</p>
</caption>
<graphic xlink:href="fvets-11-1374890-g003.tif"/>
</fig>
<p>A summary table of average performance metrics for each scan site across all three model architectures is shown in <xref ref-type="table" rid="tab5">Table 5</xref>.</p>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>Summary of performance metrics for each POCUS site.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th/>
<th align="center" valign="top">CTS</th>
<th align="center" valign="top">CTS M-Mode</th>
<th align="center" valign="top">PCS</th>
<th align="center" valign="top">DH</th>
<th align="center" valign="top">SR</th>
<th align="center" valign="top">CC</th>
<th align="center" valign="top">HR</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom">Accuracy</td>
<td align="center" valign="middle">93.98%</td>
<td align="center" valign="middle">82.11%</td>
<td align="center" valign="middle">94.12%</td>
<td align="center" valign="middle">98.02%</td>
<td align="center" valign="middle">90.61%</td>
<td align="center" valign="middle">88.49%</td>
<td align="center" valign="middle">95.18%</td>
</tr>
<tr>
<td align="left" valign="bottom">Precision</td>
<td align="center" valign="middle">94.69%</td>
<td align="center" valign="middle">99.42%</td>
<td align="center" valign="middle">96.83%</td>
<td align="center" valign="middle">99.69%</td>
<td align="center" valign="middle">89.00%</td>
<td align="center" valign="middle">86.76%</td>
<td align="center" valign="middle">94.42%</td>
</tr>
<tr>
<td align="left" valign="bottom">Recall</td>
<td align="center" valign="middle">93.19%</td>
<td align="center" valign="middle">64.60%</td>
<td align="center" valign="middle">91.32%</td>
<td align="center" valign="middle">96.35%</td>
<td align="center" valign="middle">93.52%</td>
<td align="center" valign="middle">95.15%</td>
<td align="center" valign="middle">96.41%</td>
</tr>
<tr>
<td align="left" valign="bottom">Specificity</td>
<td align="center" valign="middle">94.77%</td>
<td align="center" valign="middle">99.62%</td>
<td align="center" valign="middle">96.92%</td>
<td align="center" valign="middle">99.70%</td>
<td align="center" valign="middle">87.69%</td>
<td align="center" valign="middle">81.82%</td>
<td align="center" valign="middle">93.95%</td>
</tr>
<tr>
<td align="left" valign="bottom">F1 Score</td>
<td align="center" valign="middle">93.92%</td>
<td align="center" valign="middle">98.11%</td>
<td align="center" valign="middle">93.93%</td>
<td align="center" valign="middle">97.98%</td>
<td align="center" valign="middle">91.04%</td>
<td align="center" valign="middle">90.22%</td>
<td align="center" valign="middle">95.29%</td>
</tr>
<tr>
<td align="left" valign="bottom">Number of Training Images</td>
<td align="center" valign="middle">23,305</td>
<td align="center" valign="middle">1,652</td>
<td align="center" valign="middle">16,380</td>
<td align="center" valign="middle">11,340</td>
<td align="center" valign="middle">9,455</td>
<td align="center" valign="middle">10,080</td>
<td align="center" valign="middle">9,455</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec sec-type="discussion" id="sec15">
<label>4</label>
<title>Discussion</title>
<p>Medical imaging-based triage is critical for both human and veterinary emergency medicine to identify issues early on and ensure resources are properly distributed. In remote or military medicine situations, the lack of skilled personnel makes imaging-based triage less relied upon, but AI prediction models can simplify this for the end user. Here, we focus on the POCUS procedure GlobalFAST&#x00AE;, a widely used triage exam to look for abdominal or thoracic free fluid in injured dogs. The AI models shown in this work can automate predictions for ultrasound results if properly tuned for the application.</p>
<p>Three different AI architectures were evaluated to see which was capable of being trained to distinguish positive injury cases from baseline images. While all models were generally successful at being trained for these applications, strong test performance may not indicate properly trained models. For instance, MobileNetV2 had the highest accuracy, but heat map overlays indicating where the AI was focused were not tracking proper image locations. Model overfit was combatted with the various image augmentation techniques used for the training, but this was insufficient to mimic proper subject variability to create a more robust model for this architecture. This issue was less evident for the other two model architectures, highlighting the importance of AI model selection and validation on ultrasound image applications such as this. However, without more subjects and the variability that those bring, it is hard to fully verify if the developed DarkNet-19 or ShrapML models are suitable. Preliminarily, ShrapML had the strongest performance and prediction rate, making it the most suitable going forward as well as eventual integration for real-time deployment with ultrasound machines.</p>
<p>Focusing on the various scan points in the used POCUS exam, there were obvious differences in the AI model training. Training image sets were not equally sized, but that did not correlate to what scan sites performed the best. The DH site was the overall strongest performing site across all performance metrics. However, this could be due to this scan site having the largest difference between live and cadaveric tissue resulting in a well-trained model. In addition, fewer augmentation steps were used for this site due to training issues using all affine transformations. More images are needed to address this issue from a wider range of subjects. CTS and HR views also performed well across the three models trained. Worst performing were the M-mode reconstructed chest tube images, which could be influenced by the minimal training data used for this model, and thus may be improved with more training data. The CC site was also a lower performing scan site even though more than 10,000 images were used in the training dataset. However, this is mostly influenced by DarkNet-19 having lower performance for this scan site while the other two models had accuracies greater than 96%. Overall, each scan site for this POCUS application was successful as an input for an injury prediction model.</p>
</sec>
<sec sec-type="conclusions" id="sec16">
<label>5</label>
<title>Conclusion</title>
<p>Artificial intelligence has the potential to simplify triage and injury diagnosis for emergency veterinary medicine. The results shown in this work highlight how AI can be used for automating US-based detection of intra-abdominal and intrathoracic injuries for veterinary applications. Each scan point reached greater than 80% injury detection accuracy, with most surpassing 90% accuracy. However, more data is still needed to be able to ensure that the AI models are not overfitting the training data and can accurately predict for new subject data. Next steps for this work will expand training datasets so that blind subject testing is possible for confirming generalized models are developed. With more data, these models can be set up for real-time integration with ultrasound devices allowing for early detection of thoracic and abdominal injuries for military working dogs and other canine trauma situations. This will lower the skill threshold for medical imaging-based triage so that these techniques can be more widely used.</p>
</sec>
<sec sec-type="data-availability" id="sec18">
<title>Data availability statement</title>
<p>The datasets presented in this article are not readily available because they have been collected and maintained in a government-controlled database that is located at the US Army Institute of Surgical Research. As such, this data can be made available through the development of a Cooperative Research &#x0026; Development Agreement (CRADA) with the corresponding author. Requests to access the datasets should be directed to ES, <email>eric.j.snider3.civ@health.mil</email>.</p>
</sec>
<sec sec-type="ethics-statement" id="sec19">
<title>Ethics statement</title>
<p>Research was conducted in compliance with the Animal Welfare Act, the implementing Animal Welfare regulations, and the principles of the Guide for the Care and Use for Laboratory Animals. The Institutional Animal Care and Use Committee at the Department of Defense Military Working Dog Veterinary Services approved all research conducted in this study. The facility where this research was conducted is fully accredited by the AAALAC International. The study was conducted in accordance with the local legislation and institutional requirements.</p>
</sec>
<sec sec-type="author-contributions" id="sec20">
<title>Author contributions</title>
<p>SH: Conceptualization, Data curation, Formal analysis, Methodology, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. LH: Data curation, Formal analysis, Methodology, Software, Writing &#x2013; review &#x0026; editing. TE: Funding acquisition, Writing &#x2013; review &#x0026; editing. EV: Writing &#x2013; original draft, Methodology, Funding acquisition, Data curation, Conceptualization. ES: Conceptualization, Data curation, Formal analysis, Methodology, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing.</p>
</sec>
</body>
<back>
<sec sec-type="funding-information" id="sec21">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. This work was funded through the Restoral program by the Defense Health Agency. This project was supported in part by an appointment to the Science Education Programs at National Institutes of Health (NIH), administered by ORAU through the U.S. Department of Energy Oak Ridge Institute for Science and Education (LH).</p>
</sec>
<ack>
<p>The authors would like to acknowledge Dr. Joanna Hourani, MAJ (Dr.) Richard Brooksby, and MAJ (Dr.) Erin Hennessey for their assistance with image capture in Military Working Dogs.</p>
</ack>
<sec sec-type="COI-statement" id="sec22">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="sec23">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="disclaimer" id="sec24">
<title>Author disclaimer</title>
<p>The views expressed in this article are those of the authors and do not reflect the official policy or position of the U.S. Army Medical Department, Department of the Army, DOD, or the U.S. Government.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><label>1.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Boysen</surname> <given-names>SR</given-names></name> <name><surname>Lisciandro</surname> <given-names>GR</given-names></name></person-group>. <article-title>The use of ultrasound for dogs and cats in the emergency room: AFAST and TFAST</article-title>. <source>Vet Clin North Am Small Anim Pract</source>. (<year>2013</year>) <volume>43</volume>:<fpage>773</fpage>&#x2013;<lpage>97</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cvsm.2013.03.011</pub-id></citation></ref>
<ref id="ref2"><label>2.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cole</surname> <given-names>L</given-names></name> <name><surname>Pivetta</surname> <given-names>M</given-names></name> <name><surname>Humm</surname> <given-names>K</given-names></name></person-group>. <article-title>Diagnostic accuracy of a lung ultrasound protocol (vet BLUE) for detection of pleural fluid, pneumothorax and lung pathology in dogs and cats</article-title>. <source>J Small Anim Pract</source>. (<year>2021</year>) <volume>62</volume>:<fpage>178</fpage>&#x2013;<lpage>86</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jsap.13271</pub-id></citation></ref>
<ref id="ref3"><label>3.</label> <citation citation-type="other"><person-group person-group-type="author"><name><surname>Boatright</surname> <given-names>K.</given-names></name></person-group> (<year>2020</year>). Up your imaging game: The power of AFAST <fpage>52</fpage>. Available at: <ext-link xlink:href="https://www.dvm360.com/view/up-your-imaging-game-the-power-of-afast" ext-link-type="uri">https://www.dvm360.com/view/up-your-imaging-game-the-power-of-afast</ext-link> (Accessed January 5, 2024).</citation></ref>
<ref id="ref4"><label>4.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lisciandro</surname> <given-names>GR</given-names></name> <name><surname>Lisciandro</surname> <given-names>SC</given-names></name></person-group>. <article-title>Global FAST for patient monitoring and staging in dogs and cats</article-title>. <source>Vet Clin North Am Small Anim Pract</source>. (<year>2021</year>) <volume>51</volume>:<fpage>1315</fpage>&#x2013;<lpage>33</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cvsm.2021.07.011</pub-id></citation></ref>
<ref id="ref5"><label>5.</label> <citation citation-type="other"><person-group person-group-type="author"><name><surname>Green</surname> <given-names>R.</given-names></name></person-group> (<year>2021</year>). What do military working dogs do? <italic>Am. Kennel Club</italic> Available at: <ext-link xlink:href="https://www.akc.org/expert-advice/news/what-are-military-working-dogs/" ext-link-type="uri">https://www.akc.org/expert-advice/news/what-are-military-working-dogs/</ext-link> (Accessed January 5, 2024).</citation></ref>
<ref id="ref6"><label>6.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Edwards</surname> <given-names>T</given-names></name> <name><surname>Scott</surname> <given-names>LLF</given-names></name> <name><surname>Gonyeau</surname> <given-names>KE</given-names></name> <name><surname>Howard</surname> <given-names>EH</given-names></name> <name><surname>Parker</surname> <given-names>JS</given-names></name> <name><surname>Hall</surname> <given-names>K</given-names></name></person-group>. <article-title>Comparison of trauma sustained by civilian dogs and deployed military working dogs</article-title>. <source>J Vet Emerg Crit Care (San Antonio)</source>. (<year>2021</year>) <volume>31</volume>:<fpage>498</fpage>&#x2013;<lpage>507</lpage>. doi: <pub-id pub-id-type="doi">10.1111/vec.13064</pub-id></citation></ref>
<ref id="ref7"><label>7.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>McGraw</surname> <given-names>AL</given-names></name> <name><surname>Thomas</surname> <given-names>TM</given-names></name></person-group>. <article-title>Military working dogs: an overview of veterinary Care of these Formidable Assets</article-title>. <source>Vet Clin North Am Small Anim Pract</source>. (<year>2021</year>) <volume>51</volume>:<fpage>933</fpage>&#x2013;<lpage>44</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cvsm.2021.04.010</pub-id></citation></ref>
<ref id="ref8"><label>8.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lagutchik</surname> <given-names>M</given-names></name> <name><surname>Baker</surname> <given-names>J</given-names></name> <name><surname>Balser</surname> <given-names>J</given-names></name> <name><surname>Burghardt</surname> <given-names>W</given-names></name> <name><surname>Enroth</surname> <given-names>M</given-names></name> <name><surname>Flournoy</surname> <given-names>S</given-names></name> <etal/></person-group>. <article-title>Trauma Management of Military Working Dogs</article-title>. <source>Mil Med</source>. (<year>2018</year>) <volume>183</volume>:<fpage>180</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1093/milmed/usy119</pub-id></citation></ref>
<ref id="ref9"><label>9.</label> <citation citation-type="book"><person-group person-group-type="author"><name><surname>Townsend</surname> <given-names>S</given-names></name> <name><surname>Lasher</surname> <given-names>W</given-names></name></person-group>. <source>The U.S. Army in multi-domain operations 2028</source>. <publisher-loc>Arlington, VA, USA</publisher-loc>: <publisher-name>U.S. Army</publisher-name> (<year>2018</year>).</citation></ref>
<ref id="ref10"><label>10.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Epstein</surname> <given-names>A</given-names></name> <name><surname>Lim</surname> <given-names>R</given-names></name> <name><surname>Johannigman</surname> <given-names>J</given-names></name> <name><surname>Fox</surname> <given-names>CJ</given-names></name> <name><surname>Inaba</surname> <given-names>K</given-names></name> <name><surname>Vercruysse</surname> <given-names>GA</given-names></name> <etal/></person-group>. <article-title>Putting medical boots on the ground: lessons from the war in Ukraine and applications for future conflict with near-peer adversaries</article-title>. <source>J Am Coll Surg</source>. (<year>2023</year>) <volume>237</volume>:<fpage>364</fpage>&#x2013;<lpage>73</lpage>. doi: <pub-id pub-id-type="doi">10.1097/XCS.0000000000000707</pub-id></citation></ref>
<ref id="ref11"><label>11.</label> <citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Latif</surname> <given-names>J.</given-names></name> <name><surname>Xiao</surname> <given-names>C.</given-names></name> <name><surname>Imran</surname> <given-names>A.</given-names></name> <name><surname>Tu</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). <article-title>Medical imaging using machine learning and deep learning algorithms: a review</article-title>, in <conf-name>2019 2nd international conference on computing, mathematics and engineering technologies (iCoMET)</conf-name>, (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>5</lpage>.</citation></ref>
<ref id="ref12"><label>12.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>R</given-names></name> <name><surname>Rong</surname> <given-names>Y</given-names></name> <name><surname>Peng</surname> <given-names>Z</given-names></name></person-group>. <article-title>A review of medical artificial intelligence</article-title>. <source>Glob Health J</source>. (<year>2020</year>) <volume>4</volume>:<fpage>42</fpage>&#x2013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.glohj.2020.04.002</pub-id></citation></ref>
<ref id="ref13"><label>13.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Komatsu</surname> <given-names>M</given-names></name> <name><surname>Sakai</surname> <given-names>A</given-names></name> <name><surname>Dozen</surname> <given-names>A</given-names></name> <name><surname>Shozu</surname> <given-names>K</given-names></name> <name><surname>Yasutomi</surname> <given-names>S</given-names></name> <name><surname>Machino</surname> <given-names>H</given-names></name> <etal/></person-group>. <article-title>Towards clinical application of artificial intelligence in ultrasound imaging</article-title>. <source>Biomedicines</source>. (<year>2021</year>) <volume>9</volume>:<fpage>720</fpage>. doi: <pub-id pub-id-type="doi">10.3390/biomedicines9070720</pub-id></citation></ref>
<ref id="ref14"><label>14.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>S</given-names></name> <name><surname>Wang</surname> <given-names>Y</given-names></name> <name><surname>Yang</surname> <given-names>X</given-names></name> <name><surname>Lei</surname> <given-names>B</given-names></name> <name><surname>Liu</surname> <given-names>L</given-names></name> <name><surname>Li</surname> <given-names>SX</given-names></name> <etal/></person-group>. <article-title>Deep learning in medical ultrasound analysis: a review</article-title>. <source>Engineering</source>. (<year>2019</year>) <volume>5</volume>:<fpage>261</fpage>&#x2013;<lpage>75</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eng.2018.11.020</pub-id></citation></ref>
<ref id="ref15"><label>15.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chiang</surname> <given-names>T-C</given-names></name> <name><surname>Huang</surname> <given-names>Y-S</given-names></name> <name><surname>Chen</surname> <given-names>R-T</given-names></name> <name><surname>Huang</surname> <given-names>C-S</given-names></name> <name><surname>Chang</surname> <given-names>R-F</given-names></name></person-group>. <article-title>Tumor detection in automated breast ultrasound using 3-D CNN and prioritized candidate aggregation</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2019</year>) <volume>38</volume>:<fpage>240</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2018.2860257</pub-id></citation></ref>
<ref id="ref16"><label>16.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Song</surname> <given-names>D</given-names></name> <name><surname>Yao</surname> <given-names>J</given-names></name> <name><surname>Jiang</surname> <given-names>Y</given-names></name> <name><surname>Shi</surname> <given-names>S</given-names></name> <name><surname>Cui</surname> <given-names>C</given-names></name> <name><surname>Wang</surname> <given-names>L</given-names></name> <etal/></person-group>. <article-title>A new xAI framework with feature explainability for tumors decision-making in ultrasound data: comparing with grad-CAM</article-title>. <source>Comput Methods Prog Biomed</source>. (<year>2023</year>) <volume>235</volume>:<fpage>107527</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cmpb.2023.107527</pub-id></citation></ref>
<ref id="ref17"><label>17.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Diaz-Escobar</surname> <given-names>J</given-names></name> <name><surname>Ord&#x00F3;&#x00F1;ez-Guill&#x00E9;n</surname> <given-names>NE</given-names></name> <name><surname>Villarreal-Reyes</surname> <given-names>S</given-names></name> <name><surname>Galaviz-Mosqueda</surname> <given-names>A</given-names></name> <name><surname>Kober</surname> <given-names>V</given-names></name> <name><surname>Rivera-Rodriguez</surname> <given-names>R</given-names></name> <etal/></person-group>. <article-title>Deep-learning based detection of COVID-19 using lung ultrasound imagery</article-title>. <source>PLoS One</source>. (<year>2021</year>) <volume>16</volume>:<fpage>e0255886</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0255886</pub-id></citation></ref>
<ref id="ref18"><label>18.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gil-Rodr&#x00ED;guez</surname> <given-names>J</given-names></name> <name><surname>P&#x00E9;rez de Rojas</surname> <given-names>J</given-names></name> <name><surname>Aranda-Laserna</surname> <given-names>P</given-names></name> <name><surname>Benavente-Fern&#x00E1;ndez</surname> <given-names>A</given-names></name> <name><surname>Martos-Ruiz</surname> <given-names>M</given-names></name> <name><surname>Peregrina-Rivas</surname> <given-names>J-A</given-names></name> <etal/></person-group>. <article-title>Ultrasound findings of lung ultrasonography in COVID-19: a systematic review</article-title>. <source>Eur J Radiol</source>. (<year>2022</year>) <volume>148</volume>:<fpage>110156</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ejrad.2022.110156</pub-id></citation></ref>
<ref id="ref19"><label>19.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Baumgartner</surname> <given-names>CF</given-names></name> <name><surname>Kamnitsas</surname> <given-names>K</given-names></name> <name><surname>Matthew</surname> <given-names>J</given-names></name> <name><surname>Fletcher</surname> <given-names>TP</given-names></name> <name><surname>Smith</surname> <given-names>S</given-names></name> <name><surname>Koch</surname> <given-names>LM</given-names></name> <etal/></person-group>. <article-title>SonoNet: real-time detection and localisation of Fetal standard scan planes in freehand ultrasound</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2017</year>) <volume>36</volume>:<fpage>2204</fpage>&#x2013;<lpage>15</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2017.2712367</pub-id></citation></ref>
<ref id="ref20"><label>20.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Iriani Sapitri</surname> <given-names>A</given-names></name> <name><surname>Nurmaini</surname> <given-names>S</given-names></name> <name><surname>Naufal Rachmatullah</surname> <given-names>M</given-names></name> <name><surname>Tutuko</surname> <given-names>B</given-names></name> <name><surname>Darmawahyuni</surname> <given-names>A</given-names></name> <name><surname>Firdaus</surname> <given-names>F</given-names></name> <etal/></person-group>. <article-title>Deep learning-based real time detection for cardiac objects with fetal ultrasound video</article-title>. <source>Inform Med Unlocked</source>. (<year>2023</year>) <volume>36</volume>:<fpage>101150</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.imu.2022.101150</pub-id></citation></ref>
<ref id="ref21"><label>21.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Banzato</surname> <given-names>T</given-names></name> <name><surname>Wodzinski</surname> <given-names>M</given-names></name> <name><surname>Burti</surname> <given-names>S</given-names></name> <name><surname>Osti</surname> <given-names>VL</given-names></name> <name><surname>Rossoni</surname> <given-names>V</given-names></name> <name><surname>Atzori</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>Automatic classification of canine thoracic radiographs using deep learning</article-title>. <source>Sci Rep</source>. (<year>2021</year>) <volume>11</volume>:<fpage>3964</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-021-83515-3</pub-id></citation></ref>
<ref id="ref22"><label>22.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>M&#x00FC;ller</surname> <given-names>TR</given-names></name> <name><surname>Solano</surname> <given-names>M</given-names></name> <name><surname>Tsunemi</surname> <given-names>MH</given-names></name></person-group>. <article-title>Accuracy of artificial intelligence software for the detection of confirmed pleural effusion in thoracic radiographs in dogs</article-title>. <source>Vet Radiol Ultrasound</source>. (<year>2022</year>) <volume>63</volume>:<fpage>573</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1111/vru.13089</pub-id></citation></ref>
<ref id="ref23"><label>23.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>E</given-names></name> <name><surname>Fischetti</surname> <given-names>AJ</given-names></name> <name><surname>Sreetharan</surname> <given-names>P</given-names></name> <name><surname>Weltman</surname> <given-names>JG</given-names></name> <name><surname>Fox</surname> <given-names>PR</given-names></name></person-group>. <article-title>Comparison of artificial intelligence to the veterinary radiologist&#x2019;s diagnosis of canine cardiogenic pulmonary edema</article-title>. <source>Vet Radiol Ultrasound</source>. (<year>2022</year>) <volume>63</volume>:<fpage>292</fpage>&#x2013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.1111/vru.13062</pub-id></citation></ref>
<ref id="ref24"><label>24.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>S</given-names></name> <name><surname>Wang</surname> <given-names>Z</given-names></name> <name><surname>Visser</surname> <given-names>LC</given-names></name> <name><surname>Wisner</surname> <given-names>ER</given-names></name> <name><surname>Cheng</surname> <given-names>H</given-names></name></person-group>. <article-title>Pilot study: application of artificial intelligence for detecting left atrial enlargement on canine thoracic radiographs</article-title>. <source>Vet Radiol Ultrasound</source>. (<year>2020</year>) <volume>61</volume>:<fpage>611</fpage>&#x2013;<lpage>8</lpage>. doi: <pub-id pub-id-type="doi">10.1111/vru.12901</pub-id></citation></ref>
<ref id="ref25"><label>25.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>McEvoy</surname> <given-names>FJ</given-names></name> <name><surname>Proschowsky</surname> <given-names>HF</given-names></name> <name><surname>M&#x00FC;ller</surname> <given-names>AV</given-names></name> <name><surname>Moorman</surname> <given-names>L</given-names></name> <name><surname>Bender-Koch</surname> <given-names>J</given-names></name> <name><surname>Svalastoga</surname> <given-names>EL</given-names></name> <etal/></person-group>. <article-title>Deep transfer learning can be used for the detection of hip joints in pelvis radiographs and the classification of their hip dysplasia status</article-title>. <source>Vet Radiol Ultrasound</source>. (<year>2021</year>) <volume>62</volume>:<fpage>387</fpage>&#x2013;<lpage>93</lpage>. doi: <pub-id pub-id-type="doi">10.1111/vru.12968</pub-id></citation></ref>
<ref id="ref26"><label>26.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Boice</surname> <given-names>EN</given-names></name> <name><surname>Hernandez-Torres</surname> <given-names>SI</given-names></name> <name><surname>Snider</surname> <given-names>EJ</given-names></name></person-group>. <article-title>Comparison of ultrasound image classifier deep learning algorithms for shrapnel detection</article-title>. <source>J Imaging</source>. (<year>2022</year>) <volume>8</volume>:<fpage>140</fpage>. doi: <pub-id pub-id-type="doi">10.3390/jimaging8050140</pub-id></citation></ref>
<ref id="ref27"><label>27.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Snider</surname> <given-names>EJ</given-names></name> <name><surname>Hernandez-Torres</surname> <given-names>SI</given-names></name> <name><surname>Boice</surname> <given-names>EN</given-names></name></person-group>. <article-title>An image classification deep-learning algorithm for shrapnel detection from ultrasound images</article-title>. <source>Sci Rep</source>. (<year>2022</year>) <volume>12</volume>:<fpage>8427</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-022-12367-2</pub-id></citation></ref>
<ref id="ref28"><label>28.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hernandez-Torres</surname> <given-names>SI</given-names></name> <name><surname>Bedolla</surname> <given-names>C</given-names></name> <name><surname>Berard</surname> <given-names>D</given-names></name> <name><surname>Snider</surname> <given-names>EJ</given-names></name></person-group>. <article-title>An extended focused assessment with sonography in trauma ultrasound tissue-mimicking phantom for developing automated diagnostic technologies</article-title>. <source>Front Bioeng Biotechnol</source>. (<year>2023</year>) <volume>11</volume>:<fpage>1244616</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fbioe.2023.1244616</pub-id></citation></ref>
<ref id="ref29"><label>29.</label> <citation citation-type="other"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>Noy</surname> <given-names>A.</given-names></name> <name><surname>Lin</surname> <given-names>M.</given-names></name> <name><surname>Qian</surname> <given-names>Q.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Jin</surname> <given-names>R.</given-names></name></person-group> (<year>2020</year>). <article-title>Wemix: How to better utilize data augmentation</article-title>. <source>arXiv preprint</source>. <volume>arXiv</volume>:<fpage>2010.01267</fpage>. Available at: <ext-link xlink:href="https://arxiv.org/abs/2010.01267" ext-link-type="uri">https://arxiv.org/abs/2010.01267</ext-link></citation></ref>
<ref id="ref30"><label>30.</label> <citation citation-type="other"><person-group person-group-type="author"><name><surname>Sandler</surname> <given-names>M.</given-names></name> <name><surname>Howard</surname> <given-names>A.</given-names></name> <name><surname>Zhu</surname> <given-names>M.</given-names></name> <name><surname>Zhmoginov</surname> <given-names>A.</given-names></name> <name><surname>Chen</surname> <given-names>LC.</given-names></name></person-group> (<year>2018</year>). <article-title>Mobilenetv2: Inverted residuals and linear bottlenecks</article-title>. In: <source>Proceedings of the IEEE conference on computer vision and pattern recognition</source>. <publisher-name>IEEE</publisher-name>. <fpage>4510</fpage>&#x2013;<lpage>4520</lpage>. Available at: <ext-link xlink:href="https://ieeexplore.ieee.org/abstract/document/8578572" ext-link-type="uri">https://ieeexplore.ieee.org/abstract/document/8578572</ext-link></citation></ref>
<ref id="ref31"><label>31.</label> <citation citation-type="other"><person-group person-group-type="author"><name><surname>Redmon</surname> <given-names>J.</given-names></name> <name><surname>Farhadi</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>YOLO9000: Better, Faster, Stronger</article-title>. <source>arXiv preprint</source>. <volume>arXiv</volume>:<fpage>1612.08242</fpage>. Available at: <ext-link xlink:href="http://arxiv.org/abs/1612.08242" ext-link-type="uri">http://arxiv.org/abs/1612.08242</ext-link> (Accessed April 22, 2022).</citation></ref>
<ref id="ref32"><label>32.</label> <citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Selvaraju</surname> <given-names>R. R.</given-names></name> <name><surname>Cogswell</surname> <given-names>M.</given-names></name> <name><surname>Das</surname> <given-names>A.</given-names></name> <name><surname>Vedantam</surname> <given-names>R.</given-names></name> <name><surname>Parikh</surname> <given-names>D.</given-names></name> <name><surname>Batra</surname> <given-names>D.</given-names></name></person-group> (<year>2017</year>). <article-title>Grad-cam: visual explanations from deep networks via gradient-based localization</article-title>, in <conf-name>Proceedings of the IEEE international conference on computer vision</conf-name>, <fpage>618</fpage>&#x2013;<lpage>626</lpage>.</citation></ref>
<ref id="ref33"><label>33.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hsu</surname> <given-names>S-T</given-names></name> <name><surname>Su</surname> <given-names>Y-J</given-names></name> <name><surname>Hung</surname> <given-names>C-H</given-names></name> <name><surname>Chen</surname> <given-names>M-J</given-names></name> <name><surname>Lu</surname> <given-names>C-H</given-names></name> <name><surname>Kuo</surname> <given-names>C-E</given-names></name></person-group>. <article-title>Automatic ovarian tumors recognition system based on ensemble convolutional neural network with ultrasound imaging</article-title>. <source>BMC Med Inform Decis Mak</source>. (<year>2022</year>) <volume>22</volume>:<fpage>298</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12911-022-02047-6</pub-id></citation></ref>
<ref id="ref34"><label>34.</label> <citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>J</given-names></name> <name><surname>Shi</surname> <given-names>X</given-names></name> <name><surname>Wang</surname> <given-names>B</given-names></name> <name><surname>Qiu</surname> <given-names>W</given-names></name> <name><surname>Tian</surname> <given-names>G</given-names></name> <name><surname>Wang</surname> <given-names>X</given-names></name> <etal/></person-group>. <article-title>Ultrasound image classification of thyroid nodules based on deep learning</article-title>. <source>Front Oncol</source>. (<year>2022</year>) <volume>12</volume>:<fpage>905955</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fonc.2022.905955</pub-id></citation></ref>
</ref-list>
</back>
</article>