<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Vet. Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Veterinary Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Vet. Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2297-1769</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fvets.2026.1736979</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Use of animal biometrics for accurate hunting evidence of wild ungulates: red deer as a model species</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Kanich</surname>
<given-names>Ond&#x0159;ej</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3387806"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Cukor</surname>
<given-names>Jan</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2326765"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Ad&#x00E1;mkov&#x00E1;</surname>
<given-names>Jana</given-names>
</name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3243087"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Skot&#x00E1;k</surname>
<given-names>Vlastimil</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Sakin</surname>
<given-names>Martin</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3387883"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Olejn&#x00ED;&#x010D;kov&#x00E1;</surname>
<given-names>Veronika</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/612578"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Volf</surname>
<given-names>Tom&#x00E1;&#x0161;</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3387847"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Drahansk&#x00FD;</surname>
<given-names>Martin</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/150177"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hart</surname>
<given-names>Vlastimil</given-names>
</name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Anthropology, Faculty of Science, Masaryk University</institution>, <city>Brno</city>, <country country="CZ">Czechia</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Silviculture, Faculty of Forestry and Wood Sciences, Czech University of Life Sciences Prague</institution>, <city>Prague</city>, <country country="CZ">Czechia</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Game Management, Forestry and Game Management Research Institute</institution>, <city>J&#x00ED;lovi&#x0161;t&#x011B;</city>, <country country="CZ">Czechia</country></aff>
<aff id="aff4"><label>4</label><institution>Department of Game Management and Wildlife Biology, Faculty of Forestry and Wood Sciences, Czech University of Life Sciences Prague</institution>, <city>Prague</city>, <country country="CZ">Czechia</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Jana Ad&#x00E1;mkov&#x00E1;, <email xlink:href="mailto:adamkovaj@fld.czu.cz">adamkovaj@fld.czu.cz</email>; Jan Cukor, <email xlink:href="mailto:cukor@fld.czu.cz">cukor@fld.czu.cz</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-24">
<day>24</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>13</volume>
<elocation-id>1736979</elocation-id>
<history>
<date date-type="received">
<day>31</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>06</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>06</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Kanich, Cukor, Ad&#x00E1;mkov&#x00E1;, Skot&#x00E1;k, Sakin, Olejn&#x00ED;&#x010D;kov&#x00E1;, Volf, Drahansk&#x00FD; and Hart.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Kanich, Cukor, Ad&#x00E1;mkov&#x00E1;, Skot&#x00E1;k, Sakin, Olejn&#x00ED;&#x010D;kov&#x00E1;, Volf, Drahansk&#x00FD; and Hart</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-24">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Central Europe faces an overabundance of wild ungulates, which is driven by several factors, including traditional hunting practices. The harvest of females is insufficient and recorded without verification, even when they were not actually hunted. This practice contributes to further population growth through inaccurate hunting records. Therefore, basic procedures for automated registration based on muzzle pattern animal biometric evaluation of harvested wild ungulates were proposed. The red deer (<italic>Cervus elaphus</italic>) served as the model species. For the assessment of biometric characteristics, 2,193 photographs were taken from the frontal and overhead directions of 972 harvested red deer during regular game management. A comparison of the collected images using the LoFTR (<italic>Local Feature TRansformer</italic>) method revealed the potential for individual identification, with a peak accuracy of 95.048%. In contrast, the minimum accuracy was 90.048% using a combination of overhead and frontal images of high and medium quality. Because there is no solution for the recognition of ungulates, the comparison of these results was performed with the recognition systems for pets and livestock. The achieved accuracy is around 2% better than that of comparable recognition systems (with similar dataset size, number of feature points, etc.). The results confirmed that biometric methods can be used to identify and record harvested game. This can be achieved by developing a mobile application that transmits images for automated comparison and evaluation. Once individual identity is confirmed, the animal will be registered. This ensures a verifiable record of harvested game and provides a solid foundation for sustainable hunting planning.</p>
</abstract>
<kwd-group>
<kwd>hunting statistics</kwd>
<kwd>LoFTR</kwd>
<kwd>muzzle pattern recognition</kwd>
<kwd>overabundance</kwd>
<kwd>wildlife management</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research was funded by the Ministry of Agriculture of the Czech Republic (grant no. QK23020117).</funding-statement>
</funding-group>
<counts>
<fig-count count="7"/>
<table-count count="4"/>
<equation-count count="0"/>
<ref-count count="77"/>
<page-count count="12"/>
<word-count count="9377"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Animal Behavior and Welfare</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>In recent decades, populations of wild ungulates have been steadily increasing across Europe (<xref ref-type="bibr" rid="ref1 ref2 ref3">1&#x2013;3</xref>) which led to challenges for sustainable wildlife management (<xref ref-type="bibr" rid="ref4">4</xref>). With increasing populations of major ungulate species, both native and introduced, a wide range of negative impacts on ecosystems and the management of natural resources, such as forestry and agriculture, has been observed. Among the primary concerns associated with overabundance is the detrimental impact on natural forest regeneration by browsing, fraying damage, and bark stripping in older stands (<xref ref-type="bibr" rid="ref5">5</xref>). In an agricultural landscape, the damage is associated with grazing, trampling, and rooting of crops, which can result in damages amounting to tens of millions of euros annually, as was evaluated in the Czech Republic (<xref ref-type="bibr" rid="ref6">6</xref>). Based on the population increase and the related negative impacts, it is possible to point out the overabundance of ungulates according to biological, ecological, and socio-economic criteria at the local level in Europe (<xref ref-type="bibr" rid="ref1">1</xref>).</p>
<p>Therefore, effective population control methods are sought. Standard hunting management still represents the crucial and most widespread solution, though in many cases, it is inefficient and puts more pressure on hunters (<xref ref-type="bibr" rid="ref7">7</xref>). As the population of ungulates increases, so does the number of harvested individuals, highlighting the need for reliable and verifiable systems to track these individuals and their hunting evidence (<xref ref-type="bibr" rid="ref8">8</xref>, <xref ref-type="bibr" rid="ref9">9</xref>). Currently, the standard methods for recording game harvests rely heavily on self-reported data provided by hunters and local hunting organizations, in particular hunting districts (<xref ref-type="bibr" rid="ref10">10</xref>). Although hunting records are mandatory in many countries, these records are often subject to errors, inconsistencies, or even deliberate misreporting (<xref ref-type="bibr" rid="ref10">10</xref>), especially in cases where there is high pressure on local hunters to increase hunting bags, such as in the Czech Republic. This may result in reported amounts being higher than the actual number of individuals that were really hunted (especially females) (<xref ref-type="bibr" rid="ref8">8</xref>, <xref ref-type="bibr" rid="ref11">11</xref>). Consequently, this creates a significant gap between reported and actual harvests, ultimately reducing the reliability of population models and limiting the ability of authorities to develop evidence-based management strategies.</p>
<p>To address this issue, there is a growing interest in applying animal biometric recognition methods for wildlife evidence, which could guarantee verifiable records of harvested wildlife individuals. In this context, the use of biometrics is proposed&#x2014;a method that has already been validated in the past for the identification of livestock and domestic animals (<xref ref-type="bibr" rid="ref12 ref13 ref14 ref15 ref16">12&#x2013;16</xref>). Economically, reliable identification of livestock is essential for breed differentiation, official registration, traceability across production systems, veterinary care, and the prevention of false insurance claims. Following the success of biometrics on livestock, similar techniques in harvested wild ungulates, based on muzzle pattern (<italic>planum nasolabiale</italic>), may allow for the identification of individuals through features analogous to human fingerprint minutiae (<xref ref-type="bibr" rid="ref17">17</xref>, <xref ref-type="bibr" rid="ref18">18</xref>). In recent years, with the growing interest in the application of computer vision and deep learning in the veterinary field (<xref ref-type="bibr" rid="ref19 ref20 ref21">19&#x2013;21</xref>), studies on animal biometrics based on muzzle recognition have increased. These studies often used machine learning (<xref ref-type="bibr" rid="ref22">22</xref>) or deep learning techniques (<xref ref-type="bibr" rid="ref23">23</xref>, <xref ref-type="bibr" rid="ref24">24</xref>) to track the muzzle prints of various livestock or domestic species, including horses (<italic>Equus ferus caballus</italic>) (<xref ref-type="bibr" rid="ref25">25</xref>), pigs (<italic>Sus scrofa domesticus</italic>) (<xref ref-type="bibr" rid="ref26">26</xref>, <xref ref-type="bibr" rid="ref27">27</xref>), and dogs (<italic>Canis lupus familiaris</italic>) (<xref ref-type="bibr" rid="ref24">24</xref>, <xref ref-type="bibr" rid="ref28">28</xref>, <xref ref-type="bibr" rid="ref29">29</xref>). 
However, the majority of research focuses on cattle (<italic>Bos taurus</italic>) (<xref ref-type="bibr" rid="ref13">13</xref>, <xref ref-type="bibr" rid="ref23">23</xref>, <xref ref-type="bibr" rid="ref30 ref31 ref32 ref33 ref34 ref35 ref36 ref37 ref38 ref39 ref40 ref41 ref42 ref43">30&#x2013;43</xref>). In those species, the technique has been successfully applied to individual animal identification, health and welfare monitoring, and traceability within food production systems. Despite its success in livestock, muzzle-based biometric identification has not been applied to wild ungulates yet, and studies in free-ranging populations are absent. The acquisition of muzzle images in wildlife introduces specific challenges, including field conditions, variation in tissue integrity post-mortem, and, of course, the willingness and commitment of the hunters themselves to carry out the recording (<xref ref-type="bibr" rid="ref17">17</xref>, <xref ref-type="bibr" rid="ref23">23</xref>).</p>
<p>Consequently, this study seeks to assess the viability of using muzzle pattern characteristics as a biometric identification tool for harvested ungulates&#x2019; evidence. This approach could facilitate effective hunting management following verification and potential practical implementation. Therefore, the primary study aim is to evaluate the proposed method on the red deer (<italic>Cervus elaphus</italic>), as this ungulate species is common across Europe. The particular objectives are to: (i) assess the biometric properties (mainly uniqueness, measurability, and performance) of muzzle pattern biometric characteristics; (ii) evaluate the practical usability of acquisition under field conditions; and (iii) evaluate the image of an individual&#x2019;s muzzle pattern with the rest of the database to evaluate the precision of the proposed methodology. By adapting modern machine learning methods, we aim to bridge the gap between traditional wildlife management practices and modern digital tools, thereby contributing to more transparent and scientifically grounded hunting management systems.</p>
</sec>
<sec sec-type="materials|methods" id="sec2">
<label>2</label>
<title>Materials and methods</title>
<p>An animal biometric recognition system for wild ungulates (ABRSWU&#x2014;Animal Biometric Recognition System for Wild Ungulates) was developed, consisting of several interdependent components, similar to other biometric systems. The first component is the capture device, responsible for acquiring images of the muzzle surface and other relevant data for the particular wild ungulate species. This data is then used to build the database, which represents the second step of the system. The core components of the solution include preprocessing, feature extraction, and comparison methods. All biometric terminology used in this study follows the definitions of the biometric vocabulary standard (<xref ref-type="bibr" rid="ref44">44</xref>).</p>
<sec id="sec3">
<label>2.1</label>
<title>Data acquisition</title>
<p>The biometric comparison methods primarily rely on machine learning techniques. The success of these neural networks is crucially dependent on the datasets used, namely the quantity and quality of the data used, such as a photo of the muzzle surface in this case. Only then can the database be transformed into a perfectly functional dataset. Therefore, due to the potential for using animal biometric comparison in wildlife management practice, the photos of the muzzle were collected with cameras on standard mobile phones. The evaluation was divided into two phases. The first one took place from November 2023 to August 2024 and contained common wild ungulate species (see <xref ref-type="table" rid="tab1">Table 1</xref>). The second phase followed the first one, and it lasted until August 2025. It used a specialized application (further information is given later&#x2014;see <xref ref-type="fig" rid="fig1">Figure 1</xref>) and was focused on red deer only.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>List of animal species recorded during the first phase.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">English name</th>
<th align="left" valign="top">Latin name</th>
<th align="center" valign="top">Individuals</th>
<th align="center" valign="top">Photos</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Fallow deer</td>
<td align="left" valign="top"><italic>Dama dama</italic></td>
<td align="center" valign="top">391</td>
<td align="center" valign="top">1,042</td>
</tr>
<tr>
<td align="left" valign="top">Red deer</td>
<td align="left" valign="top"><italic>Cervus elaphus</italic></td>
<td align="center" valign="top">338</td>
<td align="center" valign="top">924</td>
</tr>
<tr>
<td align="left" valign="top">Sika deer</td>
<td align="left" valign="top"><italic>Cervus nippon</italic></td>
<td align="center" valign="top">244</td>
<td align="center" valign="top">592</td>
</tr>
<tr>
<td align="left" valign="top">Roe deer</td>
<td align="left" valign="top"><italic>Capreolus capreolus</italic></td>
<td align="center" valign="top">1,268</td>
<td align="center" valign="top">2,896</td>
</tr>
<tr>
<td align="left" valign="top">European mouflon</td>
<td align="left" valign="top"><italic>Ovis aries musimon</italic></td>
<td align="center" valign="top">216</td>
<td align="center" valign="top">559</td>
</tr>
<tr>
<td align="left" valign="top" colspan="2">Total</td>
<td align="center" valign="top">2,457</td>
<td align="center" valign="top">6,013</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Example of the frontal (left) and overhead (right) images captured by the application with muzzle outlined in yellow.</p>
</caption>
<graphic xlink:href="fvets-13-1736979-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Close-up photographs of two red deer or deer muzzles are shown side by side, each overlaid with anatomical line diagrams in white, yellow, and red, emphasizing nostril and facial structure details for comparative or scientific analysis.</alt-text>
</graphic>
</fig>
<p>In the first phase, hunters captured images of the muzzle surfaces of wild ungulates at a minimum Full HD (1,920&#x202F;&#x00D7;&#x202F;1,080 pixels) resolution, using a wide range of their mobile phones. Because the main aim is to propose and validate a simple field method, the hunters who took photographs of harvested game received these basic instructions for mobile-phone camera settings as follows: digital zoom and macro mode were disabled. Furthermore, the muzzle had to be cleaned of blood, mud, or other dirt, and the camera was required to focus precisely on the surface structure. At least two images were taken from every hunted individual. First, a frontal image was taken from a distance of 6&#x2013;15&#x202F;cm, ensuring that the entire head was visible and, in males, at least part of the antlers was captured for basic categorization of the sex of harvested individuals. Second, an overhead image was taken from approximately a 45&#x00B0; angle above the head, focusing on the muzzle.</p>
<p>All photographs were collected in hunting districts across the entire territory of the Czech Republic, ensuring nationwide geographical coverage for all ungulate species included in the dataset. This way, 2,457 individuals predominantly from the family Cervidae, with 6,013 photos, were acquired (for exact values see <xref ref-type="table" rid="tab1">Table 1</xref>) and used in the first phase of testing (see Results).</p>
<p>During the second phase, a mobile application was developed for data acquisition, specifically for photographing the muzzle pattern. The main advantage was the implementation of a graphical overlay. This was created to follow the morphology of the Cervidae head. This mask is displayed when the camera is activated, allowing the background outside the outlined area to be faded out. Separate overlays were designed for frontal and overhead photographs. The application also includes a form for entering information about the hunted individual and the hunter. In addition, it automatically records metadata related to each image. The use of the application for capturing suitable photographs is illustrated in <xref ref-type="fig" rid="fig1">Figure 1</xref>. The application can automatically send the data into the database or save the complete package, which can be sent later (for example, when the connection is better). The second phase of testing focused on red deer; altogether with the first phase, 972 individuals were acquired (with 2,193 photographs).</p>
<p>In the second step of database acquisition, each photograph was manually inspected and annotated. For annotation, the LabelMe software was used. A rectangle was used to delineate the muzzle, two circles marked the positions of the nostrils, and two additional rectangles indicated the locations of the antlers (when present). The basic classification of image quality of photos submitted by hunters from routine hunting practice was done manually, based on general visual criteria. Three quality levels were assigned to the images. High-quality photographs were defined by proper lighting, clear contrast of the muzzle surface, sharp focus, and correct positioning. Medium-quality images were acceptable, with clearly visible and identifiable muzzle structure. The lowest quality category included unusable photographs&#x2014;those lacking the animal altogether, showing the entire animal instead of the muzzle, focused on the hunter or background, or where the muzzle pattern was obscured, unreadable, overexposed, underexposed, blurred, or incorrectly positioned.</p>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>Animal biometric recognition of muzzle pattern</title>
<p>The following comparison methods were evaluated: DenseNet combined with FAISS (Facebook AI Similarity Search), SIFT (Scale-Invariant Feature Transform), SURF (Speeded-Up Robust Features), ORB (Oriented FAST and Rotated BRIEF), HOG (Histogram of Oriented Gradients), Siamese networks, and handcrafted features analogous to fingerprint minutiae. All of these methods were tested during the first phase of the study.</p>
<p>DenseNet + FAISS combines a convolutional neural network&#x2014;specifically DenseNet, in which each layer is fully connected to all preceding layers (<xref ref-type="bibr" rid="ref45">45</xref>)&#x2014;with the FAISS library developed by Meta, designed for efficient large-scale similarity search (<xref ref-type="bibr" rid="ref46">46</xref>). The SIFT is a classical, non-neural algorithm used to detect key points in an image based on differences of Gaussian and to define descriptors for subsequent comparison (<xref ref-type="bibr" rid="ref47">47</xref>). The SURF represents an accelerated version of SIFT, employing box filters instead of Gaussian differences and relying on the Hessian matrix for key point detection (<xref ref-type="bibr" rid="ref47">47</xref>). The ORB is another classical approach that utilizes the FAST (Features from Accelerated Segment Test) algorithm to locate key points, and the BRIEF (Binary Robust Independent Elementary Features) descriptor to extract robust and computationally efficient image features (<xref ref-type="bibr" rid="ref47">47</xref>).</p>
<p>The HOG is a descriptor for image data, dividing the image into cells and computing the histogram of gradients for each (also a classical algorithm) (<xref ref-type="bibr" rid="ref48">48</xref>). A Siamese network is usually used with two neural networks containing the same architecture and shared weights; each network is given an image, and over time, it trains itself to measure the similarities between them (<xref ref-type="bibr" rid="ref49">49</xref>). The method uses handcrafted features to compare the bifurcation of the valleys on the muzzle (similar to a fingerprint comparison). A graphical representation of the comparison of extracted valleys can be found in <xref ref-type="fig" rid="fig2">Figure 2</xref>. This method also uses only a classical approach.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Example of muzzle pattern structure valley comparison.</p>
</caption>
<graphic xlink:href="fvets-13-1736979-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">An example of muzzle pattern structure comparison. Coloured lines show matched polygonal structures representing the muzzle pattern.</alt-text>
</graphic>
</fig>
<p>For the second phase of testing, the LoFTR (<italic>Local Feature TRansformer</italic>) matcher was chosen as the comparison method. All the information about the LoFTR method is taken from (<xref ref-type="bibr" rid="ref50">50</xref>). The LoFTR is a deep learning algorithm using the Transformer architecture. It finds corresponding points between two images (detector-free) and then returns the similarity score. An overview of the method is shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Overview of the LoFTR method, divided into four parts (<xref ref-type="bibr" rid="ref50">50</xref>). The first part is the CNN section where coarse (red) and fine (green) feature maps are extracted. The second part involves flattening the coarse features and combining them with positional encoding. Afterwards, they are processed by a transformer encoder. The third part is where correspondences from the encoder are found, and the fourth is where the results are refined using fine features.</p>
</caption>
<graphic xlink:href="fvets-13-1736979-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Diagram illustrating a four-step image matching (LoFTR method) pipeline: local feature CNN extracts features from two input images, followed by positional encoding and coarse-level local feature transformation using self- and cross-attention. A differentiable matching module generates a confidence matrix, and a coarse-to-fine module refines predictions via cropping, correlation, and expectation calculation. Boxes, arrows, and graphical representations depict data flow between steps.</alt-text>
</graphic>
</fig>
<p>The first part uses a local feature CNN (<italic>Convolutional Neural Network</italic>), which extracts feature maps (coarse&#x2013;red, and fine&#x2013;green). The coarse features are flattened into a one-dimensional vector and put together with positional encoding for the second part. These are processed by another neural network (specifically, a transformer encoder, using self-attention and cross-attention layers). In the third part, correspondences between transformed coarse-level features are found (using a differentiable matching layer). Each coarse-level prediction is refined using a local window and fine-level features. The whole technique was pretrained on several million images (indoor and outdoor). The pretrained <italic>outdoor-ds</italic> checkpoint was employed in this study. Graphically, the comparison can be visualized as shown in <xref ref-type="fig" rid="fig4">Figure 4</xref>. The image on the top shows confirmed verification (visualizing 247 corresponding key points), the image below shows denied verification (visualizing 163 corresponding key points).</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Visualization of verification using the LoFTR method. Blue and green lines connect areas where the corresponding key points in both images were found. The top picture shows confirmed verification with 247 corresponding key points found. The bottom image shows denied verification with only 163 corresponding key points found.</p>
</caption>
<graphic xlink:href="fvets-13-1736979-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Two photographic panels display a close-up view of an animal&#x2019;s muzzle area with fur, split vertically down the middle and overlaid with numerous colored lines representing feature matches between the left and right sides. The top panel indicates 247 feature matches, while the bottom panel shows 163. Both panels are labeled &#x201C;LoFTR (outdoor)&#x201D; and demonstrate point correspondence for image analysis.</alt-text>
</graphic>
</fig>
<p>For comparison of individuality, only the muzzle was used (otherwise, there could be a lower accuracy due to finding similarities in buildings and nature in the background). More specifically, a rectangle around the muzzle was used, as evident from <xref ref-type="fig" rid="fig4">Figure 4</xref>. Some key points were outside of the muzzle (e.g., on hair). Therefore, the effect of precise trimming of the muzzle area was tested. An example is shown in <xref ref-type="fig" rid="fig5">Figure 5</xref>, where the similarity score is 70% in the first row and 60% in the second. Using a rectangular region of interest proved to be both faster and more accurate; hence, additional trimming was not required. Nevertheless, several basic preprocessing steps were applied before using the LoFTR method. Specifically, each image was resized to 480&#x202F;&#x00D7;&#x202F;640 pixels, converted to greyscale (single color channel), and normalized to a mean value of 0.485 and a standard deviation of 0.229.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Examples of cropped images used for training and testing prior to preprocessing steps. The top images display the muzzle structure without special trimming, while the bottom images show the muzzle with special trimming.</p>
</caption>
<graphic xlink:href="fvets-13-1736979-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Four-panel collage showing two close-up photographs of a textured animal nose with visible pores and whiskers at the top, and two digitally isolated images of the same nose shape below, set against blue and red backgrounds respectively.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec5">
<label>2.3</label>
<title>Dataset creation and training process for LoFTR</title>
<p>Before presenting the exact results, it is essential to describe the training and testing procedure, which consisted of two main components: <italic>dataset creation</italic> and <italic>threshold estimation</italic>. The first step in creating the dataset involved dividing the database according to image quality and capture angle&#x2014;specifically into high-quality, medium-quality, frontal (0&#x00B0;), and overhead (45&#x00B0;) categories. Based on these criteria, four datasets were established: (1) high-quality frontal (0&#x00B0;) images; (2) combined high- and medium-quality frontal (0&#x00B0;) images; (3) high-quality frontal (0&#x00B0;) and overhead (45&#x00B0;) images; and (4) combined high- and medium-quality frontal (0&#x00B0;) and overhead (45&#x00B0;) images.</p>
<p>For each category, training and testing sets composed of image pairs were generated, with an equal split&#x2014;50% of the data used for training and 50% for testing and evaluation, with no crossover. Because the database reflected real-world conditions, a substantial imbalance existed between the number of images representing the same individual and those of different individuals (i.e., many images of different animals, but only a few per specific individual). Within each set, pairs of animal photographs were generated and labeled according to whether they depicted the same or different individuals. The generation process maintained a predefined ratio between matching (same individual) and non-matching (different individual) pairs, which in the present red deer experiments ranged from 0.1 to 0.5.</p>
<p>The training set was used to determine the optimal threshold for distinguishing whether a pair of images represented the same individual. This was accomplished by evaluating the entire training dataset and selecting the lowest possible threshold value that produced no false positives. The identified threshold was then applied during classifier evaluation on the testing dataset. Classifier outputs were subsequently compared with the ground truth for each image pair, and performance metrics such as accuracy, precision, and false positive rate were calculated based on these comparisons.</p>
</sec>
<sec id="sec6">
<label>2.4</label>
<title>Data analysis</title>
<p>Once the essential parts were prepared (neural networks trained, threshold defined, etc.), their accuracy was computed. Information about metrics was taken from ISO/IEC 19795-1:2021 (<xref ref-type="bibr" rid="ref51">51</xref>) in this section. Pairs of images from a particular dataset were fed into the algorithms, and the result was a comparison score. If the comparison score was higher than the threshold, the images were declared to be from the same individual. If the computed score was lower, the images were declared to be from different individuals. This automated answer was then compared with the ground truth. After that, the performance statistics could be computed.</p>
<p>All the metrics were computed based on the following definitions: <italic>true positive</italic> (the number of the same individual image pairs identified as such), <italic>true negative</italic> (the number of different individual image pairs identified as such), <italic>false positive</italic> (the number of different individual image pairs incorrectly identified as the same&#x2014;also referred to as type I statistical error) and <italic>false negative</italic> (the number of the same individual image pairs incorrectly identified as different&#x2014;also referred to as type II statistical error).</p>
<p><italic>Accuracy</italic> is defined as the sum of true positives and true negatives divided by the total number of image pairs in the dataset (<xref ref-type="bibr" rid="ref52">52</xref>). The <italic>True Match Rate</italic> (TMR) is the number of true positives divided by the number of the same individual image pairs. The <italic>True Non-Match Rate</italic> (TNMR) is the number of true negatives divided by the number of different individual image pairs. The <italic>False Match Rate</italic> (FMR) is the number of false positives divided by the number of different individual image pairs. The <italic>False Non-Match Rate</italic> (FNMR) is the number of false negatives divided by the number of the same individual image pairs. In machine learning, accuracy values are usually compared when evaluating different methods. However, for practical applications, FMR and FNMR (or TMR/TNMR) are generally more understandable.</p>
<p>The evaluation results of the LoFTR method (<italic>accuracy</italic>) were statistically compared between selected categories using comparison of multiple binomial proportions (<xref ref-type="bibr" rid="ref53">53</xref>). Confidence interval limits for each variant were computed using beta-distribution. The results of this analysis were visualized by bar plot with error bars. This analysis was performed in R software (<xref ref-type="bibr" rid="ref54">54</xref>), plot was created in its package ggplot2 (<xref ref-type="bibr" rid="ref55">55</xref>). Alpha level of 0.05 was selected for statistical computations.</p>
</sec>
</sec>
<sec sec-type="results" id="sec7">
<label>3</label>
<title>Results</title>
<p>The evaluation was conducted in two phases, as described in the Data Acquisition section. The first phase, carried out primarily in September 2024, utilized a multispecies database (see <xref ref-type="table" rid="tab1">Table 1</xref> for details). The dataset was used without additional filtering, except for neural network-based approaches, where it was divided into two equal parts for training and testing. The resulting accuracy values for the evaluated methods are presented in <xref ref-type="table" rid="tab2">Table 2</xref> (two best values in bold).</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Evaluation of the various methods on the first phase dataset.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th>Evaluation method</th>
<th align="center" valign="top">DenseNet + FAISS</th>
<th align="center" valign="top">SIFT</th>
<th align="center" valign="top">SURF</th>
<th align="center" valign="top">ORB</th>
<th align="center" valign="top">HOG</th>
<th align="center" valign="top">Siamese network</th>
<th align="center" valign="top">Handcrafted features</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Accuracy</td>
<td align="char" valign="top" char=".">62.6%</td>
<td align="char" valign="top" char=".">60.1%</td>
<td align="char" valign="top" char=".">63.5%</td>
<td align="char" valign="top" char="."><bold>68.6%</bold></td>
<td align="char" valign="top" char=".">57.9%</td>
<td align="char" valign="top" char=".">65.3%</td>
<td align="char" valign="top" char="."><bold>67.4%</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Two main issues were identified: first, the overall performance of the tested methods remained below 70%; and second, as the database expanded, several initially promising approaches exhibited poor scalability. Since performance on large datasets represented one of the key evaluation criteria, the second phase was scheduled for the following year, focusing exclusively on a single species (red deer) and employing methods known to scale effectively with database size, specifically the LoFTR algorithm. The second phase of the evaluation pursued several objectives. The primary goal was to assess the accuracy of the selected method. The second objective was to examine the influence of <italic>image quality</italic> (high and medium) and <italic>capture angle</italic> (frontal and overhead) on performance. Finally, the third objective was to evaluate the effect of varying the ratio of generated pairs used for training and testing. The main performance metrics are summarized in <xref ref-type="table" rid="tab3">Table 3</xref>.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Evaluation of the LoFTR method on red deer datasets in the second phase.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Category</th>
<th align="center" valign="top">Accuracy</th>
<th align="center" valign="top">TMR</th>
<th align="center" valign="top">TNMR</th>
<th align="center" valign="top">FMR</th>
<th align="center" valign="top">FNMR</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">High, 0&#x00B0;</td>
<td align="char" valign="top" char=".">94.974%</td>
<td align="char" valign="top" char=".">92.063%</td>
<td align="center" valign="top">97.884%</td>
<td align="char" valign="top" char=".">2.116%</td>
<td align="char" valign="top" char=".">7.937%</td>
</tr>
<tr>
<td align="left" valign="top">High and medium, 0&#x00B0;</td>
<td align="char" valign="top" char="."><bold>95.085%</bold></td>
<td align="char" valign="top" char="."><bold>92.308%</bold></td>
<td align="center" valign="top">97.863%</td>
<td align="char" valign="top" char=".">2.137%</td>
<td align="char" valign="top" char="."><bold>7.692%</bold></td>
</tr>
<tr>
<td align="left" valign="top">High, 0&#x00B0; and 45&#x00B0;</td>
<td align="char" valign="top" char=".">93.154%</td>
<td align="char" valign="top" char=".">87.759%</td>
<td align="center" valign="top">98.548%</td>
<td align="char" valign="top" char=".">1.452%</td>
<td align="char" valign="top" char=".">12.241%</td>
</tr>
<tr>
<td align="left" valign="top">High and medium, 0&#x00B0; and 45&#x00B0;</td>
<td align="char" valign="top" char=".">90.048%</td>
<td align="char" valign="top" char=".">80.096%</td>
<td align="center" valign="top"><bold>100%</bold></td>
<td align="char" valign="top" char="."><bold>0.000%</bold></td>
<td align="char" valign="top" char=".">19.904%</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="tab3">Table 3</xref> shows that the best results (in bold) were achieved when high- and medium-quality images were used, but only the frontal (0&#x00B0;) ones. Using overhead (45&#x00B0;) images performed noticeably worse. The difference between frontal high and frontal high and medium was only marginal. The answer to the question of why the usage of lower-quality images was better than higher-quality images is presented in <xref ref-type="table" rid="tab4">Table 4</xref>. The second category used 12.5% more images than the first.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Images in datasets used in the second phase red deer evaluation.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Category</th>
<th align="center" valign="top">Pairs</th>
<th align="center" valign="top">Animals training</th>
<th align="center" valign="top">Images training</th>
<th align="center" valign="top">Animals testing</th>
<th align="center" valign="top">Images testing</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">High, 0&#x00B0;</td>
<td align="center" valign="top">378</td>
<td align="center" valign="top">411</td>
<td align="center" valign="top">583</td>
<td align="center" valign="top">390</td>
<td align="center" valign="top">550</td>
</tr>
<tr>
<td align="left" valign="top">High and medium, 0&#x00B0;</td>
<td align="center" valign="top">468</td>
<td align="center" valign="top">452</td>
<td align="center" valign="top">656</td>
<td align="center" valign="top">453</td>
<td align="center" valign="top">659</td>
</tr>
<tr>
<td align="left" valign="top">High, 0&#x00B0; and 45&#x00B0;</td>
<td align="center" valign="top">964</td>
<td align="center" valign="top">444</td>
<td align="center" valign="top">853</td>
<td align="center" valign="top">403</td>
<td align="center" valign="top">790</td>
</tr>
<tr>
<td align="left" valign="top">High and medium, 0&#x00B0; and 45&#x00B0;</td>
<td align="center" valign="top">1,246</td>
<td align="center" valign="top">463</td>
<td align="center" valign="top">967</td>
<td align="center" valign="top">478</td>
<td align="center" valign="top">989</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>A statistical comparison (<xref ref-type="fig" rid="fig6">Figure 6</xref>) of <italic>accuracy</italic> between the selected categories revealed a statistically significant difference for the category High and medium, 0&#x00B0; and 45&#x00B0; when compared with all other variants. This category exhibited an accuracy of 90.05%, whereas the accuracies of the remaining variants ranged from 93.15 to 95.09%. <italic>p</italic>-values and the studentized range (q) values (test statistics) for comparison of category High and medium, 0&#x00B0; and 45&#x00B0; to others were as follows: to High, 0&#x00B0; and 45&#x00B0; &#x2013; <italic>q</italic>&#x202F;=&#x202F;3.69, <italic>p</italic>&#x202F;=&#x202F;0.04, to High, 0&#x00B0; &#x2013; <italic>q</italic>&#x202F;=&#x202F;4.71, <italic>p</italic>&#x202F;=&#x202F;0.005 and to High and medium, 0&#x00B0; &#x2013; <italic>q</italic>&#x202F;=&#x202F;5.24, <italic>p</italic>&#x202F;=&#x202F;0.001.</p>
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption>
<p>Comparison of model matching accuracy between selected variants. Error bars depict 95% confidence interval computed using beta-distribution. Indices above each bar depict statistical homogeneity (variants with identical index are not statistically different and vice versa).</p>
</caption>
<graphic xlink:href="fvets-13-1736979-g006.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar graph showing detection accuracy by category, with four groups: "High &#x0026; medium, 0&#x00B0; &#x0026; 45&#x00B0;", "High, 0&#x00B0; &#x0026; 45&#x00B0;", "High, 0&#x00B0;", and "High &#x0026; medium, 0&#x00B0;". Error bars are included for each group. Detection accuracy increases from the first to the last group, with labeled significance letters above each bar: &#x201C;a&#x201D; for the first, &#x201C;b&#x201D; for the other three.</alt-text>
</graphic>
</fig>
<p>All of the previous results were obtained using a 0.5 ratio between genuine and imposter pairs. The rationale for employing this balanced 50:50 ratio is illustrated in <xref ref-type="fig" rid="fig7">Figure 7</xref>. As shown, when the dataset becomes more imbalanced, the results appear artificially improved. This occurs because, at a 0.1 ratio, a method that classifies all images as different would still be correct in 90% of cases purely by chance. To avoid this statistical bias, a balanced 0.5 ratio was adopted. Additional insights from <xref ref-type="fig" rid="fig7">Figure 7</xref> indicate that the frontal high-quality category seems to perform slightly better than the combined frontal high- and medium-quality category. Moreover, the results confirm that incorporating overhead (45&#x00B0;) images does not improve performance for this method. Based on the annotation, 95 images were of the lowest quality (unusable). This value could be utilized for the FTA (Fail to Acquire Rate), which would be 4.324%. Although the images were successfully captured, those deemed unusable were classified as &#x201C;fail to acquire&#x201D; rather than &#x201C;fail to extract&#x201D; cases [for a description of these metrics, see (<xref ref-type="bibr" rid="ref51">51</xref>)]. Another limitation identified during the comparison process involved errors in database acquisition. Specifically, four animals produced highly similar images, and one case included photographs of two different individuals. These entries were subsequently blacklisted and excluded from the final evaluation.</p>
<fig position="float" id="fig7">
<label>Figure 7</label>
<caption>
<p>The relationship between the accuracy evaluation in testing and the ratio of genuine matching pairs used during the training phase is presented as percentages in decimal form. The red, blue, green, and yellow lines represent individual categories: high 0&#x00B0;, high and medium 0&#x00B0;, high 0&#x00B0; and 45&#x00B0;, and high and medium 0&#x00B0; and 45&#x00B0;, respectively. All graphs converge toward nearly perfect accuracy when the ratio of genuine matching pairs is low.</p>
</caption>
<graphic xlink:href="fvets-13-1736979-g007.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line chart titled &#x201C;Accuracy vs Matching Ratio&#x201D; shows accuracy on the vertical axis and matching pairs ratio on the horizontal axis. Four colored lines represent different experiment conditions. All lines generally trend upward, with accuracy increasing as matching pairs ratio decreases, except the orange line, which sharply drops at 0.3 ratio before rising again. A legend explains the color coding: red for &#x201C;HIGH - 0&#x00B0;&#x201D;, blue for &#x201C;HIGH, MEDIUM - 0&#x00B0;&#x201D;, green for &#x201C;HIGH - 0&#x00B0;, 45&#x00B0;&#x201D;, and orange for &#x201C;HIGH, MEDIUM - 0&#x00B0;, 45&#x00B0;&#x201D;.</alt-text>
</graphic>
</fig>
</sec>
<sec sec-type="discussion" id="sec8">
<label>4</label>
<title>Discussion</title>
<p>Given the specificity of the topic, which focuses on assessing the individuality of hunted ungulates, it is practically impossible to compare the obtained results with similar systems. Therefore, a comparison with livestock and pet records is a feasible alternative. In the case of cattle, Kumar et al. (<xref ref-type="bibr" rid="ref23">23</xref>) propose a deep learning methodology across different identification scenarios with multiple test galleries, using 500 animals with 10 images each (5,000 images). The best result is achieved with <italic>Deep Belief Network</italic> (DBN), using 400 feature sets, with an impressive accuracy of up to 98.99%. When compared to the proposed ABRSWU solution, it should be noted that the red deer muzzle surface is approximately half the size of that of cattle. This is important because the ABRSWU solution uses around 200 key points. In the study by Kumar et al. (<xref ref-type="bibr" rid="ref23">23</xref>), the usage of 200 feature sets would lead to only 77.94% accuracy. The ABRSWU achieved 95% accuracy, and the dataset contained almost double the number of individuals (972 versus 500). Moreover, their study already proposed an application on the Android platform for the future.</p>
<p>Another study by Shojaeipour et al. (<xref ref-type="bibr" rid="ref40">40</xref>) achieved similarly remarkable biometric identification accuracy of 99.1%. However, their research included fewer beef cattle animals (300 animals and 2,900 images) than our research. They used a tuned ResNet-50 model to extract muzzle features and ensure individual identification. The test-to-train set ratio was 80:20 in their case (compared to 50:50 in this study), which further improved the accuracy. Due to the larger area of the muzzle, they used images with a resolution of 1,024&#x202F;&#x00D7;&#x202F;1,024 (compared to 480&#x202F;&#x00D7;&#x202F;640). According to that study, using a similar resolution of 608&#x202F;&#x00D7;&#x202F;832, the accuracy reached 92.75% which is 2% lower than the ABRSWU method. Surprisingly, when compared to our study, almost double the images (268 images or 9.24%) did not capture the muzzle or were extremely blurry.</p>
<p>In the study by Nishanov et al. (<xref ref-type="bibr" rid="ref43">43</xref>) 4,923 images of 268 cattle were used, along with 8 models for training. The DenseNet-121, WideResNet-50, and InceptionV3 models reached the highest accuracy rates of 99.2, 99.1, and 99.1%, respectively. This exemplifies the considerable impact that the number of animals in the database has. Using only a third of the individuals, the ABRSWU had an enormous impact on the results, as demonstrated by the first phase of this study. That is further enhanced by using more images. Deep learning employs fewer individuals and more images to better recognize the features of each individual.</p>
<p>A similar study also attempted to identify the nose pattern of dogs (<xref ref-type="bibr" rid="ref28">28</xref>). Their research involved training with 1,035 images of 345 dogs, while testing was conducted on 828 images of 276 dogs. The highest accuracy recorded (in the top 1 results) was 92.39%. This was accomplished using a complex network of neural architectures and preprocessing techniques to generate a feature vector comparable to the minutiae of human fingerprints. In contrast, our study&#x2019;s method utilizes a larger sample size leading to 3% improvement in accuracy despite employing only half the number of images. Compared to most of the studies on domestic animals mentioned above, our dataset is characterized by a significantly larger number of individuals combined with a relatively small number of images per individual. This makes the identification task considerably more challenging and the achieved performance particularly informative, as deep learning-based identification systems usually benefit from the opposite setting.</p>
<p>The determined accuracy of individual identification, which exceeded 95% for frontal photographs, provides a promising starting point for further method development and represents significant potential for practical application. If the best possible method were applied to the annual harvest of red deer in the Czech Republic, which amounted to 35,001 animals during the 2024/2025 hunting season (<xref ref-type="bibr" rid="ref56">56</xref>), it would mean that 34,253 individuals could be identified as unique, while 748 would require confirmation by an alternative method. In this hypothetical scenario, there would be no cheating (i.e., the same individual seen as a different one). However, the evidence can be further refined in the future. The necessary steps are quality control and feedback when the image is acquired. After evaluation, it was also concluded that more focus should be placed on the quality of frontal images.</p>
<p>In addition to the quality of the acquired photo, the applicability of animal biometric identification method based on muzzle pattern could be affected by dermatologic conditions involving the analyzed area, similar to human fingerprints (<xref ref-type="bibr" rid="ref57 ref58 ref59">57&#x2013;59</xref>). Severe hand dermatitis has been shown to significantly impair fingerprint verification, with failure reported in 27% of patients with clinical dermatitis involving a palmar distal phalanx of either thumb (<xref ref-type="bibr" rid="ref58">58</xref>). Other dermatologic conditions, such as psoriasis, eczema, and sclerodermatous diseases, have also been associated with disruption of epidermal ridge patterns, leading to reduced fingerprint recognition accuracy (<xref ref-type="bibr" rid="ref59">59</xref>, <xref ref-type="bibr" rid="ref60">60</xref>). A comparable issue may occur in hunted ungulates. Papillomavirus infections lead to multiple benign fibropapilloma tumors, most frequently localized on the head, neck, abdomen, and extremities (<xref ref-type="bibr" rid="ref61">61</xref>). Given the increasing prevalence of these infections (<xref ref-type="bibr" rid="ref62">62</xref>), the presence of tumors on the muzzle may interfere with pattern recognition. Similarly, the other condition affecting the skin of the hunted ungulates, such as infection diseases (<xref ref-type="bibr" rid="ref63 ref64 ref65 ref66">63&#x2013;66</xref>), metabolic diseases, specifically zinc deficiency (<xref ref-type="bibr" rid="ref67">67</xref>, <xref ref-type="bibr" rid="ref68">68</xref>) or plant-induced photosensitivity (<xref ref-type="bibr" rid="ref69">69</xref>) can reduce the reliability of muzzle pattern characteristics. Although autoimmune diseases are less frequently reported in hunted ungulates than in domestic animals, it could potentially impair their muzzle surface (<xref ref-type="bibr" rid="ref70">70</xref>). 
Additionally, skin neoplastic processes, particularly squamous cell carcinoma (SCC), are well documented in cattle (<xref ref-type="bibr" rid="ref71">71</xref>) and have also been observed in wild ungulates (<xref ref-type="bibr" rid="ref72">72</xref>). As SCC predominantly develops in areas exposed to sunlight, the muzzle is a common site of occurrence (<xref ref-type="bibr" rid="ref73">73</xref>). Even though skin disease manifesting at the muzzle could interfere with the muzzle pattern-based identification, in the context of animal biometric identification applied post-mortem in hunted ungulates, images are acquired at a single time point. As a result, progressive skin disease or dermatologic conditions that do not affect a sufficient portion of the muzzle surface may have limited or no impact on the utility of muzzle pattern-based evidence of hunted ungulates. Also, if a skin disease or dermatological condition changes the muzzle pattern in a unique way, it could make the identification task simpler and more reliable (similarly to tattoos or scars that make humans easily distinguishable).</p>
<p>Although the present study does not aim to address the animal welfare of harvested ungulates directly, it is worth noting that, if implemented in practice, this biometric identification method could indirectly contribute to the welfare and health status of wild ungulate populations. An overabundance of ungulates does not manifest only in negative impacts on the ecosystem and human land-use but also affects the condition of individuals of the given species (<xref ref-type="bibr" rid="ref1">1</xref>). At sites with ungulate overabundance, a reduction in body mass can also be expected, according to the density-dependent concept in population dynamics (<xref ref-type="bibr" rid="ref74">74</xref>). Moreover, for roe deer, intra-population stress could be expected in localities with high population densities due to males&#x2019; territorial behavior (<xref ref-type="bibr" rid="ref75">75</xref>). In this context, the spreading of diseases is another aspect that could affect the health status of wild ungulate populations, including the most serious ones, such as African swine fever in the case of wild boars (<xref ref-type="bibr" rid="ref76">76</xref>, <xref ref-type="bibr" rid="ref77">77</xref>). Nevertheless, the spread of disease can be mitigated through systematic population reduction, as regulated by the proposed control mechanism. For practical implementation, the final system should incorporate additional functionality. A key requirement is the inclusion of user authentication, enabling hunters and administrative personnel to log into the application. Furthermore, a comprehensive and clearly visualized database, accompanied by statistical summaries, is essential for reliable record-keeping. Whenever possible, data should be recorded automatically&#x2014;particularly the date, time, and location (GPS coordinates) of each harvest&#x2014;to facilitate accurate and efficient documentation via mobile devices. 
These parameters represent fundamental components internationally recognized as essential for reliable harvest records (<xref ref-type="bibr" rid="ref8">8</xref>, <xref ref-type="bibr" rid="ref10">10</xref>). Automated data collection reduces errors and saves hunters&#x2019; time, a prerequisite for the system to become acceptable in everyday practice (<xref ref-type="bibr" rid="ref13">13</xref>, <xref ref-type="bibr" rid="ref14">14</xref>). Additional key information that should be entered by the user includes the sex and age class of the harvested individual. For management purposes, it is also highly valuable to record the method of harvest. Individual or collective hunting or trapping can significantly influence population density, as well as the age and sex structure of harvested game (<xref ref-type="bibr" rid="ref4">4</xref>, <xref ref-type="bibr" rid="ref7">7</xref>). Such data can then be used to recommend applying particular methods in other areas. Supplementary information, such as body weight, trophy parameters, or health status, can significantly enhance the scope of evaluation and is recommended for comprehensive population monitoring (<xref ref-type="bibr" rid="ref11">11</xref>, <xref ref-type="bibr" rid="ref33">33</xref>).</p>
<p>The application should also operate on the principle of multi-level data access. This would enable hunters to produce a quick and simple record, which&#x2014;owing to biometric traceability&#x2014;is unambiguous and difficult to contest (<xref ref-type="bibr" rid="ref17">17</xref>). Game managers or administrative authorities would then gain access to statistics and visualizations. These outputs should include harvest summaries by species, sex, age class, temporal trends, and spatial maps with options for comparisons across seasons or hunting grounds (<xref ref-type="bibr" rid="ref1">1</xref>, <xref ref-type="bibr" rid="ref9">9</xref>).</p>
<p>An application designed this way creates a robust and transparent database based on biometric reliability and the automated collection of key information. This database can be used not only to fulfil legislative requirements, but above all, as a strategic tool for game management, enabling flexible responses to population dynamics, reducing discrepancies between planned and actual harvests, and mitigating the negative impacts of overabundant game on forest and agricultural ecosystems (<xref ref-type="bibr" rid="ref1">1</xref>, <xref ref-type="bibr" rid="ref4">4</xref>, <xref ref-type="bibr" rid="ref5">5</xref>, <xref ref-type="bibr" rid="ref7">7</xref>).</p>
</sec>
<sec sec-type="conclusions" id="sec9">
<label>5</label>
<title>Conclusion</title>
<p>The proposed method for identifying ungulates through the biometric features of the harvested individuals&#x2019; muzzle patterns demonstrates significant potential. Further development and refinement of the method are essential, as is the possibility of digitalizing and simplifying the traditionally conservative field of wildlife management. Based on the findings presented above, several conclusions can be drawn regarding the biometric properties of the ABRSWU system. All of the examined samples contained distinguishable muzzle pattern structures, indicating high <italic>universality</italic>. No identical or nearly identical muzzle patterns were observed, confirming a high degree of <italic>uniqueness</italic>. <italic>Permanence</italic> and <italic>acceptability</italic> do not present concerns in this context, as the images were obtained post-mortem. There were some issues with the quality of the images; nevertheless, no cases were found where the muzzle pattern could not be captured, i.e., <italic>measurability</italic> is high. <italic>Price</italic> and <italic>maintenance</italic> are relatively low (hunters use their own smartphones), so the main cost is the software and database. The final property to consider is <italic>circumvention</italic>. Following the evaluation, several areas were identified where potential presentation or other forms of attack could occur. Addressing these vulnerabilities represents an important challenge for future development. Another key direction for future research is to extend the testing and validation of the method to additional wild ungulate species to assess its precision and general applicability.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec10">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="author-contributions" id="sec11">
<title>Author contributions</title>
<p>OK: Writing &#x2013; review &#x0026; editing, Conceptualization, Writing &#x2013; original draft, Methodology. JC: Formal analysis, Writing &#x2013; review &#x0026; editing, Project administration, Validation, Writing &#x2013; original draft, Resources, Supervision, Funding acquisition, Conceptualization. JA: Writing &#x2013; review &#x0026; editing, Writing &#x2013; original draft, Project administration. VS: Formal analysis, Resources, Writing &#x2013; original draft. MS: Writing &#x2013; review &#x0026; editing, Software, Data curation. VO: Writing &#x2013; original draft. TV: Data curation, Software, Writing &#x2013; review &#x0026; editing. MD: Writing &#x2013; review &#x0026; editing, Conceptualization, Methodology, Supervision, Project administration. VH: Validation, Funding acquisition, Writing &#x2013; review &#x0026; editing, Formal analysis, Resources, Project administration, Supervision, Conceptualization.</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>The authors would like to express their gratitude to the Hunterra company, and in particular to David H&#x00E1;j&#x00ED;&#x010D;ek, for their assistance with data collection; to the ARTIN company for their support in data evaluation; and to the Czech State Forests for their cooperation in data acquisition.</p>
</ack>
<sec sec-type="COI-statement" id="sec12">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec13">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was used in the creation of this manuscript. Generative AI was used exclusively for verifying linguistic accuracy and for minor grammatical and stylistic refinements of the manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec14">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Carpio</surname><given-names>AJ</given-names></name> <name><surname>Apollonio</surname><given-names>M</given-names></name> <name><surname>Acevedo</surname><given-names>P</given-names></name></person-group>. <article-title>Wild ungulate overabundance in Europe: contexts, causes, monitoring and management recommendations</article-title>. <source>Mamm Rev</source>. (<year>2021</year>) <volume>51</volume>:<fpage>95</fpage>&#x2013;<lpage>108</lpage>. doi: <pub-id pub-id-type="doi">10.1111/mam.12221</pub-id></mixed-citation></ref>
<ref id="ref2"><label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Valente</surname><given-names>AM</given-names></name> <name><surname>Acevedo</surname><given-names>P</given-names></name> <name><surname>Figueiredo</surname><given-names>AM</given-names></name> <name><surname>Fonseca</surname><given-names>C</given-names></name> <name><surname>Torres</surname><given-names>RT</given-names></name></person-group>. <article-title>Overabundant wild ungulate populations in Europe: management with consideration of socio-ecological consequences</article-title>. <source>Mamm Rev</source>. (<year>2020</year>) <volume>50</volume>:<fpage>353</fpage>&#x2013;<lpage>66</lpage>. doi: <pub-id pub-id-type="doi">10.1111/mam.12202</pub-id></mixed-citation></ref>
<ref id="ref3"><label>3.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Cerri</surname><given-names>J</given-names></name> <name><surname>Chirichella</surname><given-names>R</given-names></name> <name><surname>Arnold</surname><given-names>W</given-names></name> <name><surname>Barto&#x0161;</surname><given-names>L</given-names></name> <name><surname>Borowik</surname><given-names>T</given-names></name> <name><surname>Carranza</surname><given-names>J</given-names></name> <etal/></person-group>. <article-title>Trends of ungulate species in Europe: not all stories are equal</article-title> (<year>2025</year>). [Preprint]. doi: <pub-id pub-id-type="doi">10.32942/X26642</pub-id></mixed-citation></ref>
<ref id="ref4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hardalau</surname><given-names>D</given-names></name> <name><surname>Codrean</surname><given-names>C</given-names></name> <name><surname>Iordache</surname><given-names>D</given-names></name> <name><surname>Fedorca</surname><given-names>M</given-names></name> <name><surname>Ionescu</surname><given-names>O</given-names></name></person-group>. <article-title>The expanding thread of ungulate browsing&#x2014;a review of forest ecosystem effects and management approaches in Europe</article-title>. <source>Forests</source>. (<year>2024</year>) <volume>15</volume>:<fpage>1311</fpage>. doi: <pub-id pub-id-type="doi">10.3390/f15081311</pub-id></mixed-citation></ref>
<ref id="ref5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gill</surname><given-names>RMA</given-names></name></person-group>. <article-title>A review of damage by mammals in north temperate forests: 3. Impact on trees and forests</article-title>. <source>Forestry</source>. (<year>1992</year>) <volume>65</volume>:<fpage>363</fpage>&#x2013;<lpage>88</lpage>. doi: <pub-id pub-id-type="doi">10.1093/forestry/65.4.363-a</pub-id></mixed-citation></ref>
<ref id="ref6"><label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Skot&#x00E1;k</surname><given-names>V</given-names></name> <name><surname>Kamler</surname><given-names>J</given-names></name> <name><surname>Klein</surname><given-names>V</given-names></name></person-group>. <article-title>Estimation of wild herbivore damage to field crops in the Czech Republic in 2019</article-title>. <source>Acta Univ Agric Silvic Mendelianae Brun</source>. (<year>2021</year>) <volume>69</volume>:<fpage>467</fpage>&#x2013;<lpage>72</lpage>. doi: <pub-id pub-id-type="doi">10.11118/actaun.2021.041</pub-id></mixed-citation></ref>
<ref id="ref7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Linnell</surname><given-names>JDC</given-names></name> <name><surname>Cretois</surname><given-names>B</given-names></name> <name><surname>Nilsen</surname><given-names>EB</given-names></name> <name><surname>Rolandsen</surname><given-names>CM</given-names></name> <name><surname>Solberg</surname><given-names>EJ</given-names></name> <name><surname>Veiberg</surname><given-names>V</given-names></name> <etal/></person-group>. <article-title>The challenges and opportunities of coexisting with wild ungulates in the human-dominated landscapes of Europe&#x2019;s Anthropocene</article-title>. <source>Biol Conserv</source>. (<year>2020</year>) <volume>244</volume>:<fpage>108500</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.biocon.2020.108500</pub-id></mixed-citation></ref>
<ref id="ref8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ruiz-Rodr&#x00ED;guez</surname><given-names>C</given-names></name> <name><surname>Blanco-Aguiar</surname><given-names>JA</given-names></name> <name><surname>G&#x00F3;mez-Molina</surname><given-names>A</given-names></name> <name><surname>Illanas</surname><given-names>S</given-names></name> <name><surname>Fern&#x00E1;ndez-L&#x00F3;pez</surname><given-names>J</given-names></name> <name><surname>Acevedo</surname><given-names>P</given-names></name> <etal/></person-group>. <article-title>Towards standardising the collection of game statistics in Europe: a case study</article-title>. <source>Eur J Wildl Res</source>. (<year>2023</year>) <volume>69</volume>:<fpage>122</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s10344-023-01746-3</pub-id></mixed-citation></ref>
<ref id="ref9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Aubry</surname><given-names>P</given-names></name> <name><surname>Guillemain</surname><given-names>M</given-names></name></person-group>. <article-title>Attenuating the nonresponse bias in hunting bag surveys: the multiphase sampling strategy</article-title>. <source>PLoS One</source>. (<year>2019</year>) <volume>14</volume>:<fpage>e0213670</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0213670</pub-id>, <pub-id pub-id-type="pmid">30875395</pub-id></mixed-citation></ref>
<ref id="ref10"><label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Aubry</surname><given-names>P</given-names></name> <name><surname>Guillemain</surname><given-names>M</given-names></name> <name><surname>Sorrenti</surname><given-names>M</given-names></name></person-group>. <article-title>Increasing the trust in hunting bag statistics: why random selection of hunters is so important</article-title>. <source>Ecol Indic</source>. (<year>2020</year>) <volume>117</volume>:<fpage>106522</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecolind.2020.106522</pub-id></mixed-citation></ref>
<ref id="ref11"><label>11.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schmidt</surname><given-names>JI</given-names></name> <name><surname>Kellie</surname><given-names>KA</given-names></name> <name><surname>Chapin</surname><given-names>FS</given-names></name></person-group>. <article-title>Detecting, estimating, and correcting for biases in harvest data</article-title>. <source>J Wildl Manag</source>. (<year>2015</year>) <volume>79</volume>:<fpage>1152</fpage>&#x2013;<lpage>62</lpage>. doi: <pub-id pub-id-type="doi">10.1002/jwmg.928</pub-id></mixed-citation></ref>
<ref id="ref12"><label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Andrew</surname><given-names>W</given-names></name> <name><surname>Gao</surname><given-names>J</given-names></name> <name><surname>Mullan</surname><given-names>S</given-names></name> <name><surname>Campbell</surname><given-names>N</given-names></name> <name><surname>Dowsey</surname><given-names>AW</given-names></name> <name><surname>Burghardt</surname><given-names>T</given-names></name></person-group>. <article-title>Visual identification of individual Holstein-Friesian cattle via deep metric learning</article-title>. <source>Comput Electron Agric</source>. (<year>2021</year>) <volume>185</volume>:<fpage>106133</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2021.106133</pub-id></mixed-citation></ref>
<ref id="ref13"><label>13.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kumar</surname><given-names>S</given-names></name> <name><surname>Singh</surname><given-names>SK</given-names></name></person-group>. <article-title>Cattle recognition: a new frontier in visual animal biometrics research</article-title>. <source>Proc Natl Acad Sci India A Phys Sci</source>. (<year>2020</year>) <volume>90</volume>:<fpage>689</fpage>&#x2013;<lpage>708</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s40010-019-00610-x</pub-id></mixed-citation></ref>
<ref id="ref14"><label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Awad</surname><given-names>AI</given-names></name></person-group>. <article-title>From classical methods to animal biometrics: a review on cattle identification and tracking</article-title>. <source>Comput Electron Agric</source>. (<year>2016</year>) <volume>123</volume>:<fpage>423</fpage>&#x2013;<lpage>35</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2016.03.014</pub-id></mixed-citation></ref>
<ref id="ref15"><label>15.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Choi</surname><given-names>HI</given-names></name> <name><surname>Lee</surname><given-names>Y</given-names></name> <name><surname>Shin</surname><given-names>H</given-names></name> <name><surname>Lee</surname><given-names>S</given-names></name> <name><surname>Choi</surname><given-names>SS</given-names></name> <name><surname>Han</surname><given-names>CY</given-names></name> <etal/></person-group>. <article-title>The formation and invariance of canine nose pattern of beagle dogs from early puppy to young adult periods</article-title>. <source>Animals</source>. (<year>2021</year>) <volume>11</volume>:<fpage>2664</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ani11092664</pub-id>, <pub-id pub-id-type="pmid">34573628</pub-id></mixed-citation></ref>
<ref id="ref16"><label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Choi</surname><given-names>HI</given-names></name> <name><surname>Kim</surname><given-names>M-Y</given-names></name> <name><surname>Yoon</surname><given-names>H-Y</given-names></name> <name><surname>Lee</surname><given-names>S</given-names></name> <name><surname>Choi</surname><given-names>SS</given-names></name> <name><surname>Han</surname><given-names>CY</given-names></name> <etal/></person-group>. <article-title>Study on the viability of canine nose pattern as a unique biometric marker</article-title>. <source>Animals</source>. (<year>2021</year>) <volume>11</volume>:<fpage>3372</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ani11123372</pub-id>, <pub-id pub-id-type="pmid">34944149</pub-id></mixed-citation></ref>
<ref id="ref17"><label>17.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cihan</surname><given-names>P</given-names></name> <name><surname>Saygili</surname><given-names>A</given-names></name> <name><surname>Ozmen</surname><given-names>NE</given-names></name> <name><surname>Akyuzlu</surname><given-names>M</given-names></name></person-group>. <article-title>Identification and recognition of animals from biometric markers using computer vision approaches: a review</article-title>. <source>Kafkas Univ Vet Fak Derg</source>. (<year>2023</year>) <volume>29</volume>:<fpage>581</fpage>&#x2013;<lpage>93</lpage>. doi: <pub-id pub-id-type="doi">10.9775/kvfd.2023.30265</pub-id></mixed-citation></ref>
<ref id="ref18"><label>18.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Paudel</surname><given-names>S</given-names></name> <name><surname>Brown-Brandl</surname><given-names>T</given-names></name></person-group>. <article-title>Advancements in individual animal identification: a historical perspective from prehistoric times to the present</article-title>. <source>Animals</source>. (<year>2025</year>) <volume>15</volume>:<fpage>2514</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ani15172514</pub-id>, <pub-id pub-id-type="pmid">40941309</pub-id></mixed-citation></ref>
<ref id="ref19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xiao</surname><given-names>S</given-names></name> <name><surname>Dhand</surname><given-names>NK</given-names></name> <name><surname>Wang</surname><given-names>Z</given-names></name> <name><surname>Hu</surname><given-names>K</given-names></name> <name><surname>Thomson</surname><given-names>PC</given-names></name> <name><surname>House</surname><given-names>JK</given-names></name> <etal/></person-group>. <article-title>Review of applications of deep learning in veterinary diagnostics and animal health</article-title>. <source>Front Vet Sci</source>. (<year>2025</year>) <volume>12</volume>:<fpage>1511522</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fvets.2025.1511522</pub-id>, <pub-id pub-id-type="pmid">40144529</pub-id></mixed-citation></ref>
<ref id="ref20"><label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Romano</surname><given-names>A</given-names></name> <name><surname>De Camillis</surname><given-names>A</given-names></name> <name><surname>Sciota</surname><given-names>D</given-names></name> <name><surname>Baghini</surname><given-names>S</given-names></name> <name><surname>Di Provvido</surname><given-names>A</given-names></name> <name><surname>Rosamilia</surname><given-names>A</given-names></name> <etal/></person-group>. <article-title>Cross-species AI: shifting a convolutional neural network from pigs to lambs to detect pneumonia at slaughter</article-title>. <source>Front Vet Sci</source>. (<year>2025</year>) <volume>12</volume>:<fpage>1591032</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fvets.2025.1591032</pub-id>, <pub-id pub-id-type="pmid">40470285</pub-id></mixed-citation></ref>
<ref id="ref21"><label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Iglesias Pastrana</surname><given-names>C</given-names></name> <name><surname>Navas Gonz&#x00E1;lez</surname><given-names>FJ</given-names></name> <name><surname>Ciani</surname><given-names>E</given-names></name> <name><surname>Mar&#x00ED;n Navas</surname><given-names>C</given-names></name> <name><surname>Delgado Bermejo</surname><given-names>JV</given-names></name></person-group>. <article-title>Thermographic ranges of dromedary camels during physical exercise: applications for physical health/welfare monitoring and phenotypic selection</article-title>. <source>Front Vet Sci</source>. (<year>2023</year>) <volume>10</volume>:<fpage>1297412</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fvets.2023.1297412</pub-id>, <pub-id pub-id-type="pmid">38173554</pub-id></mixed-citation></ref>
<ref id="ref22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fuentes</surname><given-names>S</given-names></name> <name><surname>Gonzalez Viejo</surname><given-names>C</given-names></name> <name><surname>Tongson</surname><given-names>E</given-names></name> <name><surname>Dunshea</surname><given-names>FR</given-names></name> <name><surname>Dac</surname><given-names>HH</given-names></name> <name><surname>Lipovetzky</surname><given-names>N</given-names></name></person-group>. <article-title>Animal biometric assessment using non-invasive computer vision and machine learning are good predictors of dairy cows age and welfare: the future of automated veterinary support systems</article-title>. <source>J Agric Food Res</source>. (<year>2022</year>) <volume>10</volume>:<fpage>100388</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jafr.2022.100388</pub-id></mixed-citation></ref>
<ref id="ref23"><label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kumar</surname><given-names>S</given-names></name> <name><surname>Pandey</surname><given-names>A</given-names></name> <name><surname>Sai Ram Satwik</surname><given-names>K</given-names></name> <name><surname>Kumar</surname><given-names>S</given-names></name> <name><surname>Singh</surname><given-names>SK</given-names></name> <name><surname>Singh</surname><given-names>AK</given-names></name> <etal/></person-group>. <article-title>Deep learning framework for recognition of cattle using muzzle point image pattern</article-title>. <source>Measurement</source>. (<year>2018</year>) <volume>116</volume>:<fpage>1</fpage>&#x2013;<lpage>17</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.measurement.2017.10.064</pub-id></mixed-citation></ref>
<ref id="ref24"><label>24.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bae</surname><given-names>HB</given-names></name> <name><surname>Pak</surname><given-names>D</given-names></name> <name><surname>Lee</surname><given-names>S</given-names></name></person-group>. <article-title>Dog nose-print identification using deep neural networks</article-title>. <source>IEEE Access</source>. (<year>2021</year>) <volume>9</volume>:<fpage>49141</fpage>&#x2013;<lpage>53</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2021.3068517</pub-id></mixed-citation></ref>
<ref id="ref25"><label>25.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Taha</surname><given-names>A</given-names></name> <name><surname>Darwish</surname><given-names>A</given-names></name> <name><surname>Hassanien</surname><given-names>A</given-names></name> <name><surname>ElKholy</surname><given-names>A</given-names></name></person-group>. <article-title>Arabian horse identification and gender determination system based on feature fusion and gray wolf optimization</article-title>. <source>Int J Eng Syst</source>. (<year>2020</year>) <volume>13</volume>:<fpage>145</fpage>&#x2013;<lpage>55</lpage>. doi: <pub-id pub-id-type="doi">10.22266/ijies2020.0831.13</pub-id></mixed-citation></ref>
<ref id="ref26"><label>26.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Karthik</surname><given-names>K</given-names></name> <name><surname>Chakraborty</surname><given-names>S</given-names></name> <name><surname>Banik</surname><given-names>S</given-names></name></person-group>. <source>Muzzle analysis for biometric identification of pigs. 2017 ninth international conference on advances in pattern recognition (ICAPR)</source>. <publisher-loc>Bangalore, India</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2017</year>). p. <fpage>1</fpage>&#x2013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICAPR.2017.8593204</pub-id></mixed-citation></ref>
<ref id="ref27"><label>27.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chakraborty</surname><given-names>S</given-names></name> <name><surname>Karthik</surname><given-names>K</given-names></name> <name><surname>Banik</surname><given-names>S</given-names></name></person-group>. <article-title>Graph synthesis for pig breed classification from muzzle images</article-title>. <source>IEEE Access</source>. (<year>2021</year>) <volume>9</volume>:<fpage>127240</fpage>&#x2013;<lpage>58</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2021.3111957</pub-id></mixed-citation></ref>
<ref id="ref28"><label>28.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chan</surname><given-names>Y-K</given-names></name> <name><surname>Lin</surname><given-names>C-H</given-names></name> <name><surname>Wang</surname><given-names>C-L</given-names></name> <name><surname>Tu</surname><given-names>K-C</given-names></name> <name><surname>Yang</surname><given-names>S-C</given-names></name> <name><surname>Tsai</surname><given-names>M-H</given-names></name> <etal/></person-group>. <article-title>Dog identification based on textural features and spatial relation of noseprint</article-title>. <source>Pattern Recogn</source>. (<year>2024</year>) <volume>151</volume>:<fpage>110353</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.patcog.2024.110353</pub-id></mixed-citation></ref>
<ref id="ref29"><label>29.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jang</surname><given-names>DH</given-names></name> <name><surname>Kwon</surname><given-names>KS</given-names></name> <name><surname>Kim</surname><given-names>JK</given-names></name> <name><surname>Yang</surname><given-names>KY</given-names></name> <name><surname>Kim</surname><given-names>JB</given-names></name></person-group>. <article-title>Dog identification method based on muzzle pattern image</article-title>. <source>Appl Sci</source>. (<year>2020</year>) <volume>10</volume>:<fpage>1</fpage>&#x2013;<lpage>17</lpage>. doi: <pub-id pub-id-type="doi">10.3390/app10248994</pub-id></mixed-citation></ref>
<ref id="ref30"><label>30.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ahmed</surname><given-names>SU</given-names></name> <name><surname>Frnda</surname><given-names>J</given-names></name> <name><surname>Waqas</surname><given-names>M</given-names></name> <name><surname>Khan</surname><given-names>MH</given-names></name></person-group>. <article-title>Dataset of cattle biometrics through muzzle images</article-title>. <source>Data Brief</source>. (<year>2024</year>) <volume>53</volume>:<fpage>110125</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.dib.2024.110125</pub-id>, <pub-id pub-id-type="pmid">38370917</pub-id></mixed-citation></ref>
<ref id="ref31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bello</surname><given-names>R</given-names></name> <name><surname>Talib</surname><given-names>AZH</given-names></name> <name><surname>Mohamed</surname><given-names>ASAB</given-names></name></person-group>. <article-title>Deep learning-based architectures for recognition of cow using cow nose image pattern</article-title>. <source>Gazi Univ J Sci</source>. (<year>2020</year>) <volume>33</volume>:<fpage>831</fpage>&#x2013;<lpage>44</lpage>. doi: <pub-id pub-id-type="doi">10.35378/gujs.605631</pub-id></mixed-citation></ref>
<ref id="ref32"><label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bello</surname><given-names>R-W</given-names></name> <name><surname>Talib</surname><given-names>AZH</given-names></name> <name><surname>Mohamed</surname><given-names>ASAB</given-names></name></person-group>. <article-title>Deep belief network approach for recognition of cow using cow nose image pattern</article-title>. <source>Walailak J Sci Technol</source>. (<year>2021</year>) <volume>18</volume>:<fpage>1</fpage>&#x2013;<lpage>14</lpage>. doi: <pub-id pub-id-type="doi">10.48048/wjst.2021.8984</pub-id></mixed-citation></ref>
<ref id="ref33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ferreira</surname><given-names>REP</given-names></name> <name><surname>Bresolin</surname><given-names>T</given-names></name> <name><surname>Rosa</surname><given-names>GJM</given-names></name> <name><surname>D&#x00F3;rea</surname><given-names>JRR</given-names></name></person-group>. <article-title>Using dorsal surface for individual identification of dairy calves through 3D deep learning algorithms</article-title>. <source>Comput Electron Agric</source>. (<year>2022</year>) <volume>201</volume>:<fpage>107272</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2022.107272</pub-id></mixed-citation></ref>
<ref id="ref34"><label>34.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gaber</surname><given-names>T</given-names></name> <name><surname>Tharwat</surname><given-names>A</given-names></name> <name><surname>Hassanien</surname><given-names>AE</given-names></name> <name><surname>Snasel</surname><given-names>V</given-names></name></person-group>. <article-title>Biometric cattle identification approach based on weber&#x2019;s local descriptor and AdaBoost classifier</article-title>. <source>Comput Electron Agric</source>. (<year>2016</year>) <volume>122</volume>:<fpage>55</fpage>&#x2013;<lpage>66</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2015.12.022</pub-id></mixed-citation></ref>
<ref id="ref35"><label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Indrabayu</surname><given-names>I</given-names></name> <name><surname>Nurtanio</surname><given-names>I</given-names></name> <name><surname>Areni</surname><given-names>IS</given-names></name> <name><surname>Bugiwati</surname><given-names>SRA</given-names></name> <name><surname>Bustamin</surname><given-names>A</given-names></name> <name><surname>Rahmatullah</surname><given-names>M</given-names></name></person-group>. <article-title>A portable cattle tagging based on muzzle pattern</article-title>. <source>Int J Interact Mobile Technol</source>. (<year>2020</year>) <volume>14</volume>:<fpage>134</fpage>. doi: <pub-id pub-id-type="doi">10.3991/ijim.v14i13.13237</pub-id></mixed-citation></ref>
<ref id="ref36"><label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kaur</surname><given-names>A</given-names></name> <name><surname>Kumar</surname><given-names>M</given-names></name> <name><surname>Jindal</surname><given-names>MK</given-names></name></person-group>. <article-title>Shi-Tomasi corner detector for cattle identification from muzzle print image pattern</article-title>. <source>Ecol Inform</source>. (<year>2022</year>) <volume>68</volume>:<fpage>101549</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ecoinf.2021.101549</pub-id></mixed-citation></ref>
<ref id="ref37"><label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>G</given-names></name> <name><surname>Erickson</surname><given-names>GE</given-names></name> <name><surname>Xiong</surname><given-names>Y</given-names></name></person-group>. <article-title>Individual beef cattle identification using muzzle images and deep learning techniques</article-title>. <source>Animals</source>. (<year>2022</year>) <volume>12</volume>:<fpage>1453</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ani12111453</pub-id>, <pub-id pub-id-type="pmid">35681917</pub-id></mixed-citation></ref>
<ref id="ref38"><label>38.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mahmoud</surname><given-names>HA</given-names></name> <name><surname>Hadad</surname><given-names>HMR</given-names><prefix>El</prefix></name></person-group>. <article-title>Automatic cattle muzzle print classification system using multiclass support vector machine</article-title>. <source>Int J Image Min</source>. (<year>2015</year>) <volume>1</volume>:<fpage>126</fpage>. doi: <pub-id pub-id-type="doi">10.1504/IJIM.2015.070022</pub-id></mixed-citation></ref>
<ref id="ref39"><label>39.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Noviyanto</surname><given-names>A</given-names></name> <name><surname>Arymurthy</surname><given-names>AM</given-names></name></person-group>. <article-title>Beef cattle identification based on muzzle pattern using a matching refinement technique in the SIFT method</article-title>. <source>Comput Electron Agric</source>. (<year>2013</year>) <volume>99</volume>:<fpage>77</fpage>&#x2013;<lpage>84</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2013.09.002</pub-id></mixed-citation></ref>
<ref id="ref40"><label>40.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shojaeipour</surname><given-names>A</given-names></name> <name><surname>Falzon</surname><given-names>G</given-names></name> <name><surname>Kwan</surname><given-names>P</given-names></name> <name><surname>Hadavi</surname><given-names>N</given-names></name> <name><surname>Cowley</surname><given-names>FC</given-names></name> <name><surname>Paul</surname><given-names>D</given-names></name></person-group>. <article-title>Automated muzzle detection and biometric identification via few-shot deep transfer learning of mixed breed cattle</article-title>. <source>Agronomy</source>. (<year>2021</year>) <volume>11</volume>:<fpage>2365</fpage>. doi: <pub-id pub-id-type="doi">10.3390/agronomy11112365</pub-id></mixed-citation></ref>
<ref id="ref41"><label>41.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>&#x0160;poljari&#x0107;</surname><given-names>D</given-names></name> <name><surname>Pajurin</surname><given-names>L</given-names></name> <name><surname>Kujund&#x017E;i&#x0107;</surname><given-names>M</given-names></name> <name><surname>Feren&#x010D;akovi&#x0107;</surname><given-names>M</given-names></name> <name><surname>Vrba&#x0161;ki</surname><given-names>A</given-names></name> <name><surname>&#x0160;poljari&#x0107;</surname><given-names>B</given-names></name> <etal/></person-group>. <article-title>Identification of cattle using nasolabial plate imprints and biometric analysis</article-title>. <source>Vet Med Sci</source>. (<year>2025</year>) <volume>11</volume>:<fpage>e70589</fpage>. doi: <pub-id pub-id-type="doi">10.1002/vms3.70589</pub-id>, <pub-id pub-id-type="pmid">40844795</pub-id></mixed-citation></ref>
<ref id="ref42"><label>42.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Tharwat</surname><given-names>A</given-names></name> <name><surname>Gaber</surname><given-names>T</given-names></name> <name><surname>Hassanien</surname><given-names>AE</given-names></name></person-group>. <source>Cattle identification based on muzzle images using Gabor features and SVM classifier</source>. <comment>Communications in Computer and Information Science</comment>, <publisher-name>Springer Verlag</publisher-name> (<year>2014</year>). p. <fpage>236</fpage>&#x2013;<lpage>247</lpage>.</mixed-citation></ref>
<ref id="ref43"><label>43.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nishanov</surname><given-names>A</given-names></name> <name><surname>Zaripov</surname><given-names>F</given-names></name> <name><surname>Akbaraliev</surname><given-names>B</given-names></name> <name><surname>Babadjanov</surname><given-names>E</given-names></name> <name><surname>Geldibayev</surname><given-names>B</given-names></name></person-group>. <article-title>Improved deep learning model for cattle identification using muzzle images</article-title>. <source>J Math Mech Comput Sci</source>. (<year>2025</year>) <volume>125</volume>:<fpage>18</fpage>&#x2013;<lpage>28</lpage>. doi: <pub-id pub-id-type="doi">10.26577/JMMCS2025125102</pub-id></mixed-citation></ref>
<ref id="ref44"><label>44.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll1">ISO/IEC 2382&#x2013;37:2022</collab></person-group>. <source>Information technology&#x2014;vocabulary, Part 37: Biometrics. 2022 3:1&#x2013;34</source>. Available online at: <ext-link xlink:href="https://www.iso.org/standard/73514.html" ext-link-type="uri">https://www.iso.org/standard/73514.html</ext-link> (Accessed October 14, 2025).</mixed-citation></ref>
<ref id="ref45"><label>45.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Huang</surname><given-names>G</given-names></name> <name><surname>Liu</surname><given-names>Z</given-names></name> <name><surname>van der Maaten</surname><given-names>L</given-names></name> <name><surname>Weinberger</surname><given-names>KQ</given-names></name></person-group>. <source>Densely connected convolutional networks</source>. (<year>2018</year>). Available online at: <ext-link xlink:href="http://arxiv.org/abs/1608.06993" ext-link-type="uri">http://arxiv.org/abs/1608.06993</ext-link></mixed-citation></ref>
<ref id="ref46"><label>46.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Douze</surname><given-names>M</given-names></name> <name><surname>Guzhva</surname><given-names>A</given-names></name> <name><surname>Deng</surname><given-names>C</given-names></name> <name><surname>Johnson</surname><given-names>J</given-names></name> <name><surname>Szilvasy</surname><given-names>G</given-names></name> <name><surname>Mazar&#x00E9;</surname><given-names>P-E</given-names></name> <etal/></person-group>. <article-title>The Faiss library</article-title>. (<year>2025</year>). Available online at: <ext-link xlink:href="http://arxiv.org/abs/2401.08281" ext-link-type="uri">http://arxiv.org/abs/2401.08281</ext-link></mixed-citation></ref>
<ref id="ref47"><label>47.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Karami</surname><given-names>E</given-names></name> <name><surname>Prasad</surname><given-names>S</given-names></name> <name><surname>Shehata</surname><given-names>M</given-names></name></person-group>. <source>Image matching using SIFT, SURF, BRIEF and ORB: performance comparison for distorted images</source>. (<year>2017</year>). Available online at: <ext-link xlink:href="https://arxiv.org/abs/1710.02726" ext-link-type="uri">https://arxiv.org/abs/1710.02726</ext-link> (Accessed January 30, 2026).</mixed-citation></ref>
<ref id="ref48"><label>48.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Dalal</surname><given-names>N</given-names></name> <name><surname>Triggs</surname><given-names>B</given-names></name></person-group>. <source>Histograms of oriented gradients for human detection</source>. Available online at: <ext-link xlink:href="https://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf" ext-link-type="uri">https://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf</ext-link> (Accessed January 30, 2026).</mixed-citation></ref>
<ref id="ref49"><label>49.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Nandy</surname><given-names>A</given-names></name> <name><surname>Haldar</surname><given-names>S</given-names></name> <name><surname>Banerjee</surname><given-names>S</given-names></name> <name><surname>Mitra</surname><given-names>S</given-names></name></person-group>. <chapter-title>A survey on applications of Siamese neural networks in computer vision</chapter-title>. <conf-name>2020 International Conference for Emerging Technology (INCET)</conf-name>. <publisher-loc>Belgaum, India</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2020</year>). p. <fpage>1</fpage>&#x2013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1109/INCET49848.2020.9153977</pub-id></mixed-citation></ref>
<ref id="ref50"><label>50.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Sun</surname><given-names>J</given-names></name> <name><surname>Shen</surname><given-names>Z</given-names></name> <name><surname>Wang</surname><given-names>Y</given-names></name> <name><surname>Bao</surname><given-names>H</given-names></name> <name><surname>Zhou</surname><given-names>X</given-names></name></person-group>. <source>LoFTR: detector-free local feature matching with transformers</source>. Available online at: <ext-link xlink:href="https://zju3dv.github.io/loftr/" ext-link-type="uri">https://zju3dv.github.io/loftr/</ext-link> (Accessed January 30, 2026).</mixed-citation></ref>
<ref id="ref51"><label>51.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll2">ISO/IEC 19795-1:2021</collab></person-group>. <source>Information technology&#x2014;biometric performance testing and reporting</source>. <year>2021</year>. Available online at: <ext-link xlink:href="https://www.iso.org/standard/73515.html" ext-link-type="uri">https://www.iso.org/standard/73515.html</ext-link> (Accessed October 14, 2025).</mixed-citation></ref>
<ref id="ref52"><label>52.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chan</surname><given-names>Y-K</given-names></name> <name><surname>Lin</surname><given-names>C-H</given-names></name> <name><surname>Ben</surname><given-names>Y-R</given-names></name> <name><surname>Wang</surname><given-names>C-L</given-names></name> <name><surname>Yang</surname><given-names>S-C</given-names></name> <name><surname>Tsai</surname><given-names>M-H</given-names></name> <etal/></person-group>. <article-title>Dog nose-print recognition based on the shape and spatial features of scales</article-title>. <source>Expert Syst Appl</source>. (<year>2024</year>) <volume>240</volume>:<fpage>122308</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2023.122308</pub-id></mixed-citation></ref>
<ref id="ref53"><label>53.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Agresti</surname><given-names>A</given-names></name> <name><surname>Bini</surname><given-names>M</given-names></name> <name><surname>Bertaccini</surname><given-names>B</given-names></name> <name><surname>Ryu</surname><given-names>E</given-names></name></person-group>. <article-title>Simultaneous confidence intervals for comparing binomial parameters</article-title>. <source>Biometrics</source>. (<year>2008</year>) <volume>64</volume>:<fpage>1270</fpage>&#x2013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1541-0420.2008.00990.x</pub-id>, <pub-id pub-id-type="pmid">18266891</pub-id></mixed-citation></ref>
<ref id="ref54"><label>54.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll3">R Core Team</collab></person-group>. <source>R: a language and environment for statistical computing</source>. <publisher-loc>Vienna</publisher-loc>. <year>2024</year>. Available online at: <ext-link xlink:href="https://www.r-project.org/" ext-link-type="uri">https://www.r-project.org/</ext-link> (Accessed November 27, 2025).</mixed-citation></ref>
<ref id="ref55"><label>55.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wickham</surname><given-names>H</given-names></name></person-group>. <source>ggplot2</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer New York</publisher-name> (<year>2009</year>).</mixed-citation></ref>
<ref id="ref56"><label>56.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab id="coll4">Czech Statistical Office</collab></person-group>. <source>Lov zv&#x011B;&#x0159;e, zazv&#x011B;&#x0159;ov&#x00E1;n&#x00ED; a jarn&#x00ED; kmenov&#x00E9; stavy zv&#x011B;&#x0159;e 2024/2025</source>. (<year>2025</year>) Available online at: <ext-link xlink:href="https://vdb.czso.cz/vdbvo2/faces/cs/index.jsf?page=vystup-objekt&#x0026;z=T&#x0026;f=TABULKA&#x0026;skupId=2342&#x0026;katalog=30841&#x0026;pvo=LES0331&#x0026;pvo=LES0331&#x0026;evo=v977_!_LES0331-2024_1" ext-link-type="uri">https://vdb.czso.cz/vdbvo2/faces/cs/index.jsf?page=vystup-objekt&#x0026;z=T&#x0026;f=TABULKA&#x0026;skupId=2342&#x0026;katalog=30841&#x0026;pvo=LES0331&#x0026;pvo=LES0331&#x0026;evo=v977_!_LES0331-2024_1</ext-link> (Accessed October 11, 2025).</mixed-citation></ref>
<ref id="ref57"><label>57.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Drahansk&#x00FD;</surname><given-names>M</given-names></name> <name><surname>Brezinova</surname><given-names>E</given-names></name> <name><surname>Lodrov&#x00E1;</surname><given-names>D</given-names></name> <name><surname>Ors&#x00E1;g</surname><given-names>F</given-names></name></person-group>. <article-title>Fingerprint recognition influenced by skin diseases</article-title>. <source>Int J Biosci Biotechnol</source>. (<year>2010</year>) <volume>3</volume>:<fpage>11</fpage>&#x2013;<lpage>22</lpage>.</mixed-citation></ref>
<ref id="ref58"><label>58.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>CK</given-names></name> <name><surname>Chang</surname><given-names>CC</given-names></name> <name><surname>Johar</surname><given-names>A</given-names></name> <name><surname>Puwira</surname><given-names>O</given-names></name> <name><surname>Roshidah</surname><given-names>B</given-names></name></person-group>. <article-title>Fingerprint changes and verification failure among patients with hand dermatitis</article-title>. <source>JAMA Dermatol</source>. (<year>2013</year>) <volume>149</volume>:<fpage>295</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1001/jamadermatol.2013.1425</pub-id>, <pub-id pub-id-type="pmid">23682364</pub-id></mixed-citation></ref>
<ref id="ref59"><label>59.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alotaibi</surname><given-names>LA</given-names></name> <name><surname>Alblaies</surname><given-names>MF</given-names></name> <name><surname>Alghamdi</surname><given-names>NH</given-names></name> <name><surname>AlNujaidi</surname><given-names>RY</given-names></name> <name><surname>Alali</surname><given-names>SA</given-names></name> <name><surname>Menezes</surname><given-names>RG</given-names></name></person-group>. <article-title>Forensic implications of fingerprint verification failure among people with skin diseases</article-title>. <source>Med Leg J</source>. (<year>2022</year>) <volume>90</volume>:<fpage>94</fpage>&#x2013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.1177/00258172211061947</pub-id>, <pub-id pub-id-type="pmid">35156436</pub-id></mixed-citation></ref>
<ref id="ref60"><label>60.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Drahansky</surname><given-names>M</given-names></name> <name><surname>Dolezel</surname><given-names>M</given-names></name> <name><surname>Urbanek</surname><given-names>J</given-names></name> <name><surname>Brezinova</surname><given-names>E</given-names></name> <name><surname>Kim</surname><given-names>T</given-names></name></person-group>. <article-title>Influence of skin diseases on fingerprint recognition</article-title>. <source>J Biomed Biotechnol</source>. (<year>2012</year>) <volume>2012</volume>:<fpage>1</fpage>&#x2013;<lpage>14</lpage>. doi: <pub-id pub-id-type="doi">10.1155/2012/626148</pub-id>, <pub-id pub-id-type="pmid">22654483</pub-id></mixed-citation></ref>
<ref id="ref61"><label>61.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kmetec</surname><given-names>J</given-names></name> <name><surname>Kuhar</surname><given-names>U</given-names></name> <name><surname>Fajfar</surname><given-names>AG</given-names></name> <name><surname>Vengu&#x0161;t</surname><given-names>D&#x017D;</given-names></name> <name><surname>Vengu&#x0161;t</surname><given-names>G</given-names></name></person-group>. <article-title>A comprehensive study of cutaneous fibropapillomatosis in free-ranging roe deer (<italic>Capreolus capreolus</italic>) and red deer (<italic>Cervus elaphus</italic>): from clinical manifestations to whole-genome sequencing of papillomaviruses</article-title>. <source>Viruses</source>. (<year>2020</year>) <volume>12</volume>:<fpage>1001</fpage>. doi: <pub-id pub-id-type="doi">10.3390/v12091001</pub-id>, <pub-id pub-id-type="pmid">32911735</pub-id></mixed-citation></ref>
<ref id="ref62"><label>62.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rajsk&#x00FD;</surname><given-names>D</given-names></name> <name><surname>Rajsk&#x00FD;</surname><given-names>M</given-names></name> <name><surname>Garaj</surname><given-names>P</given-names></name> <name><surname>Kropil</surname><given-names>R</given-names></name> <name><surname>Ivan</surname><given-names>M</given-names></name> <name><surname>Vodnansky</surname><given-names>M</given-names></name> <etal/></person-group>. <article-title>Emergence and expansion of roe deer (<italic>Capreolus capreolus</italic>) fibropapillomatosis in Slovakia</article-title>. <source>Eur J Wildl Res</source>. (<year>2016</year>) <volume>62</volume>:<fpage>43</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10344-015-0972-y</pub-id></mixed-citation></ref>
<ref id="ref63"><label>63.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nemeth</surname><given-names>NM</given-names></name> <name><surname>Ruder</surname><given-names>MG</given-names></name> <name><surname>Gerhold</surname><given-names>RW</given-names></name> <name><surname>Brown</surname><given-names>JD</given-names></name> <name><surname>Munk</surname><given-names>BA</given-names></name> <name><surname>Oesterle</surname><given-names>PT</given-names></name> <etal/></person-group>. <article-title>Demodectic mange, Dermatophilosis, and other parasitic and bacterial dermatologic diseases in free-ranging white-tailed deer (<italic>Odocoileus virginianus</italic>) in the United States from 1975 to 2012</article-title>. <source>Vet Pathol</source>. (<year>2014</year>) <volume>51</volume>:<fpage>633</fpage>&#x2013;<lpage>40</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0300985813498783</pub-id>, <pub-id pub-id-type="pmid">23912715</pub-id></mixed-citation></ref>
<ref id="ref64"><label>64.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Scagliarini</surname><given-names>A</given-names></name> <name><surname>Vaccari</surname><given-names>F</given-names></name> <name><surname>Turrini</surname><given-names>F</given-names></name> <name><surname>Bianchi</surname><given-names>A</given-names></name> <name><surname>Cordioli</surname><given-names>P</given-names></name> <name><surname>Lavazza</surname><given-names>A</given-names></name></person-group>. <article-title>Parapoxvirus infections of red deer, Italy</article-title>. <source>Emerg Infect Dis</source>. (<year>2011</year>) <volume>17</volume>:<fpage>684</fpage>&#x2013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.3201/eid1704.101454</pub-id>, <pub-id pub-id-type="pmid">21470460</pub-id></mixed-citation></ref>
<ref id="ref65"><label>65.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mart&#x00ED;n-Hernando</surname><given-names>MP</given-names></name> <name><surname>Torres</surname><given-names>MJ</given-names></name> <name><surname>Aznar</surname><given-names>J</given-names></name> <name><surname>Negro</surname><given-names>JJ</given-names></name> <name><surname>Gand&#x00ED;a</surname><given-names>A</given-names></name> <name><surname>Gort&#x00E1;zar</surname><given-names>C</given-names></name></person-group>. <article-title>Distribution of lesions in red and fallow deer naturally infected with <italic>Mycobacterium bovis</italic></article-title>. <source>J Comp Pathol</source>. (<year>2010</year>) <volume>142</volume>:<fpage>43</fpage>&#x2013;<lpage>50</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jcpa.2009.07.003</pub-id>, <pub-id pub-id-type="pmid">19691968</pub-id></mixed-citation></ref>
<ref id="ref66"><label>66.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Keel</surname><given-names>MK</given-names></name> <name><surname>Gage</surname><given-names>PJ</given-names></name> <name><surname>Noon</surname><given-names>TH</given-names></name> <name><surname>Bradley</surname><given-names>GA</given-names></name> <name><surname>Collins</surname><given-names>JK</given-names></name></person-group>. <article-title>Caprine herpesvirus-2 in association with naturally occurring malignant catarrhal fever in captive sika deer (<italic>Cervus nippon</italic>)</article-title>. <source>J Vet Diagn Invest</source>. (<year>2003</year>) <volume>15</volume>:<fpage>179</fpage>&#x2013;<lpage>83</lpage>. doi: <pub-id pub-id-type="doi">10.1177/104063870301500215</pub-id>, <pub-id pub-id-type="pmid">12661731</pub-id></mixed-citation></ref>
<ref id="ref67"><label>67.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Gallivan</surname><given-names>GJ</given-names></name> <name><surname>Culverwell</surname><given-names>J</given-names></name> <name><surname>Girdwood</surname><given-names>R</given-names></name></person-group>. <source>Serum magnesium and zinc status of wild ungulates in the Swaziland Lowveld</source>. <year>2014</year>. Available online at: <ext-link xlink:href="https://nagonline.net/serum-magnesium-zinc-status-wild-ungulates-swaziland-lowveld/" ext-link-type="uri">https://nagonline.net/serum-magnesium-zinc-status-wild-ungulates-swaziland-lowveld/</ext-link> (Accessed January 30, 2026).</mixed-citation></ref>
<ref id="ref68"><label>68.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zou</surname><given-names>P</given-names></name> <name><surname>Du</surname><given-names>Y</given-names></name> <name><surname>Yang</surname><given-names>C</given-names></name> <name><surname>Cao</surname><given-names>Y</given-names></name></person-group>. <article-title>Trace element zinc and skin disorders</article-title>. <source>Front Med</source>. (<year>2023</year>) <volume>9</volume>:<fpage>1093868</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fmed.2022.1093868</pub-id>, <pub-id pub-id-type="pmid">36733937</pub-id></mixed-citation></ref>
<ref id="ref69"><label>69.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stegelmeier</surname><given-names>BL</given-names></name> <name><surname>Davis</surname><given-names>TZ</given-names></name> <name><surname>Clayton</surname><given-names>MJ</given-names></name></person-group>. <article-title>Plant-induced photosensitivity and dermatitis in livestock</article-title>. <source>Vet Clin N Am Food Anim Pract</source>. (<year>2020</year>) <volume>36</volume>:<fpage>725</fpage>&#x2013;<lpage>33</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cvfa.2020.08.008</pub-id>, <pub-id pub-id-type="pmid">33032702</pub-id></mixed-citation></ref>
<ref id="ref70"><label>70.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Olivry</surname><given-names>T</given-names></name></person-group>. <article-title>Auto-immune skin diseases in animals: time to reclassify and review after 40 years</article-title>. <source>BMC Vet Res</source>. (<year>2018</year>) <volume>14</volume>:<fpage>157</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12917-018-1477-1</pub-id>, <pub-id pub-id-type="pmid">29751810</pub-id></mixed-citation></ref>
<ref id="ref71"><label>71.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vasconcelos</surname><given-names>J</given-names></name> <name><surname>Pires</surname><given-names>M d A</given-names></name> <name><surname>Alves</surname><given-names>A</given-names></name> <name><surname>Vieira-Pinto</surname><given-names>M</given-names></name> <name><surname>Saraiva</surname><given-names>C</given-names></name> <name><surname>Cardoso</surname><given-names>L</given-names></name></person-group>. <article-title>Neoplasms in domestic ruminants and swine: a systematic literature review</article-title>. <source>Vet Sci</source>. (<year>2023</year>) <volume>10</volume>:<fpage>163</fpage>. doi: <pub-id pub-id-type="doi">10.3390/vetsci10020163</pub-id>, <pub-id pub-id-type="pmid">36851467</pub-id></mixed-citation></ref>
<ref id="ref72"><label>72.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Agrimi</surname><given-names>U</given-names></name> <name><surname>Morelli</surname><given-names>L</given-names></name> <name><surname>Di Guardo</surname><given-names>G</given-names></name></person-group>. <article-title>Squamous cell carcinoma of the skin in a P&#x00E8;re David&#x2019;s deer (<italic>Elaphurus davidianus</italic>)</article-title>. <source>J Wildl Dis</source>. (<year>1993</year>) <volume>29</volume>:<fpage>616</fpage>&#x2013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.7589/0090-3558-29.4.616</pub-id>, <pub-id pub-id-type="pmid">8258867</pub-id></mixed-citation></ref>
<ref id="ref73"><label>73.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Yarmiben Mukeshbhai</surname><given-names>G</given-names></name> <name><surname>Sangwan</surname><given-names>V</given-names></name></person-group> <source>An overview on the squamous cell carcinoma in Cattle</source>. <publisher-loc>Ludhiana</publisher-loc>. <year>2024</year>. <fpage>47</fpage>&#x2013;<lpage>50</lpage>. Available online at: <ext-link xlink:href="https://www.gadvasu.in/assests/uploads/images/Vet%20Alumnus_Dec%202024-f.pdf" ext-link-type="uri">https://www.gadvasu.in/assests/uploads/images/Vet%20Alumnus_Dec%202024-f.pdf</ext-link> (Accessed February 4, 2026).</mixed-citation></ref>
<ref id="ref74"><label>74.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Bonenfant</surname><given-names>C</given-names></name> <name><surname>Gaillard</surname><given-names>J</given-names></name> <name><surname>Coulson</surname><given-names>T</given-names></name> <name><surname>Festa-Bianchet</surname><given-names>M</given-names></name> <name><surname>Loison</surname><given-names>A</given-names></name> <name><surname>Garel</surname><given-names>M</given-names></name> <etal/></person-group>. "<chapter-title>Chapter 5 empirical evidence of density-dependence in populations of large herbivores</chapter-title>" In: <source>Advances in ecological research</source>: <publisher-name>Academic Press Inc.</publisher-name> (<year>2009</year>). <fpage>313</fpage>&#x2013;<lpage>57</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0065-2504(09)00405-X</pub-id></mixed-citation></ref>
<ref id="ref75"><label>75.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Franchini</surname><given-names>M</given-names></name> <name><surname>Peric</surname><given-names>T</given-names></name> <name><surname>Frangini</surname><given-names>L</given-names></name> <name><surname>Prandi</surname><given-names>A</given-names></name> <name><surname>Comin</surname><given-names>A</given-names></name> <name><surname>Rota</surname><given-names>M</given-names></name> <etal/></person-group>. <article-title>You&#x2019;re stressing me out! Effect of interspecific competition from red deer on roe deer physiological stress response</article-title>. <source>J Zool</source>. (<year>2023</year>) <volume>320</volume>:<fpage>63</fpage>&#x2013;<lpage>74</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jzo.13058</pub-id></mixed-citation></ref>
<ref id="ref76"><label>76.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cukor</surname><given-names>J</given-names></name> <name><surname>Faltusov&#x00E1;</surname><given-names>M</given-names></name> <name><surname>Vacek</surname><given-names>Z</given-names></name> <name><surname>Linda</surname><given-names>R</given-names></name> <name><surname>Skot&#x00E1;k</surname><given-names>V</given-names></name> <name><surname>V&#x00E1;clavek</surname><given-names>P</given-names></name> <etal/></person-group>. <article-title>Wild boar carcasses in the center of boar activity: crucial risks of ASF transmission</article-title>. <source>Front Vet Sci</source>. (<year>2024</year>) <volume>11</volume>:<fpage>1497361</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fvets.2024.1497361</pub-id>, <pub-id pub-id-type="pmid">39748874</pub-id></mixed-citation></ref>
<ref id="ref77"><label>77.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Galindo</surname><given-names>I</given-names></name> <name><surname>Alonso</surname><given-names>C</given-names></name></person-group>. <article-title>African swine fever virus: a review</article-title>. <source>Viruses</source>. (<year>2017</year>) <volume>9</volume>:<fpage>103</fpage>. doi: <pub-id pub-id-type="doi">10.3390/v9050103</pub-id>, <pub-id pub-id-type="pmid">28489063</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/199180/overview">Livia D'Angelo</ext-link>, University of Naples Federico II, Italy</p></fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/617756/overview">Elena De Felice</ext-link>, University of Camerino, Italy</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/939806/overview">James Edward Brereton</ext-link>, Sparsholt College, United Kingdom</p></fn>
</fn-group>
</back>
</article>