<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Med.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Med.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-858X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmed.2026.1759383</article-id>
<article-version article-version-type="Corrected Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Improved YOLOv8 with average pooling downsampling for detection and classification of intertrochanteric femoral fractures in X-ray images: a study focusing on AO/OTA classification</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Shen</surname> <given-names>Zheming</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Wang</surname> <given-names>Yu</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x2020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3076964/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Chen</surname> <given-names>Yu</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x2020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3108851/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Lu</surname> <given-names>Haowen</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn002"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Tang</surname> <given-names>Can</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Gao</surname> <given-names>Zhiheng</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2976870/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhao</surname> <given-names>Xuequan</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Sun</surname> <given-names>Haifu</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3143069/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Qian</surname> <given-names>Yuchen</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c002"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3109847/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zhang</surname> <given-names>Youbin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c003"><sup>&#x002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Qiao</surname> <given-names>Yusen</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c004"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2926676/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Orthopaedics, The First Affiliated Hospital of Soochow University</institution>, <city>Suzhou</city>, <country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>Xiangcheng District Caohu People&#x2019;s Hospital</institution>, <city>Suzhou</city>, <country country="CN">China</country></aff>
<aff id="aff3"><label>3</label><institution>Cangzhou Integrated Traditional Chinese and Western Medicine Hospital</institution>, <city>Cangzhou</city>, <country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Haifu Sun, <email xlink:href="mailto:shfsdfyy@163.com">shfsdfyy@163.com</email></corresp>
<corresp id="c002">Yuchen Qian, <email xlink:href="mailto:1564885495@qq.com">1564885495@qq.com</email></corresp>
<corresp id="c003">Youbin Zhang, <email xlink:href="mailto:youbinz@suda.edu.cn">youbinz@suda.edu.cn</email></corresp>
<corresp id="c004">Yusen Qiao, <email xlink:href="mailto:qiaoyusen8612@suda.edu.cn">qiaoyusen8612@suda.edu.cn</email></corresp>
<fn fn-type="equal" id="fn002"><label>&#x2020;</label><p>These authors have contributed equally to this work and share first authorship</p></fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-02">
<day>02</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="corrected" iso-8601-date="2026-03-10">
<day>10</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>13</volume>
<elocation-id>1759383</elocation-id>
<history>
<date date-type="received">
<day>02</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>02</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>09</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Shen, Wang, Chen, Lu, Tang, Gao, Zhao, Sun, Qian, Zhang and Qiao.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Shen, Wang, Chen, Lu, Tang, Gao, Zhao, Sun, Qian, Zhang and Qiao</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-02">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Objective</title>
<p>This study aims to develop an artificial intelligence system for the accurate detection and classification of intertrochanteric femoral fractures (types A1&#x2013;A3 according to the AO/OTA classification) in X-ray images, focusing on improving precision and optimizing computational efficiency.</p>
</sec>
<sec>
<title>Methods</title>
<p>This study adopted a retrospective design, using 976 X-ray image datasets collected from hospital archives. The images were preprocessed, annotated by orthopedic specialists, and divided into training and test sets. The model was improved by replacing the traditional convolutional downsampling modules in YOLOv8 with Average Pooling Downsampling (ADown) modules to enhance feature extraction for small fracture targets. Model training incorporated data augmentation techniques and was evaluated using metrics such as precision, recall, and mean Average Precision (mAP).</p>
</sec>
<sec>
<title>Results</title>
<p>The proposed YOLOv8-ADown model achieved an overall mAP50 of 81.7%, higher than the 80.5% of the original YOLOv8. The detection precision for A1, A2, and A3 type fractures increased by 7.3, 3.5, and 7.8%, respectively. Furthermore, the number of model parameters was reduced by 12.3%, and computational complexity (FLOPs) was decreased by 9.8%, demonstrating potential for deployment on edge devices.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>The YOLOv8-ADown model provides an efficient solution for fracture detection and is expected to assist in clinical diagnosis. Future work should address data collection challenges and conduct multi-center validation.</p>
</sec>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>hip fracture</kwd>
<kwd>intertrochanteric femoral fracture</kwd>
<kwd>medical imaging</kwd>
<kwd>object detection</kwd>
<kwd>YOLOv8</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the National Natural Science Foundation of China (Grant No. 82572432).</funding-statement>
</funding-group>
<counts>
<fig-count count="12"/>
<table-count count="2"/>
<equation-count count="4"/>
<ref-count count="42"/>
<page-count count="14"/>
<word-count count="6940"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Precision Medicine</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="S1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Hip fracture is a significant global health problem, especially among the elderly population, where untimely diagnosis can lead to high morbidity and mortality (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>). According to the Global Burden of Disease Study, the incidence and prevalence of hip fractures in patients aged 55 and over have continued to rise over the past three decades. In 2019, the global age-standardized incidence rate reached 681.35 per 100,000 people, highlighting its substantial disease burden (<xref ref-type="bibr" rid="B3">3</xref>). Accurate diagnosis is crucial for treatment decisions; for instance, early detection can help physicians differentiate between surgical intervention (such as internal fixation or arthroplasty) and conservative treatment, thereby optimizing patient outcomes, reducing complications, and lowering healthcare costs (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B5">5</xref>). However, traditional radiological diagnosis highly depends on the physician&#x2019;s level of experience, leading to significant variability in diagnostic sensitivity and specificity. Studies have shown that the sensitivity of fracture detection by general practitioners can be as low as 69.2%, while that of specialists can reach 96.2%. This disparity underscores the necessity for auxiliary tools to reduce diagnostic errors and missed diagnoses (<xref ref-type="bibr" rid="B6">6</xref>&#x2013;<xref ref-type="bibr" rid="B8">8</xref>). Furthermore, the clinical utility of AI tools can be amplified by integrating them with biomechanical principles, where optimization of internal fixation configurations and external fixator designs can enhance treatment precision, thereby providing multidisciplinary support for the YOLOv8-ADown model (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B10">10</xref>).</p>
<p>In recent years, artificial intelligence (AI) has emerged as a promising method for medical image analysis. However, existing models still face challenges of insufficient accuracy and precision when dealing with small targets like fractures (<xref ref-type="bibr" rid="B11">11</xref>&#x2013;<xref ref-type="bibr" rid="B13">13</xref>). For example, most AI research focuses on binary classification (fracture vs. non-fracture) (<xref ref-type="bibr" rid="B14">14</xref>), with only a few studies incorporating fracture grading standards, such as the Garden classification for femoral neck fractures (<xref ref-type="bibr" rid="B15">15</xref>). There is a lack of fine-grained classification of fracture types (e.g., AO/OTA classification), and an inability to provide precise localization of the fracture area (<xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B17">17</xref>). Although some work has attempted to integrate interpretability techniques like Grad-CAM, these methods often fail to directly segment fracture lines, limiting their clinical applicability (<xref ref-type="bibr" rid="B18">18</xref>). Furthermore, evaluations of deep learning-based decision support systems in real clinical environments have shown limited performance improvement when collaborating with human doctors and a risk of high bias (<xref ref-type="bibr" rid="B19">19</xref>).</p>
<p>Addressing these shortcomings, this study proposes an improved framework based on YOLOv8&#x2014;the YOLOv8-ADown model. By replacing the traditional convolutional downsampling modules with Average Pooling Downsampling (ADown) modules, the feature extraction capability for small fracture targets is optimized. This framework not only achieves fracture detection and classification but also significantly improves computational efficiency. Preliminary results indicate that the proposed YOLOv8-ADown framework significantly enhances detection accuracy and computational efficiency, addressing the limitations of existing models in fine-grained fracture classification. Compared to previous studies, the advantages of this framework are: 1. It provides multi-class fracture grading (AO/OTA types A1&#x2013;A3), enhancing interpretability; 2. Through the combination of attention mechanisms and pooling optimization, it balances accuracy and speed, making it suitable for deployment on edge devices (<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B21">21</xref>).</p>
<p>Looking forward to future applications, this model is expected to serve as a clinical auxiliary tool, integrated into radiological workflows, helping doctors quickly identify subtle fractures, particularly in high-volume or resource-limited scenarios (<xref ref-type="bibr" rid="B22">22</xref>). Multi-center validation and real-time deployment will be the next key steps to assess its generalizability and clinical impact (<xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B24">24</xref>). Simultaneously, by integrating with osteoporosis screening and risk stratification tools (e.g., FRAXplus), the AI system can be further extended to preventive care, optimizing the entire process from diagnosis to management (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B26">26</xref>). Through continuous improvement in data diversity and model interpretability, such frameworks are expected to promote the development of personalized medicine, ultimately enhancing the quality of life for hip fracture patients worldwide.</p>
</sec>
<sec id="S2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="S2.SS1">
<label>2.1</label>
<title>Study subjects</title>
<p>The dataset used in this study consisted of 976 X-ray images of intertrochanteric femoral fractures collected from our hospital and affiliated institutions between June 2020 and October 2025. These X-ray images clearly display intertrochanteric femoral fractures of different grades (A1, A2, A3) according to the 2018 version of the AO/OTA classification.</p>
</sec>
<sec id="S2.SS2">
<label>2.2</label>
<title>Dataset preparation</title>
<p>In preparation for input into YOLOv8, the X-ray images underwent a preprocessing stage. This stage included standardizing the images to a uniform size and resolution and converting them to grayscale. Grayscale conversion reduced the number of image channels, improving image processing efficiency.</p>
<p>Under the supervision of an experienced orthopedic surgeon, a medical intern annotated and classified the fracture grades on the X-ray images using the Labelme annotation software. This process constructed a deep learning dataset for intertrochanteric femoral fracture grade detection. The supervision process involved initial training of the intern on AO/OTA criteria, followed by real-time feedback and validation of all annotations by the surgeon. Any discrepancies were resolved via consensus discussions, ensuring adherence to clinical standards. To quantify consistency, a random subset of 100 images was re-annotated by the surgeon, showing high inter-observer agreement (kappa = 0.85). Future work will expand this to include multiple experts for independent annotations to further enhance reliability. Intertrochanteric femoral fractures were classified into three grades: A1, A2, and A3. For clarity, this study adopted a specific naming convention: &#x201C;A1&#x201D; represents type A1 intertrochanteric femoral fracture, &#x201C;A2&#x201D; represents type A2, and &#x201C;A3&#x201D; represents type A3. This naming convention is consistently used in the figures of this paper. In this study, 976 intertrochanteric fracture images were selected for training, including 261 type A1, 579 type A2, and 136 type A3. The aforementioned 976 samples were randomly split into training and test sets in an 8:2 ratio using a random seed method. Although the sample ratios of the three fracture types were severely imbalanced, we employed a class-weighted loss function to mitigate this issue, increasing the loss penalty for misclassification of minority classes. This function assigned higher weights to A1 and A3 fractures during training to reduce bias, but the limited sample size for A3 (136 instances) may still affect subtype performance reliability. Future iterations will incorporate advanced techniques like synthetic data generation to address this imbalance more effectively.</p>
</sec>
<sec id="S2.SS3">
<label>2.3</label>
<title>Principles of the YOLOv8 object detection algorithm</title>
<p>The YOLOv8 algorithm inherits and extends the advantages of previous generations in the YOLO series, further enhancing the accuracy and efficiency of object detection. The algorithm introduces several key architectural improvements. First, YOLOv8 adopts CSPDarknet53 as its backbone network. By introducing Cross-Stage Partial (CSP) connections, it enhances information flow, thereby improving feature extraction efficiency and overall network performance. Second, YOLOv8 introduces a Path Aggregation Network (PAN) in the neck structure, enabling the model to effectively fuse features at different scales, particularly excelling when dealing with targets of varying sizes. Compared to previous generations, YOLOv8 abandons the traditional anchor-based strategy and adopts an anchor-free detection approach. This not only simplifies the model architecture and reduces computational burden but also significantly improves the detection accuracy of small targets. In terms of training strategy, YOLOv8 introduces advanced methods such as Mosaic data augmentation and MixUp, and employs a cosine annealing scheduler to optimize the learning rate, significantly improving the model&#x2019;s generalization ability and convergence speed. These improvements make YOLOv8 perform excellently on multiple object detection benchmarks, showing significant advantages especially in inference speed and detection accuracy. These innovations in YOLOv8 demonstrate great potential and broad application prospects in the field of medical image detection, particularly in fracture detection from medical images, where its high accuracy and efficiency provide strong support for improving diagnostic precision.</p>
</sec>
</sec>
<sec id="S3">
<label>3</label>
<title>Improved fracture detection algorithm based on YOLOv8</title>
<p>To improve the localization accuracy and fracture grade judgment accuracy of the model on X-ray images of intertrochanteric femoral fractures, while reducing the model&#x2019;s computational load and parameters, key improvements were made based on the YOLOv8 framework, proposing the YOLOv8-ADown model. Its structure is shown in <xref ref-type="fig" rid="F1">Figure 1</xref>. The improvement involves replacing the traditional convolutional downsampling (Conv) modules in the backbone and neck networks of YOLOv8 with Average Pooling Downsampling (ADown) modules. This reduces the number of model parameters and computational load, while avoiding the loss of detail during traditional downsampling, thereby enhancing the model&#x2019;s feature extraction capability for subtle fractures in X-ray images.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption><p>YOLOv8-ADown model architecture.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g001.tif">
<alt-text content-type="machine-generated">Flowchart of a convolutional neural network architecture divided into Backbone, Neck, and Head sections, showing modules such as Conv, C2f, ADown, SPPF, UpSample, Concat, and Detect, along with detailed block diagrams for C2f and ADown components.</alt-text>
</graphic>
</fig>
<sec id="S3.SS1">
<label>3.1</label>
<title>Principles of the ADown module</title>
<p>The X-ray image samples in this study contain small fracture targets. The traditional Conv downsampling module performs feature extraction using 3 &#x00D7; 3 convolutions with a stride of 2, which not only increases the number of model parameters and computational load but also fails to effectively capture features of small targets, easily leading to feature loss and missed detections. To address the problem of information loss in small fracture targets, this model introduces the Average Pooling Downsampling (ADown) module into the YOLOv8n model, replacing the Conv downsampling modules in the backbone and neck networks, effectively solving the problem of small fracture target information loss during feature extraction. The workflow of the ADown module is shown in <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption><p>ADown module schematic.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g002.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a neural network block: input tensor undergoes average pooling and max pooling, then two separate convolution operations, and finally their outputs are combined before producing the output tensor.</alt-text>
</graphic>
</fig>
<p>The ADown module is primarily composed of a 2D Average Pooling layer (AvgPool2d), a 2D Max Pooling layer (MaxPool2d), and a Convolutional layer (Conv). First, the AvgPool2d module performs a 2 &#x00D7; 2 average pooling operation with a stride of 1 on the input feature map, calculating the average value of all pixels in the region. This preserves the main features of the image and removes detailed noise, allowing it to capture finer features and avoid the loss of small target features. Then, the feature map obtained from 2D average pooling is evenly split into two groups along the channel dimension, with each group having half the number of channels. These two groups undergo different downsampling operations. The first group undergoes a 3 &#x00D7; 3 2D Max Pooling layer (MaxPool2d) operation with a stride of 2, halving the feature map size and retaining the maximum value in the region, highlighting features of small targets and suppressing background noise, followed by a 1 &#x00D7; 1 convolution to fuse pixel features. The second group of feature maps undergoes downsampling via a 3 &#x00D7; 3 convolutional layer with a stride of 2, halving the feature map size. Finally, the two processed feature map groups are concatenated to restore the original number of channels.</p>
</sec>
</sec>
<sec id="S4">
<label>4</label>
<title>Experimental environment and evaluation metrics</title>
<sec id="S4.SS1">
<label>4.1</label>
<title>Experimental environment</title>
<p>The experiments were conducted on a Windows 11 operating system with an NVIDIA GeForce RTX 3060 Ti 8GB GPU. The virtual environment was configured as follows: Python version 3.9.23, PyTorch version 2.0.0, and CUDA version 11.8.</p>
<p>This study utilized the YOLOv8 deep learning network and proposed a new YOLOv8-ADown fracture detection method. The official Ultralytics YOLOv8n model implementation (version 8.2.103) was used. The initial learning rate (Lr0) was set to 0.01, the final learning rate (Lrf) to 0.01, batch size to 16, and the number of training epochs to 200. Image augmentation techniques (e.g., Mosaic augmentation, horizontal flip, scale, and translate) were used to enhance model robustness. The SGD optimizer with a weight decay of 5 &#x00D7; 10<sup>&#x2013;4</sup> was employed.</p>
</sec>
<sec id="S4.SS2">
<label>4.2</label>
<title>Evaluation metrics</title>
<p>This experiment used Precision (P), Recall (R), and mean Average Precision (mAP) to evaluate the performance of the YOLOv8-ADown model in detecting fractures.</p>
<p>Here, Precision <italic>P</italic> represents the proportion of actual positive samples among the predicted positive samples, calculated as:</p>
<disp-formula id="S4.Ex1">
<mml:math id="M1">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Where: <italic>TP</italic> (True Positive) is the number of correctly classified positive samples, i.e., targets labeled as fracture correctly detected as fracture; <italic>FP</italic> (False Positive) is the number of incorrectly detected negative samples, i.e., targets not labeled as fracture incorrectly detected as fracture.</p>
<p>Recall <italic>R</italic> is calculated as follows:</p>
<disp-formula id="S4.Ex2">
<mml:math id="M2">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Where: <italic>FN</italic> (False Negative) is the number of missed positive samples, i.e., targets labeled as fracture that were not detected.</p>
<p>The mean Average Precision <italic>mAP</italic> uses <italic>mAP</italic><sub>50</sub> (<italic>IoU</italic> threshold of 0.5) and <italic>mAP</italic><sub>50:95</sub> (<italic>IoU</italic> threshold from 0.5 to 0.95 in steps of 0.05) as evaluation metrics. The <italic>mAP</italic> is calculated as follows:</p>
<disp-formula id="S4.Ex3">
<mml:math id="M3">
<mml:mrow>
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>N</mml:mi>
</mml:mfrac>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:munderover>
<mml:mstyle displaystyle="true"><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo></mml:mstyle>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:munderover>
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Where: <italic>N</italic> is the number of target classes. In this study, <italic>N</italic> = 3; <italic>AP</italic><sub><italic>i</italic></sub> is the Average Precision for class <italic>i</italic>, calculated as:</p>
<disp-formula id="S4.Ex4">
<mml:math id="M4">
<mml:mrow>
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:mo>=</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mstyle displaystyle="true"><mml:mo largeop="true" symmetric="true">&#x222B;</mml:mo></mml:mstyle>
<mml:mn>0</mml:mn>
<mml:mn>1</mml:mn>
</mml:msubsup>
<mml:mrow>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>d</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msub>
<mml:mi>R</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>Where: <italic>P</italic><sub><italic>i</italic></sub> and <italic>R</italic><sub><italic>i</italic></sub> represent the precision and recall for detection class <italic>i</italic>, respectively; the Average Precision corresponds to the area under the precision&#x2013;recall curve for that class.</p>
</sec>
</sec>
<sec id="S5">
<label>5</label>
<title>Results analysis</title>
<p>The Precision (P), Recall (R), mAP50, and mAP50:95 of the original YOLOv8 and the improved YOLOv8-ADown model on the dataset used in this study are shown in <xref ref-type="table" rid="T1">Table 1</xref>. The original YOLOv8 model achieved an overall mAP50 of 80.5% for these fracture types, while the improved YOLOv8-ADown model increased the overall mAP50 to 81.7%. This performance is comparable to Yang et al. (<xref ref-type="bibr" rid="B27">27</xref>), who reported an mAP50-95 of 85.9% for vertebral fracture classification using YOLOv8-Seg, highlighting the versatility of YOLOv8 in fracture detection. This indicates that the improved model enhanced the performance in fracture detection. Specifically, the detection precision (P) of the improved model for A1, A2, and A3 type fractures increased by 7.3, 3.5, and 7.8%, respectively, compared to the original model, effectively improving the detection precision for different fracture types.</p>
<table-wrap position="float" id="T1">
<label>TABLE 1</label>
<caption><p>Comparison of model accuracy and parameters before and after improvement.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="center">Model</th>
<th valign="top" align="left">Class</th>
<th valign="top" align="center">All</th>
<th valign="top" align="center">A1</th>
<th valign="top" align="center">A2</th>
<th valign="top" align="center">A3</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center" rowspan="2">YOLOv8</td>
<td valign="top" align="left">Images</td>
<td valign="top" align="center">196</td>
<td valign="top" align="center">50</td>
<td valign="top" align="center">109</td>
<td valign="top" align="center">37</td>
</tr>
<tr>
<td valign="top" align="left">Instances</td>
<td valign="top" align="center">196</td>
<td valign="top" align="center">50</td>
<td valign="top" align="center">109</td>
<td valign="top" align="center">37</td>
</tr>
<tr>
<td valign="top" align="center">Params</td>
<td valign="top" align="left">Box(P)</td>
<td valign="top" align="center">0.766</td>
<td valign="top" align="center">0.723</td>
<td valign="top" align="center">0.782</td>
<td valign="top" align="center">0.794</td>
</tr>
<tr>
<td valign="top" align="center">3.01M</td>
<td valign="top" align="left">R</td>
<td valign="top" align="center">0.714</td>
<td valign="top" align="center">0.66</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.622</td>
</tr>
<tr>
<td valign="top" align="center">FLOPs</td>
<td valign="top" align="left">mAP50</td>
<td valign="top" align="center">0.805</td>
<td valign="top" align="center">0.772</td>
<td valign="top" align="center">0.867</td>
<td valign="top" align="center">0.776</td>
</tr>
<tr>
<td valign="top" align="center">8.1G</td>
<td valign="top" align="left">mAP50-95</td>
<td valign="top" align="center">0.615</td>
<td valign="top" align="center">0.599</td>
<td valign="top" align="center">0.677</td>
<td valign="top" align="center">0.565</td>
</tr>
<tr>
<td valign="top" align="center" rowspan="2">YOLOv8-ADown</td>
<td valign="top" align="left">Images</td>
<td valign="top" align="center">196</td>
<td valign="top" align="center">50</td>
<td valign="top" align="center">109</td>
<td valign="top" align="center">37</td>
</tr>
<tr>
<td valign="top" align="left">Instances</td>
<td valign="top" align="center">196</td>
<td valign="top" align="center">50</td>
<td valign="top" align="center">109</td>
<td valign="top" align="center">37</td>
</tr>
<tr>
<td valign="top" align="center">Params</td>
<td valign="top" align="left">Box(P)</td>
<td valign="top" align="center">0.828</td>
<td valign="top" align="center">0.796</td>
<td valign="top" align="center">0.817</td>
<td valign="top" align="center">0.872</td>
</tr>
<tr>
<td valign="top" align="center">2.64M</td>
<td valign="top" align="left">R</td>
<td valign="top" align="center">0.757</td>
<td valign="top" align="center">0.732</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">0.649</td>
</tr>
<tr>
<td valign="top" align="center">FLOPs</td>
<td valign="top" align="left">mAP50</td>
<td valign="top" align="center">0.817</td>
<td valign="top" align="center">0.773</td>
<td valign="top" align="center">0.868</td>
<td valign="top" align="center">0.81</td>
</tr>
<tr>
<td valign="top" align="center">7.3G</td>
<td valign="top" align="left">mAP50-95</td>
<td valign="top" align="center">0.625</td>
<td valign="top" align="center">0.603</td>
<td valign="top" align="center">0.685</td>
<td valign="top" align="center">0.591</td>
</tr>
</tbody>
</table></table-wrap>
<p>Furthermore, the improved YOLOv8-ADown model reduced the number of parameters (Params) by 12.3% and the computational complexity (FLOPs) by 9.8% compared to the original YOLOv8 model. YOLOv8-ADown significantly reduced the model&#x2019;s parameters and computations, decreasing computational demands, making it more suitable for deployment on edge devices with limited computing power, and potentially improving detection speed under the same computational constraints.</p>
<p><xref ref-type="fig" rid="F3">Figure 3</xref> details the recognition results for the three fracture types. The performance of the YOLOv8 and the improved YOLOv8-ADown algorithms was evaluated on the dataset of 976 fracture samples constructed in this study. Among the 261 analyzed A1-type fractures, the YOLOv8 model correctly identified 70%, while the YOLOv8-ADown model correctly identified 72%, indicating that accuracy in detecting A1-type fractures still needs improvement. Similarly, among the 579 examined A2-type fractures, the YOLOv8 model accurately identified 84%, while the improved model further increased this to 89%, demonstrating the algorithm&#x2019;s effectiveness in identifying A2-type fractures. Notably, among the 136 A3-type fractures in the dataset, only 57% were correctly identified by YOLOv8, while the improved YOLOv8-ADown achieved a correct identification rate of 62%. The detection accuracy for A3-type fractures was the lowest, which may be related to the smaller number of A3-type fracture samples in the dataset.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption><p>Comparison of normalized confusion matrices of the models.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g003.tif">
<alt-text content-type="machine-generated">Side-by-side confusion matrix graphics compare YOLOv8 and YOLOv8-ADown models. Each matrix has predicted classes A1, A2, A3, and background on the vertical axis and true classes on the horizontal axis. YOLOv8-ADown shows slightly higher accuracy for A1 and A2, as reflected in darker color intensity and larger values on the diagonal. Both matrices are normalized and use a blue color scale to represent values. Labels and axes are clear for comparison.</alt-text>
</graphic>
</fig>
<p><xref ref-type="fig" rid="F4">Figure 4</xref> shows the data distribution of the training set, including the number of instances per class, the size and quantity of bounding boxes, the location of center points relative to the entire image, and the aspect ratio of targets in the images. The top-left corner shows the instance count for each class in the training set, i.e., the total number of occurrences of that class across all training images. It can be seen that the sample sizes for A1, A2, and A3 type fractures are uneven, providing a basis for understanding the sample size of each class and guidance for data augmentation. The top-right corner shows the effect of aligning and overlaying the bounding boxes of all classes centered on the image origin, allowing for an intuitive understanding of the shape, size, and approximate positional distribution of the bounding boxes for each class. The bottom-left corner shows the distribution of target center points relative to the entire image, allowing further analysis of the spatial location preference of the targets or defects within the image. The bottom-right corner shows the distribution of the height-to-width ratios of the target bounding boxes in the images, allowing further analysis of the size and shape preferences of the defect bounding boxes.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption><p>Distribution and attribute analysis of training set data.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g004.tif">
<alt-text content-type="machine-generated">Grouped data visualizations include a vertical bar chart comparing instances of categories A1, A2, and A3, three scatter plots showing relationships among variables x versus y, width versus height, and a density contour plot shaped like a rectangle.</alt-text>
</graphic>
</fig>
<p><xref ref-type="fig" rid="F5">Figure 5</xref> depicts the correlation matrix (pairs plot) between attributes of the annotation boxes (labels), such as the normalized horizontal and vertical coordinates of the center point (x, y) and the normalized width and height. The diagonal of the matrix shows the one-dimensional distribution histograms for each attribute. The off-diagonal plots are two-dimensional scatter plots between two different attributes, showing their relationships; darker colors indicate denser data points.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption><p>Correlation matrix of bounding box attributes.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g005.tif">
<alt-text content-type="machine-generated">Scatterplot matrix with blue density hexbin plots and diagonal histograms compares four variables labeled x, y, width, and height, illustrating distributions and pairwise relationships among all variables for exploratory data analysis.</alt-text>
</graphic>
</fig>
<p><xref ref-type="fig" rid="F6">Figure 6</xref> shows the curves of loss changes and performance metrics (Precision, Recall, etc.) during model training, with the loss curves trending downward and the performance metrics trending upward. The model uses loss functions to evaluate the difference between predicted and true values; these loss functions significantly impact model performance as they guide the training process toward more accurate predictions. The loss functions are divided into classification and regression parts: The classification loss (cls_loss) uses Binary Cross-Entropy Loss to calculate the difference between predicted and true classes. A lower classification loss indicates more precise classification of detected objects into their respective categories. The regression branch losses include dfl_loss (Distribution Focal Loss) and box_loss (Bounding Box Loss). The box_loss calculates the loss for the predicted bounding box&#x2019;s position and size relative to the ground truth box, using the Intersection over Union (IoU) metric. A higher IoU value indicates higher localization accuracy, reflecting a more precise overlap between the predicted and ground truth bounding boxes. In summary, these loss functions are indispensable for optimizing the YOLOv8 model, thereby driving improvements in detection and classification accuracy.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption><p>Comparison of model loss and accuracy curves.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g006.tif">
<alt-text content-type="machine-generated">Two sets of training and validation metrics graphs compare YOLOv8 and YOLOv8-ADown models across 200 epochs, including box loss, classification loss, distribution focal loss, precision, recall, mAP50, and mAP50-95, with both results and smoothed trend lines shown for each metric.</alt-text>
</graphic>
</fig>
<p>The curves on the right side of <xref ref-type="fig" rid="F6">Figure 6</xref>, showing an upward trend, represent the changes in Precision, Recall, mAP50, and mAP50:95 during training. Precision is defined as a measure of the accuracy of the model&#x2019;s positive predictions, calculated as the ratio of correctly identified positive instances (True Positives) to all instances identified as positive (True Positives + False Positives). Higher precision indicates that the model makes fewer false positive errors. Recall represents the proportion of actual positive samples correctly identified by the model, essentially measuring the model&#x2019;s ability to identify all relevant instances in the dataset. It is calculated as the ratio of True Positives to the sum of True Positives and False Negatives. mAP50 and mAP50:95 represent the mean Average Precision at an IoU threshold of 0.5 and the average of mean Average Precision computed at IoU thresholds from 0.5 to 0.95 with a step size of 0.05, respectively, reflecting the model&#x2019;s true detection performance from different dimensions.</p>
<p>Subsequently, a comparative analysis of the YOLOv8 model before and after improvement was conducted using four key metrics&#x2014;Precision, Recall, F1-Score, and Average Precision (AP)&#x2014;to provide a comprehensive evaluation of its performance enhancements across various dimensions. First, the Precision for the three fracture types was examined, which in this experiment refers to the proportion of correctly predicted fracture types out of the total number of fracture types predicted by the model. As shown in <xref ref-type="fig" rid="F7">Figure 7</xref>, which compares the Precision curves of the models before and after improvement, the enhanced YOLOv8-ADown model achieved a Precision of 0.979 for recognizing all fracture classes, representing an increase of 0.031 compared to the original YOLOv8&#x2019;s Precision of 0.948. This result clearly demonstrates that the improved YOLOv8-ADown model exhibits a significant enhancement in detection accuracy relative to the original model.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption><p>Comparison of model P curves.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g007.tif">
<alt-text content-type="machine-generated">Side-by-side line graphs compare precision-confidence curves for YOLOv8 and YOLOv8-ADown models. Each chart plots precision against confidence for three classes and aggregated performance, showing higher precision for YOLOv8-ADown at similar confidence levels.</alt-text>
</graphic>
</fig>
<p>Recall (R) represents the proportion of correctly predicted fracture types out of the total number of actual fracture types. <xref ref-type="fig" rid="F8">Figure 8</xref> shows the comparison of the R curves between the original and improved models. Although the comprehensive recall score in the legend is 0.96 for both, showing no obvious overall improvement, the recall curve for each individual fracture type in the improved model shows some enhancement compared to the pre-improvement model, indicating that the improved YOLOv8-ADown model has a certain positive effect on recall.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption><p>Comparison of model R curves.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g008.tif">
<alt-text content-type="machine-generated">Two side-by-side line graphs compare recall-confidence curves for YOLOv8 and YOLOv8-ADown models, each showing performance for classes A1, A2, A3, and all classes combined, with the combined class highlighted in bold blue.</alt-text>
</graphic>
</fig>
<p>The F1-Score is the harmonic mean of Precision and Recall, used to comprehensively evaluate the performance of a binary classification model. Setting the confidence threshold to 0.5, <xref ref-type="fig" rid="F9">Figure 9</xref> shows the F1 curve comparison between the original and improved models. It can be observed that the F1 scores for all three fracture types (A1, A2, A3) slightly improved. Although the comprehensive F1 score did not show a significant increase, the confidence threshold corresponding to the highest F1 score increased from 0.495 to 0.720. This indicates that the model achieves a better F1 score at higher confidence thresholds, making the model&#x2019;s predictions more reliable.</p>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption><p>Comparison of model F1 curves.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g009.tif">
<alt-text content-type="machine-generated">Two side-by-side line charts labeled F1-Confidence Curve compare YOLOv8 and YOLOv8-ADown models. Each chart shows F1 score versus confidence for classes A1, A2, A3 and a combined class line, with YOLOv8 at 0.495 and YOLOv8-ADown at 0.720 for all classes.</alt-text>
</graphic>
</fig>
<p>As the most important among the four metrics, the Average Precision (AP) value provides a more comprehensive measure of the relationship between Precision and Recall. Essentially, the AP value is the area under the Precision-Recall (P-R) curve; a larger area indicates a higher AP value, meaning better detection accuracy for that class of objects. <xref ref-type="fig" rid="F10">Figure 10</xref> shows the comparison of the P-R curves between the original and improved models. After replacing the traditional Conv modules with the ADown downsampling modules, the changes in AP values for each fracture type can be seen. Among them, the AP value for A1-type fractures showed no significant change, but the P-R curve became noticeably smoother. The AP value for A2-type fractures increased from 0.866 to 0.867. The AP value for A3-type fractures improved more substantially, increasing from 0.777 to 0.810, a gain of 0.033. Overall, the mean Average Precision (mAP) for the three fracture types increased by 0.011, from 0.806 to 0.817. This first comparative experiment confirms that the introduction of the ADown downsampling module leads to a relatively clear improvement in the model&#x2019;s detection performance.</p>
<fig id="F10" position="float">
<label>FIGURE 10</label>
<caption><p>Comparison of model PR curves.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g010.tif">
<alt-text content-type="machine-generated">Two side-by-side precision-recall curve charts compare YOLOv8 and YOLOv8-ADown models. Each plot displays three class curves and one combined curve, with YOLOv8 showing an all-classes mAP@0.5 of 0.806 and YOLOv8-ADown showing 0.817. Both models include similar class-specific average precision: A1 around 0.773, A2 approximately 0.866 or 0.867, and A3 near 0.777 or 0.810, respectively. The image highlights a performance comparison between the two models.</alt-text>
</graphic>
</fig>
<p><xref ref-type="fig" rid="F11">Figure 11</xref> shows the visual comparison of the recognition results between the YOLOv8 model and the YOLOv8-ADown model. It can be observed that the improved model increased the recognition confidence for all three fracture types. The confidence for A1-type fractures increased from 0.88 to 0.94, for A2-type from 0.86 to 0.89, and for A3-type from 0.90 to 0.92. This further proves that the improved model enhances the recognition capability for the three fracture types.</p>
<fig id="F11" position="float">
<label>FIGURE 11</label>
<caption><p>Visualization comparison of model recognition results.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g011.tif">
<alt-text content-type="machine-generated">Nine-panel comparison showing three separate X-ray images of a left hip and femur, each presented in three columns: original image, YOLOv8 inference results with colored bounding boxes and confidence scores, and YOLOv8-ADown inference results with similar annotations.</alt-text>
</graphic>
</fig>
<p>Since this study only collected 976 original images, there is an issue of limited data volume. To further validate the robustness of the model, data augmentation was performed on the original images and labels. Various data augmentation methods are available; this study adopted horizontal flipping to simulate the left-right symmetric structure of the human body, enabling a single image to serve for predicting both left and right fracture types. Additionally, the built-in data augmentation methods of the YOLOv8 model were employed during training, including hue adjustment (hsv_h), saturation adjustment (hsv_s), brightness adjustment (hsv_v), translation, scaling, etc., to further test the model&#x2019;s robustness in training.</p>
<p>Experiments were conducted on the augmented dataset, with a random 7:3 split for training and validation sets. The training results are shown in <xref ref-type="table" rid="T2">Table 2</xref>. The improved YOLOv8-ADown model achieved improvements of 6.1, 2.6, 1.1, and 1.2% in the four parameters P, R, mAP50, and mAP50-95 on the augmented dataset, respectively. The detection accuracy of the model on the augmented dataset remained at a good level, demonstrating good robustness.</p>
<table-wrap position="float" id="T2">
<label>TABLE 2</label>
<caption><p>Training results of the models before and after improvement on the augmented dataset.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="center">Model</th>
<th valign="top" align="left">Class</th>
<th valign="top" align="center">All</th>
<th valign="top" align="center">A1</th>
<th valign="top" align="center">A2</th>
<th valign="top" align="center">A3</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center" rowspan="6">YOLOv8</td>
<td valign="top" align="left">Images</td>
<td valign="top" align="center">586</td>
<td valign="top" align="center">176</td>
<td valign="top" align="center">298</td>
<td valign="top" align="center">112</td>
</tr>
<tr>
<td valign="top" align="left">Instances</td>
<td valign="top" align="center">586</td>
<td valign="top" align="center">176</td>
<td valign="top" align="center">298</td>
<td valign="top" align="center">112</td>
</tr>
<tr>
<td valign="top" align="left">Box(P)</td>
<td valign="top" align="center">0.748</td>
<td valign="top" align="center">0.715</td>
<td valign="top" align="center">0.782</td>
<td valign="top" align="center">0.781</td>
</tr>
<tr>
<td valign="top" align="left">R</td>
<td valign="top" align="center">0.706</td>
<td valign="top" align="center">0.642</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.613</td>
</tr>
<tr>
<td valign="top" align="left">mAP50</td>
<td valign="top" align="center">0.794</td>
<td valign="top" align="center">0.758</td>
<td valign="top" align="center">0.867</td>
<td valign="top" align="center">0.767</td>
</tr>
<tr>
<td valign="top" align="left">mAP50-95</td>
<td valign="top" align="center">0.601</td>
<td valign="top" align="center">0.592</td>
<td valign="top" align="center">0.677</td>
<td valign="top" align="center">0.556</td>
</tr>
<tr>
<td valign="top" align="center" rowspan="6">YOLOv8-ADown</td>
<td valign="top" align="left">Images</td>
<td valign="top" align="center">586</td>
<td valign="top" align="center">176</td>
<td valign="top" align="center">298</td>
<td valign="top" align="center">112</td>
</tr>
<tr>
<td valign="top" align="left">Instances</td>
<td valign="top" align="center">586</td>
<td valign="top" align="center">176</td>
<td valign="top" align="center">298</td>
<td valign="top" align="center">112</td>
</tr>
<tr>
<td valign="top" align="left">Box(P)</td>
<td valign="top" align="center">0.809</td>
<td valign="top" align="center">0.789</td>
<td valign="top" align="center">0.806</td>
<td valign="top" align="center">0.864</td>
</tr>
<tr>
<td valign="top" align="left">R</td>
<td valign="top" align="center">0.732</td>
<td valign="top" align="center">0.723</td>
<td valign="top" align="center">0.879</td>
<td valign="top" align="center">0.64</td>
</tr>
<tr>
<td valign="top" align="left">mAP50</td>
<td valign="top" align="center">0.805</td>
<td valign="top" align="center">0.759</td>
<td valign="top" align="center">0.854</td>
<td valign="top" align="center">0.798</td>
</tr>
<tr>
<td valign="top" align="left">mAP50-95</td>
<td valign="top" align="center">0.613</td>
<td valign="top" align="center">0.601</td>
<td valign="top" align="center">0.672</td>
<td valign="top" align="center">0.578</td>
</tr>
</tbody>
</table></table-wrap>
<p>The core focus before and after improvement lies in the changes of the four parameters P, R, mAP50, and mAP50-95. Therefore, to more intuitively display the changes in model accuracy before and after improvement, a comparison curve of the training accuracy before and after improvement on the augmented dataset was plotted, as shown in <xref ref-type="fig" rid="F12">Figure 12</xref>.</p>
<fig id="F12" position="float">
<label>FIGURE 12</label>
<caption><p>Comparison of accuracy curves between the improved and original models on the data augmented dataset.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fmed-13-1759383-g012.tif">
<alt-text content-type="machine-generated">Four line charts compare YOLOv8n and YOLOv8n-ADown across 200 epochs for precision, recall, mAP 0.5, and mAP 0.5:0.95. Red and blue lines represent each model, with both demonstrating similar upward performance trends and small differences in each metric. Legends and metric labels are included for clarity.</alt-text>
</graphic>
</fig>
</sec>
<sec id="S6" sec-type="discussion">
<label>6</label>
<title>Discussion</title>
<sec id="S6.SS1">
<label>6.1</label>
<title>Research summary and main contributions</title>
<p>This study introduced YOLOv8-ADown, an enhanced framework for intertrochanteric femoral fracture detection. Key improvements, such as replacing convolutional downsampling with ADown modules, boosted feature extraction for small targets, yielding an overall mAP50 of 81.7% (vs. 80.5% for baseline YOLOv8). Precision gains for A1, A2, and A3 fractures were 7.3, 3.5, and 7.8%, respectively, while parameters and FLOPs decreased by 12.3 and 9.8%. These outcomes highlight our model&#x2019;s efficiency-edge device potential, aligning with lightweight trends like GSCDown in transmission line detection (<xref ref-type="bibr" rid="B28">28</xref>).</p>
<p>The innovation of this work lies in being the first to achieve fine-grained classification and localization of A1-A3 type fractures according to the AO/OTA classification, breaking through the limitations of traditional binary classification models. Wang et al. (<xref ref-type="bibr" rid="B29">29</xref>) applied an enhanced YOLOv8 for distal radius fracture classification, achieving high accuracy in AO typing, which aligns with our focus on computational efficiency. Furthermore, by combining attention mechanisms and pooling operations, the model&#x2019;s feature extraction capability in complex backgrounds is enhanced. This is consistent with the Local Importance-based Pooling (LIP) method proposed by Gao et al. (<xref ref-type="bibr" rid="B30">30</xref>), which optimizes the downsampling process through adaptive weights to improve discriminative feature retention.</p>
</sec>
<sec id="S6.SS2">
<label>6.2</label>
<title>Research limitations and shortcomings</title>
<p>Although the YOLOv8-ADown model performs excellently on multiple metrics, the following limitations remain:</p>
<p>First, the data sample imbalance issue is prominent. With only 136 samples for A3-type fractures, its recognition rate is relatively low (62%). To enhance the credibility of subtype-specific claims, we recommend that future studies prioritize data balancing through multi-center collaborations or oversampling methods. This will ensure more equitable representation of all fracture classes and improve model generalizability. The reliability of the new classification system is affected by the sample size, and the generalization ability for minority categories is insufficient (<xref ref-type="bibr" rid="B31">31</xref>). Similarly, biomechanical analyses reveal that the management of the fracture line requires caution; for instance, interfragmentary compression may impose additional stress on the physeal plate, suggesting that AI models require diverse, multi-center data to mitigate such clinical decision-making biases (<xref ref-type="bibr" rid="B32">32</xref>). Similar challenges were noted by Sun et al. (<xref ref-type="bibr" rid="B33">33</xref>) in predicting post-surgical pain, where sample size limitations impacted model generalizability, underscoring the need for expanded datasets in orthopedic AI studies.</p>
<p>Second, the single-center retrospective data may introduce biases from equipment differences and imaging parameters, limiting the model&#x2019;s generalizability. To mitigate this, future studies should involve multi-center prospective data collection, encompassing varied imaging protocols and demographic populations. This approach will help validate the model&#x2019;s performance across diverse clinical settings and enhance its adoption in real-world scenarios. The performance of AI-assisted systems may fluctuate in multi-center validations; for example, while sensitivity in pelvic fracture detection might improve, specificity could decrease, highlighting the necessity of external validation (<xref ref-type="bibr" rid="B34">34</xref>).</p>
<p>Third, the annotation process may introduce subjectivity. Although annotations were performed by a medical intern under rigorous supervision, we acknowledge that involving multiple specialists could reduce potential bias. In future studies, we plan to adopt a dual-annotation framework with blinded reviews to improve generalizability and minimize subjectivity in label generation.</p>
<p>Additionally, the model&#x2019;s ability to handle complex fractures (such as comminuted A3-type fractures) is insufficient. Although deep learning models for proximal femoral fracture detection may outperform radiologists, their error rates increase in cases with skeletal abnormalities, reflecting the model&#x2019;s poor adaptability to morphological variations (<xref ref-type="bibr" rid="B35">35</xref>).</p>
</sec>
<sec id="S6.SS3">
<label>6.3</label>
<title>Future research directions</title>
<p>Addressing the above limitations, future research can focus on the following aspects: First, expanding the dataset and conducting multi-center validation. Drawing on global hip fracture epidemiology studies (<xref ref-type="bibr" rid="B36">36</xref>), integrating diverse data across ages, geographies, and clinical settings can enhance model robustness. Second, the model architecture can be further optimized. For instance, incorporating self-attention mechanisms like Transformers, or methods like using deformable convolutions to improve YOLOv8 (<xref ref-type="bibr" rid="B37">37</xref>), can enhance the detection capability for irregularly shaped targets. Simultaneously, multi-modal data fusion is an important direction. Digital twin technology, by combining multi-modal images such as CT and MRI, enables personalized anatomical modeling (<xref ref-type="bibr" rid="B38">38</xref>). Simultaneously, the integration of material science data, such as the biomechanical performance and fatigue behavior of different screw materials, represents a crucial direction for extending the AI system into an implant recommendation platform for end-to-end patient management (<xref ref-type="bibr" rid="B39">39</xref>). Our model could be extended to 3D fracture detection and risk prediction. For instance, Gao et al. (<xref ref-type="bibr" rid="B40">40</xref>) demonstrated the utility of YOLOv8-seg in classifying intervertebral disc anomalies from CT images, suggesting potential for adapting our model to other spinal disorders. Furthermore, clinical deployment requires enhanced interpretability. The multimodal large language model (MLLM) by Nam et al. (<xref ref-type="bibr" rid="B41">41</xref>) improves diagnostic transparency through visual question answering. Our work could introduce similar techniques like Grad-CAM++ to generate heatmaps. Finally, cross-disease generalization research has potential. 
Multi-scale optimization studies on YOLOv8 indicate that the model can be adapted to other fracture types (e.g., femoral neck fractures) (<xref ref-type="bibr" rid="B42">42</xref>). Future work could explore its feasibility as a universal auxiliary platform.</p>
</sec>
</sec>
<sec id="S7" sec-type="conclusion">
<label>7</label>
<title>Conclusion</title>
<p>The YOLOv8-ADown model, improved through average pooling downsampling, achieves a balance between accuracy and efficiency in the detection of intertrochanteric femoral fractures, providing an efficient and lightweight AI-assisted tool for clinical practice. In the future, through multi-center data validation, architectural innovation, and clinical integration, it is expected to promote the practical application of AI in orthopedic imaging, ultimately optimizing the allocation of medical resources and improving patient outcomes. Compared to existing research, the advantages of this framework in fine-grained classification and computational efficiency lay the foundation for personalized medicine in fracture diagnosis.</p>
</sec>
</body>
<back>
<sec id="S8" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material; further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec id="S9" sec-type="author-contributions">
<title>Author contributions</title>
<p>ZS: Writing &#x2013; original draft. YW: Writing &#x2013; original draft. YC: Writing &#x2013; original draft. HL: Writing &#x2013; original draft. CT: Writing &#x2013; review &#x0026; editing. ZG: Writing &#x2013; review &#x0026; editing. XZ: Writing &#x2013; review &#x0026; editing. HS: Writing &#x2013; review &#x0026; editing. YcQ: Writing &#x2013; review &#x0026; editing. YZ: Writing &#x2013; review &#x0026; editing. YsQ: Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec id="S11" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="S12" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="S14" sec-type="correction-note">
<title>Correction note</title>
<p>This article has been corrected with minor changes. These changes do not impact the scientific content of the article.</p>
</sec>
<sec id="S13" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Twinprai</surname> <given-names>N</given-names></name> <name><surname>Boonrod</surname> <given-names>A</given-names></name> <name><surname>Boonrod</surname> <given-names>A</given-names></name> <name><surname>Chindaprasirt</surname> <given-names>J</given-names></name> <name><surname>Sirithanaphol</surname> <given-names>W</given-names></name> <name><surname>Chindaprasirt</surname> <given-names>P</given-names></name><etal>et al</etal></person-group>. <article-title>Artificial intelligence (AI) vs. human in hip fracture detection.</article-title> <source><italic>Heliyon</italic>.</source> (<year>2022</year>) <volume>8</volume>:<fpage>e11266</fpage>. <pub-id pub-id-type="doi">10.1016/j.heliyon.2022.e11266</pub-id> <pub-id pub-id-type="pmid">36339768</pub-id></mixed-citation></ref>
<ref id="B2">
<label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lex</surname> <given-names>JR</given-names></name> <name><surname>Di Michele</surname> <given-names>J</given-names></name> <name><surname>Koucheki</surname> <given-names>R</given-names></name> <name><surname>Pincus</surname> <given-names>D</given-names></name> <name><surname>Whyne</surname> <given-names>C</given-names></name> <name><surname>Ravi</surname> <given-names>B</given-names></name></person-group>. <article-title>Artificial intelligence for hip fracture detection and outcome prediction: a systematic review and meta-analysis.</article-title> <source><italic>JAMA Netw Open</italic>.</source> (<year>2023</year>) <volume>6</volume>:<fpage>e233391</fpage>. <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2023.3391</pub-id> <pub-id pub-id-type="pmid">36930153</pub-id></mixed-citation></ref>
<ref id="B3">
<label>3.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Feng</surname> <given-names>JN</given-names></name> <name><surname>Zhang</surname> <given-names>CG</given-names></name> <name><surname>Li</surname> <given-names>BH</given-names></name> <name><surname>Zhan</surname> <given-names>SY</given-names></name> <name><surname>Wang</surname> <given-names>SF</given-names></name> <name><surname>Song</surname> <given-names>CL</given-names></name></person-group>. <article-title>Global burden of hip fracture: the Global Burden of Disease Study.</article-title> <source><italic>Osteoporos Int.</italic></source> (<year>2024</year>) <volume>35</volume>:<fpage>41</fpage>&#x2013;<lpage>52</lpage>. <pub-id pub-id-type="doi">10.1007/s00198-023-06907-3</pub-id> <pub-id pub-id-type="pmid">37704919</pub-id></mixed-citation></ref>
<ref id="B4">
<label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cheng</surname> <given-names>CT</given-names></name> <name><surname>Ho</surname> <given-names>TY</given-names></name> <name><surname>Lee</surname> <given-names>TY</given-names></name> <name><surname>Chang</surname> <given-names>CC</given-names></name> <name><surname>Chou</surname> <given-names>CC</given-names></name> <name><surname>Chen</surname> <given-names>CC</given-names></name><etal>et al</etal></person-group>. <article-title>Application of a deep learning algorithm for detection and visualization of hip fractures on plain pelvic radiographs.</article-title> <source><italic>Eur Radiol</italic>.</source> (<year>2019</year>) <volume>29</volume>:<fpage>5469</fpage>&#x2013;<lpage>77</lpage>. <pub-id pub-id-type="doi">10.1007/s00330-019-06167-y</pub-id> <pub-id pub-id-type="pmid">30937588</pub-id></mixed-citation></ref>
<ref id="B5">
<label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zheng</surname> <given-names>Z</given-names></name> <name><surname>Ryu</surname> <given-names>BY</given-names></name> <name><surname>Kim</surname> <given-names>SE</given-names></name> <name><surname>Song</surname> <given-names>DS</given-names></name> <name><surname>Kim</surname> <given-names>SH</given-names></name> <name><surname>Park</surname> <given-names>JW</given-names></name><etal>et al</etal></person-group>. <article-title>Deep learning for automated hip fracture detection and classification: achieving superior accuracy.</article-title> <source><italic>Bone Joint J.</italic></source> (<year>2025</year>) <volume>107</volume>:<fpage>213</fpage>&#x2013;<lpage>20</lpage>. <pub-id pub-id-type="doi">10.1302/0301-620X.107B2.BJJ-2024-0791.R1</pub-id> <pub-id pub-id-type="pmid">39889758</pub-id></mixed-citation></ref>
<ref id="B6">
<label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Adams</surname> <given-names>SJ</given-names></name> <name><surname>Henderson</surname> <given-names>RDE</given-names></name> <name><surname>Yi</surname> <given-names>X</given-names></name> <name><surname>Babyn</surname> <given-names>P</given-names></name></person-group>. <article-title>Artificial intelligence solutions for analysis of X-ray images.</article-title> <source><italic>Can Assoc Radiol J</italic>.</source> (<year>2020</year>) <volume>72</volume>:<fpage>60</fpage>&#x2013;<lpage>72</lpage>. <pub-id pub-id-type="doi">10.1177/0846537120941671</pub-id> <pub-id pub-id-type="pmid">32757950</pub-id></mixed-citation></ref>
<ref id="B7">
<label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Urakawa</surname> <given-names>T</given-names></name> <name><surname>Tanaka</surname> <given-names>Y</given-names></name> <name><surname>Goto</surname> <given-names>S</given-names></name> <name><surname>Matsuzawa</surname> <given-names>H</given-names></name> <name><surname>Watanabe</surname> <given-names>K</given-names></name> <name><surname>Endo</surname> <given-names>N</given-names></name></person-group>. <article-title>Detecting intertrochanteric hip fractures with orthopedist-level accuracy using a deep convolutional neural network.</article-title> <source><italic>Skeletal Radiol</italic>.</source> (<year>2018</year>) <volume>48</volume>:<fpage>239</fpage>&#x2013;<lpage>44</lpage>. <pub-id pub-id-type="doi">10.1007/s00256-018-3016-3</pub-id> <pub-id pub-id-type="pmid">29955910</pub-id></mixed-citation></ref>
<ref id="B8">
<label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sun</surname> <given-names>X</given-names></name> <name><surname>Jing</surname> <given-names>P</given-names></name> <name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Sun</surname> <given-names>H</given-names></name> <name><surname>Tang</surname> <given-names>W</given-names></name> <name><surname>Mi</surname> <given-names>J</given-names></name><etal>et al</etal></person-group>. <article-title>Risk prediction of osteoporotic vertebral compression fractures in postmenopausal osteoporotic women by machine learning modelling.</article-title> <source><italic>Front Med</italic>.</source> (<year>2025</year>) <volume>12</volume>:<fpage>1664219</fpage>. <pub-id pub-id-type="doi">10.3389/fmed.2025.1664219</pub-id> <pub-id pub-id-type="pmid">41020247</pub-id></mixed-citation></ref>
<ref id="B9">
<label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Inal</surname> <given-names>S</given-names></name> <name><surname>Gok</surname> <given-names>K</given-names></name> <name><surname>Gok</surname> <given-names>A</given-names></name> <name><surname>Pinar</surname> <given-names>AM</given-names></name> <name><surname>Inal</surname> <given-names>C</given-names></name></person-group>. <article-title>Comparison of biomechanical effects of different configurations of kirschner wires on the epiphyseal plate and stability in a salter-harris type 2 distal femoral fracture model.</article-title> <source><italic>J Am Podiatr Med Assoc</italic>.</source> (<year>2019</year>) <volume>109</volume>:<fpage>13</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.7547/16-112</pub-id> <pub-id pub-id-type="pmid">30964320</pub-id></mixed-citation></ref>
<ref id="B10">
<label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gok</surname> <given-names>A</given-names></name> <name><surname>Inal</surname> <given-names>S</given-names></name> <name><surname>Taspinar</surname> <given-names>F</given-names></name> <name><surname>Gulbandilar</surname> <given-names>E</given-names></name> <name><surname>Gok</surname> <given-names>K</given-names></name></person-group>. <article-title>Comparison of parallel or convergent proximal schanz screw placement of pertrochanteric fixator in intertrochanteric fracture model.</article-title> <source><italic>Mech. Sci.</italic></source> (<year>2017</year>) <volume>8</volume>:<fpage>259</fpage>&#x2013;<lpage>66</lpage>. <pub-id pub-id-type="doi">10.5194/ms-8-259-2017</pub-id></mixed-citation></ref>
<ref id="B11">
<label>11.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Meza</surname> <given-names>G</given-names></name> <name><surname>Ganta</surname> <given-names>D</given-names></name> <name><surname>Gonzalez Torres</surname> <given-names>S</given-names></name></person-group>. <article-title>Deep learning approach for arm fracture detection based on an improved YOLOv8 Algorithm.</article-title> <source><italic>Algorithms.</italic></source> (<year>2024</year>) <volume>17</volume>:<fpage>471</fpage>. <pub-id pub-id-type="doi">10.3390/a17110471</pub-id></mixed-citation></ref>
<ref id="B12">
<label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Guo</surname> <given-names>M-H</given-names></name> <name><surname>Xu</surname> <given-names>TX</given-names></name> <name><surname>Liu</surname> <given-names>JJ</given-names></name> <name><surname>Liu</surname> <given-names>ZN</given-names></name> <name><surname>Jiang</surname> <given-names>PT</given-names></name> <name><surname>Mu</surname> <given-names>TJ</given-names></name><etal>et al</etal></person-group>. <article-title>Attention mechanisms in computer vision: a survey.</article-title> <source><italic>Comput Vis Media.</italic></source> (<year>2022</year>) <volume>8</volume>:<fpage>331</fpage>&#x2013;<lpage>68</lpage>. <pub-id pub-id-type="doi">10.1007/s41095-022-0271-y</pub-id></mixed-citation></ref>
<ref id="B13">
<label>13.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>P</given-names></name> <name><surname>Lu</surname> <given-names>L</given-names></name> <name><surname>Chen</surname> <given-names>Y</given-names></name> <name><surname>Huo</surname> <given-names>T</given-names></name> <name><surname>Xue</surname> <given-names>M</given-names></name> <name><surname>Wang</surname> <given-names>H</given-names></name><etal>et al</etal></person-group>. <article-title>Artificial intelligence to detect the femoral intertrochanteric fracture: the arrival of the intelligent-medicine era.</article-title> <source><italic>Front Bioeng Biotechnol</italic>.</source> (<year>2022</year>) <volume>10</volume>:<fpage>927926</fpage>. <pub-id pub-id-type="doi">10.3389/fbioe.2022.927926</pub-id> <pub-id pub-id-type="pmid">36147533</pub-id></mixed-citation></ref>
<ref id="B14">
<label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Adams</surname> <given-names>M</given-names></name> <name><surname>Chen</surname> <given-names>W</given-names></name> <name><surname>Holcdorf</surname> <given-names>D</given-names></name> <name><surname>McCusker</surname> <given-names>MW</given-names></name> <name><surname>Howe</surname> <given-names>PD</given-names></name> <name><surname>Gaillard</surname> <given-names>F</given-names></name></person-group>. <article-title>Computer vs human: deep learning versus perceptual training for the detection of neck of femur fractures.</article-title> <source><italic>J Med Imaging Radiat Oncol</italic>.</source> (<year>2018</year>) <volume>63</volume>:<fpage>27</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1111/1754-9485.12828</pub-id> <pub-id pub-id-type="pmid">30407743</pub-id></mixed-citation></ref>
<ref id="B15">
<label>15.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mutasa</surname> <given-names>S</given-names></name> <name><surname>Varada</surname> <given-names>S</given-names></name> <name><surname>Goel</surname> <given-names>A</given-names></name> <name><surname>Wong</surname> <given-names>TT</given-names></name> <name><surname>Rasiej</surname> <given-names>MJ</given-names></name></person-group>. <article-title>Advanced deep learning techniques applied to automated femoral neck fracture detection and classification.</article-title> <source><italic>J Digit Imaging</italic>.</source> (<year>2020</year>) <volume>33</volume>:<fpage>1209</fpage>&#x2013;<lpage>17</lpage>. <pub-id pub-id-type="doi">10.1007/s10278-020-00364-8</pub-id> <pub-id pub-id-type="pmid">32583277</pub-id></mixed-citation></ref>
<ref id="B16">
<label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Krogue</surname> <given-names>JD</given-names></name> <name><surname>Cheng</surname> <given-names>KV</given-names></name> <name><surname>Hwang</surname> <given-names>KM</given-names></name> <name><surname>Toogood</surname> <given-names>P</given-names></name> <name><surname>Meinberg</surname> <given-names>EG</given-names></name> <name><surname>Geiger</surname> <given-names>EJ</given-names></name><etal>et al</etal></person-group>. <article-title>Automatic hip fracture identification and functional subclassification with deep learning.</article-title> <source><italic>Radiol Artif Intell</italic>.</source> (<year>2020</year>) <volume>2</volume>:<fpage>e190023</fpage>. <pub-id pub-id-type="doi">10.1148/ryai.2020190023</pub-id> <pub-id pub-id-type="pmid">33937815</pub-id></mixed-citation></ref>
<ref id="B17">
<label>17.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alzaid</surname> <given-names>A</given-names></name> <name><surname>Wignall</surname> <given-names>A</given-names></name> <name><surname>Dogramadzi</surname> <given-names>S</given-names></name> <name><surname>Pandit</surname> <given-names>H</given-names></name> <name><surname>Xie</surname> <given-names>SQ</given-names></name></person-group>. <article-title>Automatic detection and classification of peri-prosthetic femur fracture.</article-title> <source><italic>Int J Comput Assist Radiol Surg</italic>.</source> (<year>2022</year>) <volume>17</volume>:<fpage>649</fpage>&#x2013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.1007/s11548-021-02552-5</pub-id> <pub-id pub-id-type="pmid">35157227</pub-id></mixed-citation></ref>
<ref id="B18">
<label>18.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Selvaraju</surname> <given-names>RR</given-names></name> <name><surname>Cogswell</surname> <given-names>M</given-names></name> <name><surname>Das</surname> <given-names>A</given-names></name> <name><surname>Vedantam</surname> <given-names>R</given-names></name> <name><surname>Parikh</surname> <given-names>D</given-names></name> <name><surname>Batra</surname> <given-names>D</given-names></name><etal>et al</etal></person-group>. <article-title>Grad-CAM: visual explanations from deep networks via gradient-based localization.</article-title> <source><italic>Int J Comput Vis.</italic></source> (<year>2019</year>) <volume>128</volume>:<fpage>336</fpage>&#x2013;<lpage>59</lpage>. <pub-id pub-id-type="doi">10.1007/s11263-019-01228-7</pub-id></mixed-citation></ref>
<ref id="B19">
<label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vasey</surname> <given-names>B</given-names></name> <name><surname>Ursprung</surname> <given-names>S</given-names></name> <name><surname>Beddoe</surname> <given-names>B</given-names></name> <name><surname>Taylor</surname> <given-names>EH</given-names></name> <name><surname>Marlow</surname> <given-names>N</given-names></name> <name><surname>Bilbro</surname> <given-names>N</given-names></name><etal>et al</etal></person-group>. <article-title>Association of clinician diagnostic performance with machine learning-based decision support systems: a systematic review.</article-title> <source><italic>JAMA Netw Open</italic>.</source> (<year>2021</year>) <volume>4</volume>:<fpage>e211276</fpage>. <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2021.1276</pub-id> <pub-id pub-id-type="pmid">33704476</pub-id></mixed-citation></ref>
<ref id="B20">
<label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gonz&#x00E1;lez</surname> <given-names>G</given-names></name> <name><surname>Galant</surname> <given-names>J</given-names></name> <name><surname>Salinas</surname> <given-names>JM</given-names></name> <name><surname>Ben&#x00ED;tez</surname> <given-names>E</given-names></name> <name><surname>S&#x00E1;nchez-Valverde</surname> <given-names>MD</given-names></name> <name><surname>Calbo</surname> <given-names>J</given-names></name><etal>et al</etal></person-group>. <article-title>Classification and segmentation of hip fractures in x-rays: highlighting fracture regions for interpretable diagnosis.</article-title> <source><italic>Insights Imaging</italic>.</source> (<year>2025</year>) <volume>16</volume>:<fpage>86</fpage>. <pub-id pub-id-type="doi">10.1186/s13244-025-01958-y</pub-id> <pub-id pub-id-type="pmid">40232323</pub-id></mixed-citation></ref>
<ref id="B21">
<label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Marullo</surname> <given-names>G</given-names></name> <name><surname>Ulrich</surname> <given-names>L</given-names></name> <name><surname>Antonaci</surname> <given-names>FG</given-names></name> <name><surname>Audisio</surname> <given-names>A</given-names></name> <name><surname>Aprato</surname> <given-names>A</given-names></name> <name><surname>Mass&#x00E8;</surname> <given-names>A</given-names></name><etal>et al</etal></person-group>. <article-title>Classification of AO/OTA 31A/B femur fractures in X-ray images using YOLOv8 and advanced data augmentation techniques.</article-title> <source><italic>Bone Rep</italic>.</source> (<year>2024</year>) <volume>22</volume>:<fpage>101801</fpage>. <pub-id pub-id-type="doi">10.1016/j.bonr.2024.101801</pub-id> <pub-id pub-id-type="pmid">39324016</pub-id></mixed-citation></ref>
<ref id="B22">
<label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hoong</surname> <given-names>CWS</given-names></name> <name><surname>Saul</surname> <given-names>D</given-names></name> <name><surname>Khosla</surname> <given-names>S</given-names></name> <name><surname>Sfeir</surname> <given-names>JG</given-names></name></person-group>. <article-title>Advances in the management of osteoporosis.</article-title> <source><italic>BMJ</italic>.</source> (<year>2025</year>) <volume>390</volume>:<fpage>e081250</fpage>. <pub-id pub-id-type="doi">10.1136/bmj-2024-081250</pub-id> <pub-id pub-id-type="pmid">40738610</pub-id></mixed-citation></ref>
<ref id="B23">
<label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kahwati</surname> <given-names>LC</given-names></name> <name><surname>Kistler</surname> <given-names>CE</given-names></name> <name><surname>Booth</surname> <given-names>G</given-names></name> <name><surname>Sathe</surname> <given-names>N</given-names></name> <name><surname>Gordon</surname> <given-names>RD</given-names></name> <name><surname>Okah</surname> <given-names>E</given-names></name><etal>et al</etal></person-group>. <article-title>Screening for osteoporosis to prevent fractures: a systematic evidence review for the US preventive services task force.</article-title> <source><italic>JAMA.</italic></source> (<year>2025</year>) <volume>333</volume>:<fpage>509</fpage>&#x2013;<lpage>31</lpage>. <pub-id pub-id-type="doi">10.1001/jama.2024.21653</pub-id> <pub-id pub-id-type="pmid">39808441</pub-id></mixed-citation></ref>
<ref id="B24">
<label>24.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Terven</surname> <given-names>J</given-names></name> <name><surname>C&#x00F3;rdova-Esparza</surname> <given-names>D-M</given-names></name> <name><surname>Romero-Gonz&#x00E1;lez</surname> <given-names>J-A</given-names></name></person-group>. <article-title>A comprehensive review of YOLO architectures in computer vision: from YOLOv1 to YOLOv8 and YOLO-NAS.</article-title> <source><italic>Mach Learn Knowl Extract.</italic></source> (<year>2023</year>) <volume>5</volume>:<fpage>1680</fpage>&#x2013;<lpage>716</lpage>. <pub-id pub-id-type="doi">10.3390/make5040083</pub-id></mixed-citation></ref>
<ref id="B25">
<label>25.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Downey</surname> <given-names>C</given-names></name> <name><surname>Kelly</surname> <given-names>M</given-names></name> <name><surname>Quinlan</surname> <given-names>JF</given-names></name></person-group>. <article-title>Changing trends in the mortality rate at 1-year post hip fracture - a systematic review.</article-title> <source><italic>World J Orthop</italic>.</source> (<year>2019</year>) <volume>10</volume>:<fpage>166</fpage>&#x2013;<lpage>75</lpage>. <pub-id pub-id-type="doi">10.5312/wjo.v10.i3.166</pub-id> <pub-id pub-id-type="pmid">30918799</pub-id></mixed-citation></ref>
<ref id="B26">
<label>26.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gale</surname> <given-names>W</given-names></name> <name><surname>Oakden-Rayner</surname> <given-names>L</given-names></name> <name><surname>Carneiro</surname> <given-names>G</given-names></name> <name><surname>Bradley</surname> <given-names>AP</given-names></name> <name><surname>Palmer</surname> <given-names>LJ</given-names></name></person-group>. <article-title>Detecting hip fractures with radiologist-level performance using deep neural networks.</article-title> <source><italic>arXiv [Preprint].</italic></source> (<year>2017</year>): <pub-id pub-id-type="doi">10.48550/arXiv.1711.06504</pub-id></mixed-citation></ref>
<ref id="B27">
<label>27.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>F</given-names></name> <name><surname>Qian</surname> <given-names>Y</given-names></name> <name><surname>Xiao</surname> <given-names>H</given-names></name> <name><surname>Gao</surname> <given-names>Z</given-names></name> <name><surname>Zhao</surname> <given-names>X</given-names></name> <name><surname>Chen</surname> <given-names>Y</given-names></name><etal>et al</etal></person-group>. <article-title>YOLOv8-Seg: a deep learning approach for accurate classification of osteoporotic vertebral fractures.</article-title> <source><italic>Front Radiol</italic>.</source> (<year>2025</year>) <volume>5</volume>:<fpage>1651798</fpage>. <pub-id pub-id-type="doi">10.3389/fradi.2025.1651798</pub-id> <pub-id pub-id-type="pmid">41164317</pub-id></mixed-citation></ref>
<ref id="B28">
<label>28.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Duan</surname> <given-names>P</given-names></name> <name><surname>Liang</surname> <given-names>X</given-names></name></person-group>. <article-title>An improved YOLOv8-based foreign detection algorithm for transmission lines.</article-title> <source><italic>Sensors</italic>.</source> (<year>2024</year>) <volume>24</volume>:<fpage>6468</fpage>. <pub-id pub-id-type="doi">10.3390/s24196468</pub-id> <pub-id pub-id-type="pmid">39409508</pub-id></mixed-citation></ref>
<ref id="B29">
<label>29.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Y</given-names></name> <name><surname>Sun</surname> <given-names>H</given-names></name> <name><surname>Jiang</surname> <given-names>T</given-names></name> <name><surname>Shi</surname> <given-names>J</given-names></name> <name><surname>Wang</surname> <given-names>Q</given-names></name> <name><surname>Yang</surname> <given-names>H</given-names></name><etal>et al</etal></person-group>. <article-title>A multi-module enhanced YOLOv8 framework for accurate AO classification of distal radius fractures: scfast-yolo.</article-title> <source><italic>Front Med</italic>.</source> (<year>2025</year>) <volume>12</volume>:<fpage>1635016</fpage>. <pub-id pub-id-type="doi">10.3389/fmed.2025.1635016</pub-id> <pub-id pub-id-type="pmid">40909458</pub-id></mixed-citation></ref>
<ref id="B30">
<label>30.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gao</surname> <given-names>Z</given-names></name> <name><surname>Wang</surname> <given-names>L</given-names></name> <name><surname>Wu</surname> <given-names>G</given-names></name></person-group>. <article-title>LIP: local importance-based pooling.</article-title> <source><italic>Int J Comput Vis.</italic></source> (<year>2022</year>) <volume>131</volume>:<fpage>1</fpage>&#x2013;<lpage>22</lpage>. <pub-id pub-id-type="doi">10.1007/s11263-022-01707-4</pub-id></mixed-citation></ref>
<ref id="B31">
<label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>X</given-names></name> <name><surname>Liu</surname> <given-names>Y</given-names></name> <name><surname>Diao</surname> <given-names>S</given-names></name> <name><surname>Zhou</surname> <given-names>J</given-names></name></person-group>. <article-title>Inter- and Intra-Observer Reliability of the New Comprehensive Classification of Intertrochanteric Fracture of the Femur.</article-title> <source><italic>Int J Gen Med</italic>.</source> (<year>2025</year>) <volume>18</volume>:<fpage>1261</fpage>&#x2013;<lpage>70</lpage>. <pub-id pub-id-type="doi">10.2147/IJGM.S508342</pub-id> <pub-id pub-id-type="pmid">40065982</pub-id></mixed-citation></ref>
<ref id="B32">
<label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Inal</surname> <given-names>S</given-names></name> <name><surname>Gok</surname> <given-names>K</given-names></name> <name><surname>Gok</surname> <given-names>A</given-names></name> <name><surname>Uzumcugil</surname> <given-names>AO</given-names></name> <name><surname>Kuyuba&#x015F;&#x0131;</surname> <given-names>N</given-names></name></person-group>. <article-title>Should we really compress the fracture line in the treatment of Salter&#x2013;Harris type 4 distal femoral fractures? A biomechanical study.</article-title> <source><italic>J Braz. Soc. Mech. Sci. Eng.</italic></source> (<year>2018</year>) <volume>40</volume>:<fpage>528</fpage>. <pub-id pub-id-type="doi">10.1007/s40430-018-1448-2</pub-id></mixed-citation></ref>
<ref id="B33">
<label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sun</surname> <given-names>H</given-names></name> <name><surname>Tang</surname> <given-names>W</given-names></name> <name><surname>You</surname> <given-names>X</given-names></name> <name><surname>Deng</surname> <given-names>L</given-names></name> <name><surname>Chen</surname> <given-names>L</given-names></name> <name><surname>Qian</surname> <given-names>Z</given-names></name><etal>et al</etal></person-group>. <article-title>The role of the lumbar paravertebral muscles in the development of short-term residual pain after lumbar fusion surgery.</article-title> <source><italic>Spine</italic>.</source> (<year>2025</year>) <volume>50</volume>:<fpage>537</fpage>&#x2013;<lpage>47</lpage>. <pub-id pub-id-type="doi">10.1097/BRS.0000000000005303</pub-id> <pub-id pub-id-type="pmid">39967515</pub-id></mixed-citation></ref>
<ref id="B34">
<label>34.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jaillat</surname> <given-names>A</given-names></name> <name><surname>Cyteval</surname> <given-names>C</given-names></name> <name><surname>Baron Sarrabere</surname> <given-names>MP</given-names></name> <name><surname>Ghomrani</surname> <given-names>H</given-names></name> <name><surname>Maman</surname> <given-names>Y</given-names></name> <name><surname>Thouvenin</surname> <given-names>Y</given-names></name><etal>et al</etal></person-group>. <article-title>Added value of artificial intelligence for the detection of pelvic and hip fractures.</article-title> <source><italic>Jpn J Radiol</italic>.</source> (<year>2025</year>) <volume>43</volume>:<fpage>1166</fpage>&#x2013;<lpage>75</lpage>. <pub-id pub-id-type="doi">10.1007/s11604-025-01754-0</pub-id> <pub-id pub-id-type="pmid">40038216</pub-id></mixed-citation></ref>
<ref id="B35">
<label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oakden-Rayner</surname> <given-names>L</given-names></name> <name><surname>Gale</surname> <given-names>W</given-names></name> <name><surname>Bonham</surname> <given-names>TA</given-names></name> <name><surname>Lungren</surname> <given-names>MP</given-names></name> <name><surname>Carneiro</surname> <given-names>G</given-names></name> <name><surname>Bradley</surname> <given-names>AP</given-names></name><etal>et al</etal></person-group>. <article-title>Validation and algorithmic audit of a deep learning system for the detection of proximal femoral fractures in patients in the emergency department: a diagnostic accuracy study.</article-title> <source><italic>Lancet Digit Health</italic>.</source> (<year>2022</year>) <volume>4</volume>:<fpage>e351</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/S2589-7500(22)00004-8</pub-id> <pub-id pub-id-type="pmid">35396184</pub-id></mixed-citation></ref>
<ref id="B36">
<label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sing</surname> <given-names>CW</given-names></name> <name><surname>Lin</surname> <given-names>TC</given-names></name> <name><surname>Bartholomew</surname> <given-names>S</given-names></name> <name><surname>Bell</surname> <given-names>JS</given-names></name> <name><surname>Bennett</surname> <given-names>C</given-names></name> <name><surname>Beyene</surname> <given-names>K</given-names></name><etal>et al</etal></person-group>. <article-title>Global epidemiology of hip fractures: secular trends in incidence rate, post-fracture treatment, and all-cause mortality.</article-title> <source><italic>J Bone Miner Res</italic>.</source> (<year>2023</year>) <volume>38</volume>:<fpage>1064</fpage>&#x2013;<lpage>75</lpage>. <pub-id pub-id-type="doi">10.1002/jbmr.4821</pub-id> <pub-id pub-id-type="pmid">37118993</pub-id></mixed-citation></ref>
<ref id="B37">
<label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gao</surname> <given-names>Y</given-names></name> <name><surname>Liu</surname> <given-names>W</given-names></name> <name><surname>Chui</surname> <given-names>HC</given-names></name> <name><surname>Chen</surname> <given-names>X</given-names></name></person-group>. <article-title>Large span sizes and irregular shapes target detection methods using variable convolution-improved YOLOv8.</article-title> <source><italic>Sensors</italic>.</source> (<year>2024</year>) <volume>24</volume>:<fpage>2560</fpage>. <pub-id pub-id-type="doi">10.3390/s24082560</pub-id> <pub-id pub-id-type="pmid">38676177</pub-id></mixed-citation></ref>
<ref id="B38">
<label>38.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>F</given-names></name> <name><surname>Wu</surname> <given-names>Y</given-names></name> <name><surname>Hu</surname> <given-names>M</given-names></name> <name><surname>Chang</surname> <given-names>CW</given-names></name> <name><surname>Liu</surname> <given-names>R</given-names></name> <name><surname>Qiu</surname> <given-names>R</given-names></name><etal>et al</etal></person-group>. <article-title>Current progress of digital twin construction using medical imaging.</article-title> <source><italic>J Appl Clin Med Phys</italic>.</source> (<year>2025</year>) <volume>26</volume>:<fpage>e70226</fpage>. <pub-id pub-id-type="doi">10.1002/acm2.70226</pub-id> <pub-id pub-id-type="pmid">40841176</pub-id></mixed-citation></ref>
<ref id="B39">
<label>39.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gok</surname> <given-names>K</given-names></name> <name><surname>Inal</surname> <given-names>S</given-names></name> <name><surname>Urtekin</surname> <given-names>L</given-names></name> <name><surname>Gok</surname> <given-names>A</given-names></name><etal>et al</etal></person-group>. <article-title>Biomechanical performance using finite element analysis of different screw materials in the parallel screw fixation of Salter&#x2013;Harris Type 4 fractures.</article-title> <source><italic>J Braz Soc Mech Sci Eng.</italic></source> (<year>2019</year>) <volume>41</volume>:<fpage>143</fpage>. <pub-id pub-id-type="doi">10.1007/s40430-019-1640-z</pub-id></mixed-citation></ref>
<ref id="B40">
<label>40.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gao</surname> <given-names>Z</given-names></name> <name><surname>Qian</surname> <given-names>Y</given-names></name> <name><surname>Fan</surname> <given-names>R</given-names></name> <name><surname>Yang</surname> <given-names>Y</given-names></name> <name><surname>Wang</surname> <given-names>Y</given-names></name> <name><surname>Xing</surname> <given-names>L</given-names></name><etal>et al</etal></person-group>. <article-title>Intervertebral disc anomaly intelligent classification system based on deep learning, IDAICS.</article-title> <source><italic>Front Radiol</italic>.</source> (<year>2025</year>) <volume>5</volume>:<fpage>1646008</fpage>. <pub-id pub-id-type="doi">10.3389/fradi.2025.1646008</pub-id> <pub-id pub-id-type="pmid">40994700</pub-id></mixed-citation></ref>
<ref id="B41">
<label>41.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nam</surname> <given-names>Y</given-names></name> <name><surname>Kim</surname> <given-names>DY</given-names></name> <name><surname>Kyung</surname> <given-names>S</given-names></name> <name><surname>Seo</surname> <given-names>J</given-names></name> <name><surname>Song</surname> <given-names>JM</given-names></name> <name><surname>Kwon</surname> <given-names>J</given-names></name><etal>et al</etal></person-group>. <article-title>Multimodal large language models in medical imaging: current state and future directions.</article-title> <source><italic>Korean J Radiol</italic>.</source> (<year>2025</year>) <volume>26</volume>:<fpage>900</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.3348/kjr.2025.0599</pub-id> <pub-id pub-id-type="pmid">41015856</pub-id></mixed-citation></ref>
<ref id="B42">
<label>42.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rasheed</surname> <given-names>AF</given-names></name> <name><surname>Zarkoosh</surname> <given-names>M</given-names></name></person-group>. <article-title>Optimized YOLOv8 for multi-scale object detection.</article-title> <source><italic>J Real-Time Image Process.</italic></source> (<year>2024</year>) <volume>22</volume>:<fpage>6</fpage>. <pub-id pub-id-type="doi">10.1007/s11554-024-01582-x</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3052491/overview">Dianning He</ext-link>, China Medical University, China</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3114955/overview">Arif G&#x00F6;k</ext-link>, Dumlupinar University, T&#x00FC;rkiye</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3329551/overview">Mubashar Tariq</ext-link>, Kyung Hee University, Republic of Korea</p></fn>
</fn-group>
</back>
</article>