<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="research-article" dtd-version="1.3" xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Physiol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Physiology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Physiol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-042X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1773031</article-id>
<article-id pub-id-type="doi">10.3389/fphys.2026.1773031</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Construction and validation of a multi-function artificial intelligence&#x2013;assisted system for pressure injury recognition</article-title>
<alt-title alt-title-type="left-running-head">Wang et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fphys.2026.1773031">10.3389/fphys.2026.1773031</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Wang</surname>
<given-names>Zhenni</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3385313"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Xu</surname>
<given-names>Yueping</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Xia</surname>
<given-names>Kaijian</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/Writing - review &#x26; editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Dai</surname>
<given-names>Yiqi</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Xu</surname>
<given-names>Xiaodan</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3229461"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/Writing - review &#x26; editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Chen</surname>
<given-names>Jian</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3068158"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/Writing - review &#x26; editing/">Writing - review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>Gastroenterology Department, Changshu Hospital Affiliated to Soochow University</institution>, <city>Suzhou</city>, <country country="CN">China</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>Nursing Department, Changshu Hospital Affiliated to Soochow University</institution>, <city>Suzhou</city>, <country country="CN">China</country>
</aff>
<aff id="aff3">
<label>3</label>
<institution>Key Laboratory of Medical Artificial Intelligence and Big Data, Changshu Hospital Affiliated to Soochow University</institution>, <city>Suzhou</city>, <country country="CN">China</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Xiaodan Xu, <email xlink:href="mailto:xxddocter@gmail.com">xxddocter@gmail.com</email>; Jian Chen, <email xlink:href="mailto:szcsdocter@gmail.com">szcsdocter@gmail.com</email>
</corresp>
<fn fn-type="equal" id="fn001">
<label>&#x2020;</label>
<p>These authors have contributed equally to this work and share first authorship</p>
</fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-18">
<day>18</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1773031</elocation-id>
<history>
<date date-type="received">
<day>22</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>29</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Wang, Xu, Xia, Dai, Xu and Chen.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Wang, Xu, Xia, Dai, Xu and Chen</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-18">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Background</title>
<p>With the acceleration of population aging, the incidence of pressure injury (PI) continues to rise, making early identification and accurate staging essential for preventing disease progression and improving prognosis. Conventional manual assessment relies heavily on clinical experience and subjective judgment, limiting real-time, objective, and quantitative evaluation.</p>
</sec>
<sec>
<title>Objective</title>
<p>This study aimed to develop and validate an artificial intelligence model based on the YOLOv11 neural network that integrates automatic PI detection, intelligent staging, and wound size measurement, thereby enhancing the timeliness, accuracy, and objectivity of PI assessment.</p>
</sec>
<sec>
<title>Methods</title>
<p>A total of 1,815 PI images collected from the electronic PI management systems of two medical centers between January 2021 and June 2025 were included. According to the 2019 National Pressure Ulcer Advisory Panel (NPUAP) guidelines, images were classified into six categories: Stage I, Stage II, Stage III, Stage IV, unstageable, and suspected deep tissue injury (SDTI). Transfer learning was applied to train YOLOv11 models of different scales (v11n/s/m/l/x). Lesion localization and staging performance were compared to identify the optimal model. Automatic wound size measurement was achieved by integrating ArUco marker recognition with pixel-to-centimeter conversion.</p>
</sec>
<sec>
<title>Results</title>
<p>For bounding box localization, the YOLOv11s model demonstrated superior performance, with a precision of 0.854, recall of 0.766, mAP<sup>50</sup> of 0.842, mAP<sup>50&#x2013;95</sup> of 0.629, and an inference speed of 4.8 ms per image. On the test set, overall staging classification accuracy reached 92.64%, with a sensitivity of 79.79%, specificity of 95.56%, and a false-positive rate of 4.44%. The highest accuracy was observed for suspected deep tissue injury (96.45%), while Stage III showed the lowest accuracy (85.04%). In wound size measurement, PI-3DAS demonstrated high agreement with the reference standard, with a length mean absolute error (MAE) of 0.155 cm and intraclass correlation coefficient (ICC) of 0.996, and a width MAE of 0.137 cm and ICC of 0.994. The mean time for AI-based measurement was 0.691 s, representing a 36.8-fold reduction compared with manual measurement (25.414 s; P &#x3c; 0.001).</p>
</sec>
<sec>
<title>Conclusion</title>
<p>The YOLOv11-based PI-3DAS system enables automated PI detection, staging, and non-contact wound size quantification with high accuracy and consistency, while substantially improving measurement efficiency. This system provides a portable and practical tool to support clinical nursing assessment, therapeutic follow-up, and remote PI management.</p>
</sec>
</abstract>
<kwd-group>
<kwd>automated staging</kwd>
<kwd>deep learning</kwd>
<kwd>pressure injury</kwd>
<kwd>size measurement</kwd>
<kwd>YOLO</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This study received financial support from the Suzhou Nursing Association Research Project (SZHL-B-202407); Changshu Municipal Science and Technology Program (CS202432); Capacity Enhancement Project of the Key Laboratory of Medical Artificial Intelligence and Big Data in Changshu (CYZ202301); the 23rd Batch of Suzhou Science and Technology Development Program (SLT2023006); Suzhou City Special Project on Clinical Key Disease Diagnosis and Treatment Technologies (LCZX202334); and the Suzhou Science and Technology Development Program (SYW2025034). No funding body had any role in the design of the study; the collection, analysis, or interpretation of data; or the writing of the manuscript.</funding-statement>
</funding-group>
<counts>
<fig-count count="13"/>
<table-count count="3"/>
<equation-count count="2"/>
<ref-count count="21"/>
<page-count count="00"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Skin Physiology</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Pressure injury (PI), also referred to as pressure ulcer or decubitus ulcer, is defined as localized damage to the skin and/or underlying soft tissue caused by sustained pressure or pressure combined with shear forces, and may also be associated with medical devices or other external objects. Once PI develops, patients may suffer from pain, discomfort, and reduced mobility; in severe cases, it can result in serious infection, prolonged hospitalization, or even death, posing substantial challenges to both patients and healthcare systems (<xref ref-type="bibr" rid="B15">Me et al., 2019</xref>). The severity of PI ranges from non-blanchable erythema of intact skin to complete loss of skin and underlying tissues with exposed bone (<xref ref-type="bibr" rid="B9">Headlam and Illsley, 2005</xref>). The prevention and management of PI represent a central yet challenging aspect of clinical nursing practice. In China, tertiary hospitals have widely implemented dedicated PI training programs and established specialized nursing teams. Nevertheless, nurses with limited clinical experience may demonstrate reduced accuracy in PI staging due to insufficient professional knowledge, assessment skills, and systematic training. In clinical evaluation, beyond staging, wound size measurement is a crucial indicator for monitoring disease progression and therapeutic effectiveness. Currently, most healthcare institutions rely on manual measurement using rulers or flexible tapes, a process that is labor-intensive and contact-dependent, increasing the risk of cross-infection. Moreover, such measurements are highly susceptible to variations in imaging angle, lighting conditions, and examiner subjectivity, resulting in limited reproducibility and objectivity (<xref ref-type="bibr" rid="B7">Haavisto et al., 2022</xref>; <xref ref-type="bibr" rid="B5">Boyko et al., 2018</xref>).</p>
<p>In recent years, driven by the rapid advancement of big data and cloud computing technologies, artificial intelligence (AI) has been increasingly adopted in the medical field, rendering computer-aided diagnosis a focal area of clinical research (<xref ref-type="bibr" rid="B1">Agnes et al., 2019</xref>). <xref ref-type="bibr" rid="B19">&#x160;&#xed;n et al. (2022)</xref> developed a machine learning model based on clinical data to predict the risk of PI occurrence, providing a structured risk assessment tool to support nursing decision-making. In the domain of medical image intelligence, researchers have further explored deep learning approaches for the automated detection and staging of PI. <xref ref-type="bibr" rid="B2">Aldughayfiq et al. (2023)</xref> proposed a YOLOv5-based model for automatic PI detection and classification into Stages I&#x2013;IV, achieving an overall mAP of 76.9% on a large annotated dataset and outperforming conventional PI recognition methods. With continuous iterations of object detection algorithms, recent studies have demonstrated that optimized YOLOv8s models (mAP &#x2248;84.2%, recall 82.3%) further enhance recognition accuracy for visually ambiguous stages such as Stage II, as well as for deep tissue injury and unstageable categories (<xref ref-type="bibr" rid="B21">Tusar et al., 2025</xref>). Despite these advances, most existing studies have focused on single tasks such as PI risk prediction, detection, or staging, and there remains a lack of integrated models that simultaneously address lesion detection, staging, and wound size measurement, thereby limiting standardized and objective quantification. Moreover, the majority of prior work has not incorporated explainable analysis, nor has it provided bedside tools suitable for real-time clinical application.</p>
<p>Accordingly, this study aims to develop a multi-function intelligent PI detection system that integrates automatic lesion detection, intelligent staging, and wound size measurement (<xref ref-type="fig" rid="F1">Figure 1</xref>). A non-contact digital measurement approach is introduced, and Grad-CAM is employed to provide visual interpretability of the model&#x2019;s decision-making process. In addition, the model is implemented as an interactive bedside tool to enhance the accuracy, objectivity, and clinical usability of PI assessment.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Schematic overview of the clinical application of the PI multi-function recognition system.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g001.tif">
<alt-text content-type="machine-generated">Infographic compares traditional wound assessment using subjective judgment, existing AI with classification and detection but limited measurement, and the PI-3DAS solution, which uses a smartphone for non-contact, explainable, multi-task, and precise wound quantification.</alt-text>
</graphic>
</fig>
</sec>
<sec sec-type="materials|methods" id="s2">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2-1">
<label>2.1</label>
<title>Datasets</title>
<p>This study collected a total of 1,815 images of pressure injury (PI) from hospitalized patients between January 2021 and June 2025 at Changshu Hospital Affiliated to Soochow University (Dataset 1) and Changshu Hospital Affiliated to Nanjing University of Chinese Medicine (Dataset 2). All images were stored in a dedicated electronic PI management system. According to the 2019 guidelines for the prevention and treatment of pressure injury issued by the National Pressure Ulcer Advisory Panel (NPUAP) (<xref ref-type="bibr" rid="B12">Kottner et al., 2019</xref>), PIs were classified into six stages: Stage I (249 images), Stage II (393 images), Stage III (525 images), Stage IV (261 images), suspected deep tissue injury (SDTI; 210 images), and unstageable (177 images). Representative images for each stage are presented in <xref ref-type="fig" rid="F2">Figure 2</xref>, and the overall study workflow is illustrated in <xref ref-type="fig" rid="F3">Figure 3</xref>.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Representative images and distribution of quantities in the dataset.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g002.tif">
<alt-text content-type="machine-generated">Clinical photographs display six examples of pressure ulcers at various stages. Stage I shows non-blanchable redness on intact skin. Stage II depicts partial-thickness skin loss with a shallow open wound. Stage III reveals a deeper ulcer with visible subcutaneous tissue. Stage IV presents extensive tissue loss with exposed bone or tendon. Suspected deep tissue injury (SDTI) demonstrates a dark purple localized area with partial skin breakdown. The unstageable ulcer features full-thickness tissue loss obscured by necrotic tissue. Each image includes a measurement scale for wound assessment.</alt-text>
</graphic>
</fig>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Research flowchart.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g003.tif">
<alt-text content-type="machine-generated">Flowchart outlining a machine learning workflow for pressure injury (PI) staging using patient images, model training with YOLOv11 variants, performance evaluation, and deployment to mobile phones for nurses, doctors, patients, and family members.</alt-text>
</graphic>
</fig>
<p>To prevent data leakage, patient-level separation was implemented during dataset splitting. All images from the same patient were assigned to the same data subset (training, validation, or test set), ensuring that the validation and test sets contained no images from patients in the training set.</p>
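<p>As a minimal sketch, patient-level splitting of this kind can be implemented with scikit-learn&#x2019;s GroupShuffleSplit, using the patient identifier as the grouping key; the variable names below are illustrative and not taken from the study code.</p>
<code language="python">
# Minimal sketch of patient-level splitting (illustrative; assumes each
# image path is paired with a patient ID). Not the authors' actual code.
from sklearn.model_selection import GroupShuffleSplit

def patient_level_split(image_paths, patient_ids, test_size=0.2, seed=42):
    """Split images so that no patient appears in more than one subset."""
    splitter = GroupShuffleSplit(n_splits=1, test_size=test_size,
                                 random_state=seed)
    train_idx, test_idx = next(splitter.split(image_paths, groups=patient_ids))
    train = [image_paths[i] for i in train_idx]
    test = [image_paths[i] for i in test_idx]
    return train, test
</code>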
</sec>
<sec id="s2-2">
<label>2.2</label>
<title>Image acquisition and annotation</title>
<p>PI images were captured by inpatient nursing teams across different clinical departments during routine care and subsequently uploaded to the electronic pressure injury management system. Once a PI was identified, nurses used either medical handheld devices or personal smartphones to take photographs, ensuring that the camera-to-lesion distance was maintained between 40 and 65 cm. During image acquisition, careful handling was emphasized to minimize motion artifacts and ensure high image quality.</p>
<p>The image annotation process was divided into three sequential stages, with participating nurses organized into three teams, each responsible for a specific stage. The detailed annotation workflow is illustrated in <xref ref-type="fig" rid="F4">Figure 4</xref>. Only images that had undergone standardized annotation and verification were included in the training of the deep learning models. Rectangular bounding-box annotations were performed for all six PI stages using the LabelMe tool (v5.3.1). To ensure compatibility with model training, the LabelMe-generated JSON files were converted into YOLO format. Detailed examples of the bounding-box annotations are presented in <xref ref-type="fig" rid="F5">Figure 5</xref>.</p>
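<p>For illustration, the conversion from LabelMe rectangle annotations to the normalized YOLO format (class index, box center, width, and height, each scaled to [0, 1]) can be sketched as follows; the class-name mapping is an assumption and this is not the study&#x2019;s exact script.</p>
<code language="python">
# Illustrative LabelMe-to-YOLO conversion for rectangle annotations.
# Class names and file layout are assumed for the example.
import json

CLASSES = ["stage1", "stage2", "stage3", "stage4", "sdti", "unstageable"]

def labelme_to_yolo(json_path):
    data = json.loads(open(json_path).read())
    img_w, img_h = data["imageWidth"], data["imageHeight"]
    lines = []
    for shape in data["shapes"]:
        (x1, y1), (x2, y2) = shape["points"]  # two rectangle corners
        cx = (x1 + x2) / 2 / img_w            # normalized center x
        cy = (y1 + y2) / 2 / img_h            # normalized center y
        w = abs(x2 - x1) / img_w              # normalized width
        h = abs(y2 - y1) / img_h              # normalized height
        cls = CLASSES.index(shape["label"])
        lines.append(f"{cls} {cx:.6f} {cy:.6f} {w:.6f} {h:.6f}")
    return "\n".join(lines)
</code>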
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Image Annotation Process. <bold>(A)</bold> represents the first stage, in which clinical nurses from various hospital departments identify and photograph PI lesions; <bold>(B)</bold> denotes the second stage, where two members of the pressure injury specialist team independently annotate PI images from all six stages using rectangular bounding boxes and perform cross-validation; <bold>(C)</bold> indicates the third stage, in which a senior pressure injury specialist holding international wound ostomy certification reviews all annotations and makes the final adjudication.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g004.tif">
<alt-text content-type="machine-generated">Three-panel flowchart illustrating a pressure injury documentation process: Step 1 shows wound photo capture by nurses, Step 2 depicts annotation and cross-validation by team members, and Step 3 involves a final review by senior members. A legend clarifies roles, annotation, and review steps using color-coded icons and arrows.</alt-text>
</graphic>
</fig>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Image annotation display. <bold>(A)</bold> presents an aggregated overview of all annotated bounding boxes. <bold>(B)</bold> and <bold>(C)</bold> illustrate the distributions of bounding box center coordinates and width&#x2013;height dimensions, respectively, where darker colors indicate higher frequencies.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g005.tif">
<alt-text content-type="machine-generated">Panel A shows layered, semi-transparent squares creating a nested pattern. Panel B is a scatter plot of blue data points with y versus x axes, showing central clustering. Panel C is a scatter plot with height versus width axes, displaying a positive correlation among blue data points.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2-3">
<label>2.3</label>
<title>Deep learning network</title>
<sec id="s2-3-1">
<label>2.3.1</label>
<title>Image preprocessing</title>
<p>To enhance the accuracy of AI-based PI image recognition, a series of image preprocessing and augmentation strategies was applied. During training, online data augmentation techniques were employed (<xref ref-type="bibr" rid="B3">Athalye and Arnaout, 2023</xref>; <xref ref-type="bibr" rid="B11">Kang et al., 2019</xref>), whereby image data were dynamically modified in real time without generating additional image files. This approach ensured that the model was exposed to slightly varied images at each iteration, thereby improving its robustness to real-world image variability. Preprocessing steps included resizing images to 640 pixels while preserving the original aspect ratio, as well as random horizontal flipping with a probability of 50% to increase data diversity. In addition, RandomResize and RandomCrop were used to introduce stochastic variations in image scale and spatial cropping, enhancing the model&#x2019;s ability to recognize size variations and local features. Furthermore, HSVRandomAug provided by YOLOX (<xref ref-type="bibr" rid="B16">Qiu et al., 2023</xref>) was applied to randomly adjust the HSV color space, strengthening the model&#x2019;s adaptability to variations in illumination and color conditions.</p>
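<p>A minimal sketch of these online augmentations (aspect-preserving resize, random horizontal flip, and HSV jitter) is given below for illustration; the gain ranges are assumptions, and a full pipeline would also transform the bounding-box coordinates.</p>
<code language="python">
# Illustrative online augmentation mirroring the steps described above.
import random
import cv2
import numpy as np

def augment(img, size=640):
    # Resize so the longest side equals `size`, preserving aspect ratio.
    scale = size / max(img.shape[:2])
    img = cv2.resize(img, None, fx=scale, fy=scale)
    # Random horizontal flip with probability 0.5 (box x-coordinates
    # must be mirrored accordingly in a full pipeline).
    if random.random() &lt; 0.5:
        img = cv2.flip(img, 1)
    # Random HSV jitter; the gain range is illustrative.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.float32)
    gains = 1 + np.random.uniform(-0.4, 0.4, 3)
    hsv[..., 0] = (hsv[..., 0] * gains[0]) % 180           # hue wraps at 180
    hsv[..., 1:] = np.clip(hsv[..., 1:] * gains[1:], 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
</code>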
</sec>
<sec id="s2-3-2">
<label>2.3.2</label>
<title>Model training configuration</title>
<p>This study adopted a transfer learning strategy (<xref ref-type="bibr" rid="B18">Shin et al., 2016</xref>) and utilized five variants of the YOLOv11 model pretrained on the Common Objects in Context (COCO) dataset (<xref ref-type="bibr" rid="B10">Jocher et al., 2023</xref>), namely, nano (n), small (s), medium (m), large (l), and extra-large (x), representing increasing levels of model size and complexity. Model weights were initialized and subsequently fine-tuned by retraining all layers on the PI image dataset. During training, the optimizer was automatically selected, and the learning rate was adaptively adjusted according to the configuration file to optimize performance. The maximum number of training epochs was set to 100, with a batch size of 16 and a maximum of 30 detectable objects per image. Automatic mixed-precision training on the graphics processing unit (GPU) was enabled to improve computational efficiency. All training settings, including resizing images to 640 pixels, were strictly implemented according to the predefined configuration parameters. To mitigate overfitting, an early stopping mechanism was applied with a patience of 10 epochs, such that training was terminated if no improvement in validation performance was observed over 10 consecutive epochs.</p>
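<p>For illustration, this configuration can be expressed with the Ultralytics training API roughly as follows; the dataset YAML name is a placeholder, and the sketch is not the study&#x2019;s exact training script.</p>
<code language="python">
# Illustrative transfer-learning setup with the Ultralytics API.
# "pi_dataset.yaml" is a placeholder for the study's dataset config.
from ultralytics import YOLO

model = YOLO("yolo11s.pt")   # COCO-pretrained weights (n/s/m/l/x variants)
model.train(
    data="pi_dataset.yaml",  # image paths and the six PI class names
    epochs=100,              # maximum number of training epochs
    batch=16,                # batch size
    imgsz=640,               # input resolution
    patience=10,             # early stopping after 10 stagnant epochs
    amp=True,                # automatic mixed-precision training on GPU
    optimizer="auto",        # optimizer selected automatically
)
</code>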
</sec>
<sec id="s2-3-3">
<label>2.3.3</label>
<title>Multi-function system development</title>
<p>After identifying the optimal YOLOv11 model, the present study further extended it into a multi-function system integrating automatic lesion detection, intelligent staging, and physical size measurement (<xref ref-type="fig" rid="F6">Figure 6</xref>). This three-in-one framework was designated PI-3DAS (Pressure Injury 3-Task Detection, Assessment, and Sizing), highlighting its unified design for simultaneously accomplishing lesion localization, staging, and dimensional quantification.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>A multi-function system for PI automatic detection, staging, and dimensional measurement.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g006.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a mobile application pipeline for multi-class lesion detection, showing input wound images undergoing preprocessing, a detection model predicting bounding boxes and classes, and final annotated predictions, emphasizing high precision and portable use.</alt-text>
</graphic>
</fig>
<p>To enable objective size measurement, the system first establishes a conversion relationship between image pixels and real-world physical distances using Augmented Reality University of Cordoba (ArUco) markers. A standardized 5 cm &#xd7; 5 cm ArUco marker (perimeter 20 cm) was generated using a custom script (available at: <ext-link ext-link-type="uri" xlink:href="https://share.weiyun.com/sZ48DSxB">https://share.weiyun.com/sZ48DSxB</ext-link>) and placed on the same plane as the lesion during image acquisition. The OpenCV ArUcoDetector was then employed to automatically identify the four corner points of the marker and compute its pixel perimeter (<inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mtext>pixel</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>). The pixel-to-centimeter conversion ratio was subsequently calculated using the formula: <inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>_</mml:mo>
<mml:mi>c</mml:mi>
<mml:mi>m</mml:mi>
<mml:mo>_</mml:mo>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mi>r</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</inline-formula>, where the true perimeter <italic>P</italic><sub>real</sub> equals 20 cm. This ratio represents the number of pixels corresponding to 1 cm in the image and serves as the key parameter for dimensional conversion.</p>
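<p>As a sketch, this calibration step can be reproduced with the OpenCV ArUco module (OpenCV &#x2265; 4.7); the marker dictionary chosen below is an assumption for illustration.</p>
<code language="python">
# Illustrative scale calibration from a 5 cm x 5 cm ArUco marker
# (true perimeter 20 cm); the marker dictionary is assumed.
import cv2
import numpy as np

def pixel_cm_ratio(image_bgr, real_perimeter_cm=20.0):
    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
    detector = cv2.aruco.ArucoDetector(dictionary,
                                       cv2.aruco.DetectorParameters())
    corners, ids, _ = detector.detectMarkers(image_bgr)
    if ids is None:
        raise ValueError("No ArUco marker detected in the image")
    pts = corners[0].reshape(4, 2)  # four marker corners in pixels
    pixel_perimeter = sum(
        float(np.linalg.norm(pts[i] - pts[(i + 1) % 4])) for i in range(4)
    )
    return pixel_perimeter / real_perimeter_cm  # pixels per centimeter
</code>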
<p>Following scale calibration, the input image is processed by the optimized YOLOv11 model. The model outputs include the lesion bounding box coordinates (x<sub>1</sub>, y<sub>1</sub>, x<sub>2</sub>, y<sub>2</sub>), the predicted staging category, and the associated confidence score. The pixel-based width (<inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>) and height (<inline-formula id="inf4">
<mml:math id="m4">
<mml:mrow>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>) of the lesion are obtained from the horizontal and vertical differences of the bounding box. Using the established conversion ratio, pixel dimensions are transformed into real physical distances, enabling automatic measurement of lesion length and width. The lesion length and width were calculated according to <xref ref-type="disp-formula" rid="e1">Equations 1</xref>, <xref ref-type="disp-formula" rid="e2">2</xref>:<disp-formula id="e1">
<mml:math id="m5">
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>_</mml:mo>
<mml:mi>c</mml:mi>
<mml:mi>m</mml:mi>
<mml:mo>_</mml:mo>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>
<disp-formula id="e2">
<mml:math id="m6">
<mml:mrow>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mi>c</mml:mi>
<mml:mi>m</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:msub>
<mml:mi>H</mml:mi>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>x</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>_</mml:mo>
<mml:mi>c</mml:mi>
<mml:mi>m</mml:mi>
<mml:mo>_</mml:mo>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>
</p>
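<p>For completeness, a minimal sketch applying Equations 1, 2 to a predicted bounding box follows; the function name is illustrative.</p>
<code language="python">
# Illustrative application of Equations 1 and 2 to a predicted box.
def box_size_cm(x1, y1, x2, y2, pixel_cm_ratio):
    """Convert a pixel-space bounding box to width/height in centimeters."""
    width_cm = (x2 - x1) / pixel_cm_ratio   # Equation 1
    height_cm = (y2 - y1) / pixel_cm_ratio  # Equation 2
    return width_cm, height_cm
</code>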
</sec>
</sec>
<sec id="s2-4">
<label>2.4</label>
<title>Model performance evaluation</title>
<p>Multiple evaluation metrics were employed to comprehensively assess the performance of the AI models. For deep learning&#x2013;based object detection, evaluation focused on two key aspects: the accuracy of bounding box localization and the correctness of category prediction. Bounding box precision was quantified using mAP<sup>50</sup> and mAP<sup>50&#x2013;95</sup>, where mAP<sup>50</sup> represents the mean average precision at an intersection-over-union (IoU) threshold of 0.5, and mAP<sup>50&#x2013;95</sup> denotes the mean average precision averaged across IoU thresholds ranging from 0.50 to 0.95 in increments of 0.05. To evaluate the model&#x2019;s performance in lesion classification, sensitivity, specificity, accuracy, and false-positive rate were calculated. In addition, real-time processing capability was assessed using frames per second (FPS), which directly reflects the model&#x2019;s inference speed and responsiveness in practical clinical scenarios.</p>
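<p>For clarity, the per-stage classification metrics can be derived from one-vs-rest confusion counts; the sketch below is illustrative rather than the study&#x2019;s evaluation code.</p>
<code language="python">
# Illustrative one-vs-rest classification metrics for a single PI stage,
# given its true/false positive and negative counts.
def stage_metrics(tp, fp, tn, fn):
    sensitivity = tp / (tp + fn)            # recall for the stage
    specificity = tn / (tn + fp)
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    fpr = fp / (fp + tn)                    # false-positive rate = 1 - specificity
    return sensitivity, specificity, accuracy, fpr
</code>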
</sec>
<sec id="s2-5">
<label>2.5</label>
<title>Web application development</title>
<p>To facilitate clinical translation of the PI intelligent assessment model, an interactive web-based application named &#x201c;PI-3DAS&#x201d; was developed using the Streamlit framework. The application was implemented in Python and integrates the optimally trained YOLOv11 model as its core inference engine. The front end provides an intuitive user interface, while the back end employs the OpenCV and PIL libraries for image processing. After users upload clinical images, the system automatically executes an integrated three-step workflow of detection, staging, and measurement: lesions are first precisely localized and staged, followed by calculation of the pixel-to-centimeter conversion through recognition of ArUco markers in the image, enabling automatic quantification of wound dimensions (length and width). The results are displayed in real time as visually enhanced images and structured data outputs. Owing to its lightweight design, ease of deployment, and cross-platform compatibility, the application is well suited for practical clinical use.</p>
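<p>A skeletal version of such an application is sketched below; the weight-file name is a placeholder, and the sketch shows only the detection and staging step of the deployed workflow.</p>
<code language="python">
# Skeletal Streamlit app mirroring the described workflow (illustrative).
import numpy as np
import streamlit as st
from PIL import Image
from ultralytics import YOLO

model = YOLO("best.pt")  # placeholder for the trained YOLOv11s weights

st.title("PI-3DAS: detection, staging, and sizing")
upload = st.file_uploader("Upload a wound image", type=["jpg", "png"])
if upload is not None:
    image = np.array(Image.open(upload).convert("RGB"))
    result = model.predict(image)[0]             # detection + staging
    st.image(result.plot()[..., ::-1],           # BGR to RGB for display
             caption="Detected lesions and stages")
    # Size measurement would follow via ArUco calibration (Section 2.3.3).
</code>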
</sec>
<sec id="s2-6">
<label>2.6</label>
<title>Experimental platform</title>
<p>The computational platform used in this study was configured with an NVIDIA RTX A4000 GPU (16.9 GB of VRAM), an Intel Xeon E5-2680 v4 six-core processor, 30.1 GB of system memory, and 451.0 GB of storage. Development, training, and image processing of the deep learning models were conducted using PyTorch 1.10.1 with CUDA 11.3 support. The model development environment was based on Ultralytics YOLOv11.3.47, and the web application was implemented using Streamlit 1.36.0, all running under Python 3.9.</p>
</sec>
<sec id="s2-7">
<label>2.7</label>
<title>Statistical analysis</title>
<p>Data processing and analysis were performed using Python 3.9 and associated libraries, including Pandas 1.3.4 and NumPy 1.21.4. Continuous variables are presented as mean &#xb1; standard deviation or median (interquartile range), as appropriate, while categorical variables are expressed as frequencies and percentages. Model performance was evaluated using precision, recall, mean average precision (mAP), F1 score, sensitivity, specificity, and accuracy. For PI size measurement, accuracy was quantified using mean absolute error (MAE), root mean square error (RMSE), and mean absolute percentage error (MAPE). Agreement between AI-based and reference measurements was assessed using the intraclass correlation coefficient [ICC(A,1)], with values &#x3e; 0.75 indicating good agreement, and Bland&#x2013;Altman analysis was applied to evaluate systematic bias and the 95% limits of agreement (LoA). Differences in measurement time between AI-based and manual methods were compared using the Wilcoxon signed-rank test (nonparametric). A two-sided P value &#x3c; 0.05 was considered statistically significant.</p>
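<p>As an illustration, the error and agreement statistics can be computed from paired AI and reference measurements as sketched below; computing ICC(A,1) itself would require a dedicated routine (e.g., pingouin&#x2019;s intraclass_corr on long-format data), which is noted here as an assumption.</p>
<code language="python">
# Illustrative computation of the reported error/agreement statistics
# from paired NumPy arrays of AI and reference measurements.
import numpy as np
from scipy.stats import wilcoxon

def error_stats(ai, ref):
    diff = ai - ref
    mae = np.mean(np.abs(diff))
    rmse = np.sqrt(np.mean(diff ** 2))
    mape = np.mean(np.abs(diff) / ref) * 100
    bias = np.mean(diff)                        # Bland-Altman bias
    sd = np.std(diff, ddof=1)
    loa = (bias - 1.96 * sd, bias + 1.96 * sd)  # 95% limits of agreement
    return mae, rmse, mape, bias, loa

# Paired, nonparametric comparison of measurement times:
# stat, p = wilcoxon(ai_times, manual_times)
</code>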
</sec>
</sec>
<sec sec-type="results" id="s3">
<label>3</label>
<title>Results</title>
<sec id="s3-1">
<label>3.1</label>
<title>Model training</title>
<p>A total of 1,815 PI images across different stages were included in this study and divided into a training set (1,336 images), a validation set (334 images), and a test set (145 images). The distribution of images by stage was as follows: Stage I, 249 images; Stage II, 393 images; Stage III, 525 images; Stage IV, 261 images; suspected deep tissue injury (SDTI), 210 images; and unstageable, 177 images. The detailed distribution is illustrated in <xref ref-type="fig" rid="F7">Figure 7</xref>.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Distribution of images for each stage of PI.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g007.tif">
<alt-text content-type="machine-generated">Bar chart comparing the count of cases in six categories&#x2014;Stage I, Stage II, Stage III, Stage IV, SDTI, and Unstageable&#x2014;across three datasets: Trainset, Valset, and Testset, with Trainset having the highest counts in all categories.</alt-text>
</graphic>
</fig>
<p>During training, the loss function of the YOLOv11n model exhibited a clear downward trend, progressively decreasing and eventually stabilizing as the number of training epochs increased, indicating convergence toward an optimal solution (<xref ref-type="fig" rid="F8">Figure 8A</xref>). Concurrently, the bounding box localization performance metrics showed an upward trajectory throughout training, with a rapid initial improvement followed by stabilization. Specifically, the model achieved a bounding box precision of 0.837 and a recall of 0.711. In addition, the mAP<sup>50</sup> and mAP<sup>50&#x2013;95</sup> reached 0.808 and 0.558, respectively, demonstrating the robust object detection capability of the YOLOv11n model (<xref ref-type="fig" rid="F8">Figure 8B</xref>).</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Changes in the loss function and performance metrics during YOLOv11 model training. <bold>(A)</bold> shows the variation of the loss function, while <bold>(B)</bold> illustrates changes in bounding box localization performance metrics. mAP50 denotes the mean average precision at an intersection-over-union (IoU) threshold of 0.5, and mAP<sup>50&#x2013;95</sup> represents the mean average precision averaged over IoU thresholds ranging from 0.50 to 0.95 in increments of 0.05.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g008.tif">
<alt-text content-type="machine-generated">Panel A shows a line graph of loss versus epoch with three lines for train box loss, train classification loss, and train distribution focal loss, all generally decreasing over eighty epochs. Panel B presents a line graph of four metrics&#x2014;precision, recall, mAP50, and mAP50&#x2013;95&#x2014;versus epoch, all generally increasing or stabilizing over eighty epochs.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>Bounding box localization performance of different YOLO model variants</title>
<p>The comparative performance of different models on the validation set is summarized in <xref ref-type="table" rid="T1">Table 1</xref>. To provide a comprehensive baseline comparison, the established YOLOv8s model was also evaluated under identical experimental conditions. Overall, YOLOv11s demonstrated the best bounding box localization performance, achieving higher precision (0.854), recall (0.766), mAP<sup>50</sup> (0.842), and mAP<sup>50&#x2013;95</sup> (0.629) than the other variants. Compared with YOLOv8s, YOLOv11s showed improvements of 4.0% in precision, 5.8% in recall, 4.5% in mAP<sup>50</sup>, and 10.2% in mAP<sup>50&#x2013;95</sup>, while maintaining comparable inference speed (4.8 ms vs. 4.5 ms per image). YOLOv11m and YOLOv11l showed slightly lower mAP<sup>50</sup> and mAP<sup>50&#x2013;95</sup> values compared with YOLOv11s. In terms of inference speed, YOLOv11n exhibited the shortest inference time (3.2 ms per image), making it the fastest model; however, its mAP<sup>50&#x2013;95</sup> (0.558) and precision (0.837) were inferior to those of YOLOv11s. Conversely, YOLOv11x required the longest inference time (8.1 ms per image) and did not demonstrate advantages in either precision (0.698) or mAP<sup>50</sup> (0.731). Taking both detection accuracy and inference speed into account, YOLOv11s achieved the most favorable performance balance in this study. The precision&#x2013;latency trade-off curves for all models are shown in <xref ref-type="fig" rid="F9">Figure 9</xref>.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Performance of bounding box localization across different YOLO model versions.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">YOLO Version</th>
<th align="center">Precision</th>
<th align="center">Recall</th>
<th align="center">mAP<sup>50</sup>
</th>
<th align="center">mAP<sup>50-95</sup>
</th>
<th align="center">Speed (ms/img)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">YOLOv8s</td>
<td align="center">0.821</td>
<td align="center">0.724</td>
<td align="center">0.806</td>
<td align="center">0.571</td>
<td align="center">4.5</td>
</tr>
<tr>
<td align="center">YOLOv11n</td>
<td align="center">0.837</td>
<td align="center">0.711</td>
<td align="center">0.808</td>
<td align="center">0.558</td>
<td align="center">3.2</td>
</tr>
<tr>
<td align="center">YOLOv11s</td>
<td align="center">0.854</td>
<td align="center">0.766</td>
<td align="center">0.842</td>
<td align="center">0.629</td>
<td align="center">4.8</td>
</tr>
<tr>
<td align="center">YOLOv11m</td>
<td align="center">0.767</td>
<td align="center">0.621</td>
<td align="center">0.738</td>
<td align="center">0.522</td>
<td align="center">6.2</td>
</tr>
<tr>
<td align="center">YOLOv11l</td>
<td align="center">0.794</td>
<td align="center">0.642</td>
<td align="center">0.745</td>
<td align="center">0.532</td>
<td align="center">6.9</td>
</tr>
<tr>
<td align="center">YOLOv11x</td>
<td align="center">0.698</td>
<td align="center">0.715</td>
<td align="center">0.731</td>
<td align="center">0.496</td>
<td align="center">8.1</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>mAP<sup>50</sup> and mAP<sup>50&#x2013;95</sup> denote the mean average precision at IoU thresholds of 0.5 and 0.50&#x2013;0.95 (step &#x3d; 0.05), respectively. Speed indicates the average inference time per image (ms) on an NVIDIA RTX A4000 GPU.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<fig id="F9" position="float">
<label>FIGURE 9</label>
<caption>
<p>Performance of different versions of the YOLOv11 model.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g009.tif">
<alt-text content-type="machine-generated">Line chart comparing five YOLOv11 model variants, with mean average precision at fifty percent IoU on the y-axis and inference latency in milliseconds per image on the x-axis. YOLOv11n and YOLOv11s, represented by blue and orange dots, have the best balance of speed and precision, with YOLOv11s achieving the highest mAP50 of zero point eight four two. A legend lists models by color and respective mAP50 values. A bold blue arrow labeled &#x22;Faster&#x22; points leftward, indicating decreasing latency. Dotted lines connect each model point sequentially.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Classification performance of the YOLOv11s model on the test set</title>
<p>With respect to classification accuracy across different PI stages, the YOLOv11s model achieved an overall accuracy of 92.64%, with a sensitivity of 79.79%, specificity of 95.56%, and a false-positive rate of 4.44% across all categories. Among the six PI stages, the highest classification accuracy was observed for SDTI at 96.45%, whereas Stage III exhibited the lowest accuracy at 85.04%. Detailed performance metrics for each stage are presented in <xref ref-type="table" rid="T2">Table 2</xref>.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Classification performance of the YOLOv11s model on the test set (%).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Category</th>
<th align="center">Accuracy (%)</th>
<th align="center">Sensitivity (%)</th>
<th align="center">Specificity (%)</th>
<th align="center">Precision (%)</th>
<th align="center">F1 Score (%)</th>
<th align="center">FPR(%)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">StageI</td>
<td align="center">95.58</td>
<td align="center">85.00</td>
<td align="center">97.85</td>
<td align="center">89.47</td>
<td align="center">87.18</td>
<td align="center">2.15</td>
</tr>
<tr>
<td align="center">Stage II</td>
<td align="center">89.26</td>
<td align="center">75.86</td>
<td align="center">93.48</td>
<td align="center">78.57</td>
<td align="center">77.19</td>
<td align="center">6.52</td>
</tr>
<tr>
<td align="center">Stage III</td>
<td align="center">85.04</td>
<td align="center">77.27</td>
<td align="center">89.16</td>
<td align="center">79.07</td>
<td align="center">78.16</td>
<td align="center">10.84</td>
</tr>
<tr>
<td align="center">Stage IV</td>
<td align="center">93.10</td>
<td align="center">76.19</td>
<td align="center">96.84</td>
<td align="center">84.21</td>
<td align="center">80.00</td>
<td align="center">3.16</td>
</tr>
<tr>
<td align="center">SDTI</td>
<td align="center">96.45</td>
<td align="center">91.67</td>
<td align="center">97.00</td>
<td align="center">78.57</td>
<td align="center">84.62</td>
<td align="center">3.00</td>
</tr>
<tr>
<td align="center">Unstageable</td>
<td align="center">96.43</td>
<td align="center">72.73</td>
<td align="center">99.01</td>
<td align="center">88.89</td>
<td align="center">80.00</td>
<td align="center">0.99</td>
</tr>
<tr>
<td align="center">Macro-averaged</td>
<td align="center">92.64</td>
<td align="center">79.79</td>
<td align="center">95.56</td>
<td align="center">83.13</td>
<td align="center">81.19</td>
<td align="center">4.44</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>The macro-averaged values represent the equally weighted mean of the performance metrics (e.g., accuracy, sensitivity) across all PI stages. FPR, false-positive rate.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3-4">
<label>3.4</label>
<title>Relationship between precision, recall, F1 score, and confidence in the AI model</title>
<p>The YOLOv11s object detection model was applied to localize PI lesions and estimate prediction confidence; during video analysis, detection was performed continuously on each frame. The F1 score rose rapidly at confidence thresholds below 0.1, increased gradually thereafter, and peaked at a threshold of 0.561 (<xref ref-type="fig" rid="F10">Figure 10</xref>). In typical object detection tasks, an appropriate trade-off between precision and recall is required, and the optimal cutoff is generally set at the peak F1 score. At lower confidence thresholds, the model favors recall over precision, minimizing the risk of missing potential lesions during the initial screening phase. This behavior is particularly valuable in clinical settings, where pressure injury specialist nurses review the model&#x2019;s annotations and the final diagnosis relies on their verification.</p>
<fig id="F10" position="float">
<label>FIGURE 10</label>
<caption>
<p>Relationship between precision, recall, F1 score, and confidence in the AI model. <bold>(A)</bold> shows the precision&#x2013;confidence curve, <bold>(B)</bold> the recall&#x2013;confidence curve, and <bold>(C)</bold> the F1 score&#x2013;confidence curve.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g010.tif">
<alt-text content-type="machine-generated">Three side-by-side line charts labeled A, B, and C display precision, recall, and F1 scores, respectively, versus confidence for six classes of data and an overall performance line. Each chart includes a legend denoting the classes: Stage I, Stage II, Stage III, Stage IV, SDTI, Unstageable, and all classes, represented by different colored lines. The overall performance for all classes is shown as a thick blue line on each plot. Performance values at specified confidence levels are indicated in the legend on each panel.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-5">
<label>3.5</label>
<title>PI-3DAS performance in PI size measurement</title>
<p>The wound size measurement performance of PI-3DAS was evaluated on the test set (n &#x3d; 145). Compared with the reference standard, PI-3DAS demonstrated small measurement errors for both length and width (<xref ref-type="table" rid="T3">Table 3</xref>): for length, MAE was 0.155 cm, RMSE was 0.211 cm, and MAPE was 3.68%; for width, MAE was 0.137 cm, RMSE was 0.168 cm, and MAPE was 5.85%. Under the absolute error threshold of &#x7c;&#x394;&#x7c; &#x2264; 0.5 cm, the success rates for length and width measurements were 96.6% and 100.0%, respectively; under the relative error threshold of &#x2264;10%, the corresponding success rates were 93.1% and 82.8%.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Accuracy and agreement of PI size measurements between PI-3DAS and the reference standard.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Measurement</th>
<th align="center">MAE (cm)</th>
<th align="center">RMSE (cm)</th>
<th align="center">MAPE (%)</th>
<th align="center">Success Rate (&#x2264;0.5 cm),%</th>
<th align="center">Success Rate (Rel&#x2264;10%),%</th>
<th align="center">ICC(A,1) [ICC 95% CI]</th>
<th align="center">Bias (cm)</th>
<th align="center">LoA (cm)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Length</td>
<td align="center">0.155</td>
<td align="center">0.211</td>
<td align="center">3.68</td>
<td align="center">96.6</td>
<td align="center">93.1</td>
<td align="center">0.996 (0.994&#x2013;0.998)</td>
<td align="center">0.044</td>
<td align="center">&#x2212;0.361 to 0.449</td>
</tr>
<tr>
<td align="center">Width</td>
<td align="center">0.137</td>
<td align="center">0.168</td>
<td align="center">5.85</td>
<td align="center">100</td>
<td align="center">82.8</td>
<td align="center">0.994 (0.989&#x2013;0.996)</td>
<td align="center">0.047</td>
<td align="center">&#x2212;0.270 to 0.364</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>ICC(A,1), intraclass correlation coefficient (two-way random-effects, absolute agreement); LoA, limits of agreement.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>Agreement analysis indicated excellent concordance between PI-3DAS and the reference standard (<xref ref-type="table" rid="T3">Table 3</xref>; <xref ref-type="fig" rid="F11">Figures 11C,D</xref>), with ICC(A,1) values of 0.996 (95% CI: 0.994&#x2013;0.998) for length and 0.994 (95% CI: 0.989&#x2013;0.996) for width. Bland&#x2013;Altman analysis revealed biases close to zero with a slight positive bias (AI measurements marginally higher than the reference standard) (<xref ref-type="fig" rid="F11">Figures 11A,B</xref>): length bias of &#x2b;0.044 cm with limits of agreement (LoA) from &#x2212;0.361 to 0.449 cm, and width bias of &#x2b;0.047 cm with LoA from &#x2212;0.270 to 0.364 cm.</p>
<fig id="F11" position="float">
<label>FIGURE 11</label>
<caption>
<p>Agreement between PI-3DAS and the reference standard for PI size measurements. <bold>(A,B)</bold> show Bland&#x2013;Altman plots for length and width, respectively, illustrating the mean difference (solid line) and the 95% limits of agreement (dashed lines). <bold>(C,D)</bold> present scatter plots comparing AI-derived measurements with the reference standard for length and width, respectively.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g011.tif">
<alt-text content-type="machine-generated">Panel A shows a Bland-Altman plot comparing the difference and mean of AI and reference measurements in centimeters, with most data points clustering near zero difference. Panel B displays a similar Bland-Altman plot for another dataset with a narrower range of values and limits of agreement. Panel C presents a scatter plot showing a strong linear correlation between AI and reference measurements with values ranging from about 2 to 14 centimeters. Panel D shows another scatter plot indicating strong linear correlation between AI and reference data, spanning approximately 1 to 12 centimeters.</alt-text>
</graphic>
</fig>
<p>In terms of efficiency, PI-3DAS markedly reduced the time required for single-case measurement. The median and mean times for AI-based measurement were 0.694 s and 0.691 s, respectively, compared with 23.662 s and 25.414 s for manual measurement, representing an approximately 36.8-fold reduction in measurement time. The difference was statistically significant according to the Wilcoxon signed-rank test (P &#x3c; 0.001).</p>
</sec>
<sec id="s3-6">
<label>3.6</label>
<title>Terminal deployment and visual interpretability</title>
<p>Based on the best-performing YOLOv11s model, this study developed a multi-function AI-assisted system that integrates pressure injury (PI) lesion detection, automated staging, and wound size measurement, and deployed it as a web-based application using the Streamlit framework, designated PI-3DAS. To facilitate broad adoption across hospitals, nursing homes, and home-care settings, and to meet the needs of clinicians, nurses, patients, and students, users can access the system by scanning the QR code shown in <xref ref-type="fig" rid="F12">Figure 12A</xref> or by visiting <ext-link ext-link-type="uri" xlink:href="https://pi-3das-v2.streamlit.app/">https://pi-3das-v2.streamlit.app/</ext-link> via a mobile browser. The user interface is illustrated in <xref ref-type="fig" rid="F12">Figure 12D</xref>.</p>
<fig id="F12" position="float">
<label>FIGURE 12</label>
<caption>
<p>Terminal deployment and application examples of the PI-3DAS multi-function system. <bold>(A)</bold> QR code for mobile access to the web application; <bold>(B)</bold> application example in a video scenario with real-time PI detection, staging, and size measurement; <bold>(C)</bold> application example in an image scenario with automatic wound size measurement based on ArUco marker calibration; <bold>(D)</bold> user interface of the PI-3DAS web application supporting image/video upload and real-time inference.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g012.tif">
<alt-text content-type="machine-generated">Panel A shows a QR code for mobile access to the web application, panel B shows a video-scenario example with real-time pressure injury detection, staging, and size measurement, panel C displays a pressure injury on skin annotated with model predictions for wound size and stage using an ArUco marker, and panel D shows a tablet screen with the PI-3DAS web interface for assessing pressure injury.</alt-text>
</graphic>
</fig>
<p>
<xref ref-type="fig" rid="F12">Figures 12B,C</xref> demonstrate representative applications of the system in video and image scenarios. Users may upload images or videos through the sidebar or capture images in real time using a camera on a mobile device or computer. Upon clicking the &#x201c;Predict&#x201d; button, the system automatically performs PI detection, staging, wound size measurement, and heatmap generation. The application is cross-platform, user-friendly, and shareable, enabling seamless use on both mobile and desktop devices.</p>
<p>To enhance model interpretability and support clinical decision-making, the system integrates Grad-CAM heatmap visualization (<xref ref-type="fig" rid="F13">Figure 13</xref>). By selecting the &#x201c;Visualize Heatmap&#x201d; option in the sidebar, users can overlay class activation heatmaps onto the detection results, intuitively highlighting the lesion-specific regions that the model focuses on during diagnostic inference. This feature assists clinicians in validating the rationale and reliability of AI-based assessments.</p>
<fig id="F13" position="float">
<label>FIGURE 13</label>
<caption>
<p>Grad-CAM visualizations of the AI model&#x2019;s decision-making process. <bold>(A1&#x2013;A4)</bold> show the original images, and <bold>(B1&#x2013;B4)</bold> display the corresponding activation heatmaps overlaid on the original images.</p>
</caption>
<graphic xlink:href="fphys-17-1773031-g013.tif">
<alt-text content-type="machine-generated">Eight clinical photographs arranged in two rows and four columns show pressure ulcers at different stages and body locations. Row A depicts color photographs of wounds of varying severity, each accompanied by a measurement scale. Row B shows the corresponding Grad-CAM activation heatmaps for each wound, highlighting the model&#x2019;s regions of attention with color overlays. Columns are labeled A1&#x2013;A4 and B1&#x2013;B4, linking each photographic image to its heatmap visualization.</alt-text>
</graphic>
</fig>
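<p>The Grad-CAM overlay technique itself can be sketched generically, as below, using the <monospace>pytorch-grad-cam</monospace> package on a torchvision classifier rather than the authors&#x2019; exact YOLOv11 hook; the chosen target layer and the random placeholder image are assumptions made purely for illustration.</p>
<code language="Python"># Generic Grad-CAM sketch on a torchvision classifier (illustrates the
# heatmap-overlay technique only; not the authors' exact YOLOv11 pipeline)
import numpy as np
import torch
from torchvision.models import resnet18
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image

model = resnet18(weights="IMAGENET1K_V1").eval()
rgb = np.random.rand(224, 224, 3).astype(np.float32)   # placeholder image in [0, 1]
x = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0)

cam = GradCAM(model=model, target_layers=[model.layer4[-1]])
heat = cam(input_tensor=x)[0]                         # (H, W) activation map
overlay = show_cam_on_image(rgb, heat, use_rgb=True)  # uint8 heatmap overlay</code>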
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>PI represents a major global health burden affecting millions of individuals worldwide, and its early detection and accurate staging are critical for preventing lesion progression and improving clinical outcomes. In Europe and North America, PIs of Stage II and above are regarded as &#x201c;never events.&#x201d; Nevertheless, the incidence of PI remains alarmingly high, reaching up to 66% among patients undergoing prolonged surgical procedures and approximately 50% in intensive care units (<xref ref-type="bibr" rid="B8">Hajhosseini et al., 2020</xref>). In China, the accelerating aging of the population and the uneven distribution of medical resources mean that specialized PI teams are predominantly concentrated in large tertiary hospitals, posing persistent challenges for early diagnosis and accurate staging in primary healthcare settings. Previous studies have reported PI healing rates of only 5.1%&#x2013;29.9% (<xref ref-type="bibr" rid="B20">Sun et al., 2017</xref>); such poor healing not only prolongs hospital stays but also substantially increases healthcare costs (<xref ref-type="bibr" rid="B6">Haesler et al., 2022</xref>). Against this background, the present study developed and validated PI-3DAS, a multi-function AI-assisted system that delivers a closed-loop output encompassing lesion detection, automated staging, and wound size measurement within a unified framework, while also achieving explainable visualization and terminal deployment.</p>
<p>Effective PI management depends critically on early and accurate identification. For less experienced healthcare professionals, precise PI staging remains challenging and may be compromised by insufficient training, limited information, observer bias, and the heterogeneous appearance of wounds (<xref ref-type="bibr" rid="B13">LeBlanc et al., 2019</xref>; <xref ref-type="bibr" rid="B4">Bates-Jensen et al., 2019</xref>). In addition, patient-related factors such as skin pigmentation, age, and overall health status can further confound visual assessment (<xref ref-type="bibr" rid="B5">Boyko et al., 2018</xref>). Consequently, there is an urgent need for more efficient, accurate, and objective automated methods for PI detection and staging. Computer-aided diagnosis has emerged as a focal area of clinical research. <xref ref-type="bibr" rid="B19">&#x160;&#xed;n et al. (2022)</xref> applied machine learning to clinical data from intensive care unit patients to predict PI risk, reporting an accuracy of 96.0% with a random forest model. In the field of PI image analysis, <xref ref-type="bibr" rid="B2">Aldughayfiq et al. (2023)</xref> developed a YOLOv5-based model that achieved an mAP50 of 0.769. In contrast, by adopting the more advanced YOLOv11 architecture, our study achieved superior detection performance (mAP50 of 0.842) and demonstrated robust automated PI staging, with an overall accuracy of 92.64%, sensitivity of 79.79%, and specificity of 95.56%. Notably, the model achieved particularly high accuracy for suspected deep tissue injury, reaching 96.45%.</p>
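<p>For clarity, the staging sensitivity and specificity reported here are one-vs-rest quantities derived from the multiclass confusion matrix. A minimal sketch of that computation is shown below, using an invented three-class matrix rather than the study&#x2019;s actual counts.</p>
<code language="Python"># One-vs-rest sensitivity and specificity from a multiclass confusion matrix;
# the 3x3 matrix below is invented for illustration, not the study's counts
import numpy as np

cm = np.array([[50, 3, 2],
               [4, 40, 6],
               [1, 5, 45]])   # rows: true class, columns: predicted class
for k in range(cm.shape[0]):
    tp = cm[k, k]
    fn = cm[k].sum() - tp
    fp = cm[:, k].sum() - tp
    tn = cm.sum() - tp - fn - fp
    sens = tp / (tp + fn)
    spec = tn / (tn + fp)
    print(f"class {k}: sensitivity={sens:.3f}, specificity={spec:.3f}")</code>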
<p>Beyond staging, objective quantification of wound size represents a critical challenge in PI management. Measurements of wound dimensions, including length, width, and area, directly influence staging decisions, treatment planning, and evaluation of healing progression. The NPUAP Pressure Ulcer Scale for Healing (PUSH) Tool 3.0, which is widely used in clinical practice, instructs clinicians to measure the greatest length (head to toe) and the greatest width (side to side) and to multiply these measurements to estimate surface area. Although the length &#xd7; width method may overestimate wound area by approximately 44% for irregularly shaped wounds (<xref ref-type="bibr" rid="B17">Sahbudak and Gunes, 2026</xref>), it remains the predominant measurement approach in clinical settings owing to its simplicity, practicality, and acceptable reproducibility for monitoring wound status over time. The EPUAP/NPIAP/PPPIA 2019 International Clinical Practice Guideline continues to recommend standardized wound size measurement using this approach.</p>
<p>Conventional measurement methods rely primarily on rulers or flexible tapes, which are not only labor-intensive but also involve direct contact with damaged tissue, making it difficult to comply with aseptic and minimal-contact principles. Previous studies have suggested that digital wound measurement may serve as a non-contact and objective alternative. For example, <xref ref-type="bibr" rid="B14">Liu et al. (2023)</xref> combined U-Net and Mask R-CNN with a LiDAR camera to achieve PI region segmentation and size measurement; however, this approach depends on additional hardware and still yielded a mean relative error of 26.2%, limiting its applicability in routine ward and bedside settings. <xref ref-type="bibr" rid="B2">Aldughayfiq et al. (2023)</xref> developed a YOLOv5-based model for automated PI detection and staging, which improved recognition efficiency and staging consistency but did not address automatic wound size quantification. In contrast, the present study integrates YOLOv11 with ArUco marker&#x2013;based calibration to construct a unified multi-function system encompassing detection, staging, and size measurement, thereby enabling non-contact digital quantification. PI-3DAS demonstrated excellent agreement in both length and width measurements (ICC &#x3e;0.99), with mean biases below 0.05 cm, and achieved an approximately 37-fold improvement in measurement efficiency compared with manual methods. This approach effectively addresses the limitations of previous studies with respect to objective wound size quantification and practical bedside applicability.</p>
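<p>A simplified sketch of ArUco-based pixel-to-centimeter calibration is shown below. The 2 cm marker side length, the input file name, and the example bounding box are assumptions made for illustration, and the detector API follows OpenCV 4.7 and later (<monospace>cv2.aruco.ArucoDetector</monospace>).</p>
<code language="Python"># ArUco-based pixel-to-centimeter calibration sketch (OpenCV 4.7+ API);
# marker size, file name and bounding box are illustrative assumptions
import cv2
import numpy as np

img = cv2.imread("wound_with_marker.jpg")    # hypothetical input photograph
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
detector = cv2.aruco.ArucoDetector(dictionary, cv2.aruco.DetectorParameters())
corners, ids, _ = detector.detectMarkers(img)
assert ids is not None, "no ArUco marker detected"

MARKER_CM = 2.0                                # printed marker side length (assumed)
side_px = np.linalg.norm(corners[0][0][0] - corners[0][0][1])  # one edge in pixels
px_per_cm = side_px / MARKER_CM

# Convert a detector bounding box (pixels) to centimeters
x1, y1, x2, y2 = 120, 80, 460, 310             # illustrative box coordinates
length_cm = (x2 - x1) / px_per_cm
width_cm = (y2 - y1) / px_per_cm
print(f"length={length_cm:.2f} cm, width={width_cm:.2f} cm")</code>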
<p>Beyond performance validation, the clinical value of an AI system fundamentally depends on its deployability and credibility. In this study, the best-performing YOLOv11s model was deployed as an interactive web-based application (PI-3DAS) using the Streamlit framework. In contrast to most existing intelligent PI systems, which remain limited to algorithmic validation or rely on high-performance servers and specialized hardware, the proposed system achieves lightweight deployment without additional hardware requirements. Users can perform PI image or video analysis directly through mobile or desktop browsers, enabling flexible application across hospital wards, nursing homes, and home-care settings. To further enhance the transparency of model decision-making, Grad-CAM visualization was integrated into the system. The resulting heatmaps indicate that the model&#x2019;s attention is primarily focused on clinically relevant regions, such as wound margins and areas of tissue abnormality, which are consistent with key clinical assessment criteria. This interpretability feature facilitates healthcare professionals&#x2019; understanding of AI-generated results and strengthens their confidence in AI-assisted evaluations.</p>
<p>This study has several limitations. At present, wound size measurement is based primarily on two-dimensional images and does not incorporate three-dimensional information such as wound depth. Additionally, the current bounding-box-based measurement method aligns with routine clinical ruler measurements, providing length and width estimates rather than precise wound area. While pixel-wise segmentation approaches (e.g., U-Net) may offer more accurate area quantification, they entail a substantially greater annotation burden and higher computational complexity. Future studies may consider integrating segmentation-based methods for applications where precise area measurement is clinically indicated. Furthermore, although the dataset was derived from two independent medical centers, broader external validation across diverse healthcare settings and patient populations is needed to further establish the system&#x2019;s generalizability.</p>
</sec>
<sec sec-type="conclusion" id="s5">
<label>5</label>
<title>Conclusion</title>
<p>In summary, this study developed and validated a multi-function AI-assisted system (PI-3DAS) based on the YOLOv11 neural network that integrates automated pressure injury detection, staging, and wound size measurement. The results demonstrate that the system achieves high accuracy and consistency in lesion recognition, automated staging, and wound dimension quantification. Moreover, terminal deployment and explainable visualization enhance the system&#x2019;s usability and reliability in clinical nursing practice.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Ethics Committee of Changshu Hospital Affiliated to Soochow University. The studies were conducted in accordance with the local legislation and institutional requirements. Written informed consent for participation was not required from the participants or the participants&#x2019; legal guardians/next of kin in accordance with the national legislation and institutional requirements.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>ZW: Data curation, Funding acquisition, Methodology, Writing &#x2013; original draft. YX: Data curation, Validation, Writing &#x2013; original draft. KX: Formal Analysis, Methodology, Software, Visualization, Writing &#x2013; review and editing. YD: Data curation, Writing &#x2013; original draft. XX: Funding acquisition, Project administration, Resources, Supervision, Writing &#x2013; original draft, Writing &#x2013; review and editing. JC: Conceptualization, Project administration, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing.</p>
</sec>
<ack>
<title>Acknowledgements</title>
<p>We sincerely thank Engineer Zihao Zhang from Shanghai Haoxiong Education Technology Co., Ltd. for his valuable technical support throughout the course of this study. We also express our gratitude to the OpenMMLab team (<ext-link ext-link-type="uri" xlink:href="https://openmmlab.com/">https://openmmlab.com/</ext-link>) for their valuable guidance and support in the field of artificial intelligence.</p>
</ack>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Agnes</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Anitha</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Pandian</surname>
<given-names>S. I. A.</given-names>
</name>
<name>
<surname>Peter</surname>
<given-names>J. D.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Classification of mammogram images using multiscale all convolutional neural network (MA-CNN)</article-title>. <source>J. Med. Syst.</source> <volume>44</volume> (<issue>1</issue>), <fpage>30</fpage>. <pub-id pub-id-type="doi">10.1007/s10916-019-1494-z</pub-id>
<pub-id pub-id-type="pmid">31838610</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aldughayfiq</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Ashfaq</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Jhanjhi</surname>
<given-names>N. Z.</given-names>
</name>
<name>
<surname>Humayun</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>YOLO-based deep learning model for pressure ulcer detection and classification</article-title>. <source>Healthcare (Basel)</source> <volume>11</volume> (<issue>9</issue>), <fpage>1222</fpage>. <pub-id pub-id-type="doi">10.3390/healthcare11091222</pub-id>
<pub-id pub-id-type="pmid">37174764</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Athalye</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Arnaout</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Domain-guided data augmentation for deep learning on medical imaging</article-title>. <source>PLoS One</source> <volume>18</volume> (<issue>3</issue>), <fpage>e0282532</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0282532</pub-id>
<pub-id pub-id-type="pmid">36952442</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bates-Jensen</surname>
<given-names>B. M.</given-names>
</name>
<name>
<surname>McCreath</surname>
<given-names>H. E.</given-names>
</name>
<name>
<surname>Harputlu</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Patlan</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Reliability of the Bates-Jensen wound assessment tool for pressure injury assessment: the pressure ulcer detection study</article-title>. <source>Wound Repair Regen.</source> <volume>27</volume> (<issue>4</issue>), <fpage>386</fpage>&#x2013;<lpage>395</lpage>. <pub-id pub-id-type="doi">10.1111/wrr.12714</pub-id>
<pub-id pub-id-type="pmid">30828890</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Boyko</surname>
<given-names>T. V.</given-names>
</name>
<name>
<surname>Longaker</surname>
<given-names>M. T.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>G. P.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Review of the current management of pressure ulcers</article-title>. <source>Adv. Wound Care</source> <volume>7</volume> (<issue>2</issue>), <fpage>57</fpage>&#x2013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1089/wound.2016.0697</pub-id>
<pub-id pub-id-type="pmid">29392094</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Haesler</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Pittman</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cuddigan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Law</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>Y. Y.</given-names>
</name>
<name>
<surname>Balzer</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>An exploration of the perspectives of individuals and their caregivers on pressure ulcer/injury prevention and management to inform the development of a clinical guideline</article-title>. <source>J. Tissue Viability</source> <volume>31</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1016/j.jtv.2021.10.008</pub-id>
<pub-id pub-id-type="pmid">34776327</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Haavisto</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Stolt</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Puukka</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Korhonen</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Kielo-Viljamaa</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Consistent practices in pressure ulcer prevention based on international care guidelines: a cross-sectional study</article-title>. <source>Int. Wound J.</source> <volume>19</volume> (<issue>5</issue>), <fpage>1141</fpage>&#x2013;<lpage>1157</lpage>. <pub-id pub-id-type="doi">10.1111/iwj.13710</pub-id>
<pub-id pub-id-type="pmid">34761513</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hajhosseini</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Longaker</surname>
<given-names>M. T.</given-names>
</name>
<name>
<surname>Gurtner</surname>
<given-names>G. C.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Pressure injury</article-title>. <source>Ann. Surg.</source> <volume>271</volume> (<issue>4</issue>), <fpage>671</fpage>&#x2013;<lpage>679</lpage>. <pub-id pub-id-type="doi">10.1097/SLA.0000000000003567</pub-id>
<pub-id pub-id-type="pmid">31460882</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Headlam</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Illsley</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Pressure ulcers: an overview</article-title>. <source>Br. J. Hosp. Med. (Lond.)</source> <volume>81</volume> (<issue>12</issue>), <fpage>1</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.12968/hmed.2020.0074</pub-id>
<pub-id pub-id-type="pmid">33377838</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="web">
<person-group person-group-type="author">
<name>
<surname>Jocher</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Chaurasia</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Qiu</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Ultralytics YOLOv8</article-title>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://github.com/ultralytics/ultralytics">https://github.com/ultralytics/ultralytics</ext-link>
</comment> (<comment>Accessed January 29, 2026</comment>).</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Kang</surname>
<given-names>L. W.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>I. S.</given-names>
</name>
<name>
<surname>Chou</surname>
<given-names>K. L.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>S. Y.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>C. Y.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Image-based real-time fire detection using deep learning with data augmentation for vision-based surveillance applications</article-title>&#x201d;. <publisher-loc>Taipei, Taiwan</publisher-loc>, <fpage>1</fpage>&#x2013;<lpage>4</lpage>.</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kottner</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cuddigan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Carville</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Balzer</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Berlowitz</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Law</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>Prevention and treatment of pressure ulcers/injuries: the protocol for the second update of the international clinical practice guideline 2019</article-title>. <source>J. Tissue Viability</source> <volume>28</volume> (<issue>2</issue>), <fpage>51</fpage>&#x2013;<lpage>58</lpage>. <pub-id pub-id-type="doi">10.1016/j.jtv.2019.01.001</pub-id>
<pub-id pub-id-type="pmid">30658878</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>LeBlanc</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Woo</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Bassett</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Botros</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Professionals&#x27; knowledge, attitudes, and practices related to pressure injuries in Canada</article-title>. <source>Adv. Skin Wound Care</source> <volume>32</volume> (<issue>5</issue>), <fpage>228</fpage>&#x2013;<lpage>233</lpage>. <pub-id pub-id-type="doi">10.1097/01.ASW.0000554444.52120.f6</pub-id>
<pub-id pub-id-type="pmid">31008758</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>T. J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Christian</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>C. W.</given-names>
</name>
<name>
<surname>Lai</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Tai</surname>
<given-names>H. C.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Automatic segmentation and measurement of pressure injuries using deep learning models and a LiDAR camera</article-title>. <source>Sci. Rep.</source> <volume>13</volume> (<issue>1</issue>), <fpage>680</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-26812-9</pub-id>
<pub-id pub-id-type="pmid">36639395</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mervis</surname>
<given-names>J. S.</given-names>
</name>
<name>
<surname>Phillips</surname>
<given-names>T. J.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Pressure ulcers: pathophysiology, epidemiology, risk factors, and presentation</article-title>. <source>J. Am. Acad. Dermatol.</source> <volume>81</volume> (<issue>4</issue>), <fpage>881</fpage>&#x2013;<lpage>890</lpage>. <pub-id pub-id-type="doi">10.1016/j.jaad.2018.12.069</pub-id>
<pub-id pub-id-type="pmid">30664905</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qiu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Rong</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ye</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>YOLF-ShipPnet: improved RetinaNet with pyramid vision transformer</article-title>. <source>Int. J. Comput. Intell. Syst.</source> <volume>16</volume> (<issue>1</issue>), <fpage>58</fpage>. <pub-id pub-id-type="doi">10.1007/s44196-023-00235-4</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sahbudak</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Gunes</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>2026</year>). <article-title>Comparing digital, Mobile and three-dimensional methods in pressure injury measurement: agreement in surface area and depth assessments</article-title>. <source>J. Clin. Nurs.</source> <volume>35</volume> (<issue>1</issue>), <fpage>172</fpage>&#x2013;<lpage>181</lpage>. <pub-id pub-id-type="doi">10.1111/jocn.17813</pub-id>
<pub-id pub-id-type="pmid">40346762</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shin</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Roth</surname>
<given-names>H. R.</given-names>
</name>
<name>
<surname>Gao</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Nogues</surname>
<given-names>I.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>Deep convolutional neural networks for computer-aided detection: CNN architectures, dataset characteristics and transfer learning</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>35</volume> (<issue>5</issue>), <fpage>1285</fpage>&#x2013;<lpage>1298</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2016.2528162</pub-id>
<pub-id pub-id-type="pmid">26886976</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>&#x160;&#xed;n</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Hokynkov&#xe1;</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Marie</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Andrea</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Kr&#x10d;</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Podrou&#x17e;ek</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Machine learning-based pressure ulcer prediction in modular critical care data</article-title>. <source>Diagnostics</source> <volume>12</volume> (<issue>4</issue>), <fpage>850</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics12040850</pub-id>
<pub-id pub-id-type="pmid">35453898</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Ni</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Ye</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>A clinicoepidemiological profile of chronic wounds in a wound healing department in Shanghai</article-title>. <source>Int. J. Low. Extrem. Wounds</source> <volume>16</volume> (<issue>1</issue>), <fpage>36</fpage>&#x2013;<lpage>44</lpage>. <pub-id pub-id-type="doi">10.1177/1534734617696730</pub-id>
<pub-id pub-id-type="pmid">28682680</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tusar</surname>
<given-names>M. H.</given-names>
</name>
<name>
<surname>Fayyazbakhsh</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zendehdel</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Mochalin</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Melnychuk</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Gould</surname>
<given-names>L.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>AI-powered image-based assessment of pressure injuries using You Only Look Once (YOLO) version 8 models</article-title>. <source>Adv. Wound Care</source>. <pub-id pub-id-type="doi">10.1089/wound.2024.0245</pub-id>
<pub-id pub-id-type="pmid">40081991</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/513570/overview">Peng Zhang</ext-link>, Huazhong University of Science and Technology, China</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1850733/overview">Xiao Li</ext-link>, Hubei University of Technology, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3363500/overview">Die Hu</ext-link>, Huazhong University of Science and Technology, China</p>
</fn>
</fn-group>
</back>
</article>