<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Comput. Sci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Computer Science</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Comput. Sci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2624-9898</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fcomp.2026.1755361</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Accuracy of three-dimensional Gaussian Splatting for virtual crime scene reconstruction</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Cho</surname>
<given-names>Soujin</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3261375"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Woo</surname>
<given-names>Teakbum</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn012"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3277711"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Graduate School of Film, Digital Media and Communication, Hongik University</institution>, <city>Seoul</city>, <country country="kr">Republic of Korea</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Film and Interaction Design, Graduate School, Hongik University</institution>, <city>Seoul</city>, <country country="kr">Republic of Korea</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Soujin Cho, <email xlink:href="mailto:mintgray00@hongik.ac.kr">mintgray00@hongik.ac.kr</email></corresp>
<fn fn-type="other" id="fn012"><label>&#x2020;</label><p>ORCID: Teakbum Woo, <uri xlink:href="https://orcid.org/0009-0009-0515-6198">orcid.org/0009-0009-0515-6198</uri></p></fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-09">
<day>09</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>8</volume>
<elocation-id>1755361</elocation-id>
<history>
<date date-type="received">
<day>27</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>23</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>23</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Cho and Woo.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Cho and Woo</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-09">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>With the rapid advancement of artificial intelligence, three-dimensional (3D) Gaussian Splatting (3DGS), which reconstructs 3D data from standard photographs and videos, has garnered increasing attention in digital forensic applications. This study evaluated the quantitative accuracy of 3DGS-based virtual crime scene reconstruction to determine its suitability for forensic documentation. To this end, a mock crime scene was constructed, and both photographs and videos were captured using a DSLR camera to generate a virtual environment through 3DGS. Since the generated environment inherently possesses only relative scale, a &#x2018;Reference Object-based Scale Calibration&#x2019; method was employed to establish absolute dimensions by adjusting the scale of the entire virtual space based on the physical measurements of a single reference object. The reconstructed object dimensions were then compared with actual measurements in two phases: a preliminary test involving seven objects and a main test involving 13 objects provided by the Seoul Metropolitan Police Agency. The results demonstrated millimeter-level accuracy, with mean measurement errors ranging from 0.25 to 0.65&#x202F;mm in the preliminary test and from 1.73 to 3.58&#x202F;mm in the main test. Notably, while larger objects such as desks and doors exhibited stable reconstruction accuracy, smaller or thinner items like bloodstains showed higher relative errors due to scale-induced artifacts; however, their absolute errors remained small. Overall, these findings underscore the potential of 3DGS as a reliable and practical tool for the digital preservation and reconstruction of crime scenes.</p>
</abstract>
<kwd-group>
<kwd>3D Gaussian Splatting</kwd>
<kwd>3D reconstruction</kwd>
<kwd>crime scene preservation</kwd>
<kwd>digital forensics</kwd>
<kwd>reconstruction accuracy</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research was supported by the Culture, Sports and Tourism R&#x0026;D Program through the Korea Creative Content Agency grant funded by the Ministry of Culture, Sports and Tourism in 2024 (Project Name: Near Real Time 4D Nerf-based VFX System &#x2018;WITH&#x2019; R&#x0026;D and R&#x0026;D PBL, Project Number: RS-2024-00349479, Contribution Rate: 100%).</funding-statement>
</funding-group>
<counts>
<fig-count count="13"/>
<table-count count="6"/>
<equation-count count="5"/>
<ref-count count="14"/>
<page-count count="15"/>
<word-count count="9661"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Computer Graphics and Visualization</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>The accurate preservation of crime scenes has long played a pivotal role in revealing key facts and supporting legal proceedings. In modern society, the reliability of criminal investigations depends not only on securing scientific evidence but also on the ability to present it effectively. Historically, photographs, sketches, and witness statements served as the primary methods for documenting crime scenes. However, these traditional approaches were limited in their ability to capture the full spatial context. Furthermore, <xref ref-type="bibr" rid="ref2">Durnal (2010)</xref> highlighted the &#x2018;CSI effect,&#x2019; noting that media influence has led jurors and judicial officials to hold extremely high expectations for visual scientific evidence. This phenomenon underscores the growing demand for high-quality crime scene reconstruction technologies that go beyond simple documentation.</p>
<p>To address these limitations and meet such expectations, various three-dimensional (3D) technology-based recording and analysis techniques have been explored over the past several decades. Among these, technologies such as photogrammetry, 3D scanning, and Light Detection and Ranging (LiDAR) have enabled the reconstruction of crime scenes as sophisticated 3D datasets rather than simple 2D images. This added dimensionality allows for a more precise analysis of physical features and spatial relationships among objects. Consequently, these technological advances have substantially improved the fidelity of scene preservation and strengthened the overall reliability of forensic science.</p>
<p>Notably, the field of 3D reconstruction has reached a turning point with the emergence of 3D Gaussian Splatting (3DGS), a technique that leverages recent advances in artificial intelligence (AI) to enable real-time 3D reconstruction and high-resolution video and image rendering without dedicated scanning equipment. A key advantage of this splatting technique is its ability to rapidly generate 3D reconstructions from photos or videos captured with readily available devices, including mirrorless, DSLR, and even phone cameras. This flexibility also allows 3DGS to visualize subjects from multiple viewpoints and using virtual cameras in real time, rendering images at a resolution similar to that of the original content. Compared with conventional 3D scans, which encompass large, high-resolution datasets that require specialized software for viewing, 3DGS can render high-quality results using far fewer data points. Consequently, 3DGS outputs are not only compatible with existing 3D production tools but can also be integrated into real-time, interactive platforms such as web pages, virtual reality environments, and game engines, making them versatile.</p>
<p>However, because 3DGS, like photogrammetry, reconstructs 3D objects and environments from ordinary photos and videos, the resulting models exhibit detailed geometry but possess only relative scale. Whereas laser-based 3D scanning yields absolute dimensions directly, image-based reconstruction alone cannot determine absolute sizes and lengths. Consequently, it is difficult to determine whether the reconstructed 3D objects are accurate enough for reliable use in criminal investigations.</p>
<p>Due to these structural characteristics, previous studies on 3DGS have predominantly focused on technical optimizations to enhance visual rendering quality or general benchmark performance. However, applied research verifying the geometric precision of the reconstructed 3D space for practical domains requiring rigorous reliability, such as forensic science, remains relatively scarce. Given that the permanent preservation of the crime scene is of paramount importance in forensics, relying solely on traditional data collection methods like photography is insufficient. To effectively reconstruct camera-captured data into 3D virtual environments and utilize them for various forensic simulations, a rigorous quantitative verification of the 3D data&#x2019;s accuracy from a practical, operational perspective must be established as a prerequisite.</p>
<p>To address this constraint, this study employs a technique termed &#x2018;Reference Object-based Scale Calibration.&#x2019; This method involves designating a specific object physically measured at the scene as a reference to rescale the entire 3DGS-reconstructed environment. This process is executed within general-purpose 3D modeling programs that support standard units, such as 3ds Max, thereby offering the advantage of enabling forensic investigators to utilize 3D data within a familiar software environment. Given that 3DGS reconstructs the entire scene as a single unified entity rather than separate components, this study aimed to evaluate how accurately the measurements of other objects within the calibrated virtual scene correspond to their actual real-world dimensions.</p>
<p>Overall, to assess the accuracy of 3DGS-based scene reconstruction, the following methodology was adopted. First, prior studies on crime scene reconstruction using photogrammetry, 3D scanning, and 3DGS were reviewed to establish a theoretical foundation. Based on this review, two experiments were subsequently conducted to reconstruct a mock crime scene using 3DGS. The accuracy of these reconstructions was evaluated by comparing the virtual measurements with those obtained from actual objects using a tape measure.</p>
<p>This investigation carries significance because if a 3DGS-reconstructed space, scaled using a reference object, achieves an accuracy comparable to that of the physical scene, it may eliminate the need for direct measurements of individual elements during forensic investigations.</p>
<p>This possibility is particularly compelling given that photography already plays a central role in modern forensic evidence collection, meaning that reconstructions can be generated using existing camera equipment without the need for additional hardware. Moreover, as the reconstructed data is compatible with standard 3D graphics software, it allows for practical forensic applications ranging from scene reconstruction to complex simulations. By enabling the permanent virtual preservation of crime scenes, this approach is expected to prove valuable in long-term unsolved cases and in investigations where physical scene access is limited or preservation is unfeasible.</p>
</sec>
<sec id="sec2">
<label>2</label>
<title>Literature review</title>
<sec id="sec3">
<label>2.1</label>
<title>Photogrammetry and 3D scanning</title>
<p>Analytical photogrammetry, which involves analyzing photographs based on the principles of perspective and projective geometry, can be traced back to the early 19th century, when photography first emerged. Building upon these foundational principles, modern 3D scanning methods are generally classified into four categories: laser scanning, structured light, LiDAR, and photogrammetry.</p>
<p>Among these methods, the laser, structured light, and LiDAR scanning approaches operate by detecting the distance traveled by reflected light or laser beams and the angles of projection, enabling rapid and precise measurements of an object&#x2019;s detailed geometry and absolute dimensions. However, because only surfaces that are directly accessible to light or laser beams can be captured, scanning large areas requires multiple passes followed by the alignment of individual scans. This is further compounded by the high cost of professional 3D scanners, often costing tens of thousands of dollars, and the requirement for dedicated post-processing software, which limits accessibility. To address this limitation, LiDAR components were miniaturized and commercialized in 2020 as sensors integrated into Apple iPhones and iPad devices, thereby enabling mobile-based 3D scanning. While these mobile versions also exhibit certain drawbacks, including reduced accuracy compared to professional scanners, shorter detection ranges, and susceptibility to environmental factors like sunlight and weather, they remain highly accessible, offering rapid 3D scanning capabilities using only a mobile device (<xref ref-type="bibr" rid="ref1">Cho and Hwang, 2024</xref>).</p>
<p>In parallel, photogrammetry, a photographic measurement technique, reconstructs 3D models by applying optical principles and projective geometry techniques to overlapping images captured from multiple angles around the subject. From these images, 3D coordinates and color values are calculated based on both the distance between the camera and object and the spatial distribution of points within each image. However, because the resulting 3D measurements are derived from relative differences between photographs, accuracy diminishes in occluded regions and in areas with fine geometric detail. Moreover, generating high-density point cloud data from numerous high-resolution images entails substantial processing time (<xref ref-type="bibr" rid="ref1">Cho and Hwang, 2024</xref>).</p>
<p>Building on this foundation, numerous researchers worldwide have investigated the application of 3D scanning in digital forensics and crime scene reconstruction. <xref ref-type="bibr" rid="ref3">Esposito et al. (2023)</xref> analyzed case studies utilizing laser scanning (LS) technology for forensic investigation. Their work demonstrated the utility of LS in creating immediate scale plans, reconstructing cold cases, clarifying suicidal intent through simulation, and studying COVID-19 transmission pathways. They concluded that LS is a fundamental tool for &#x2018;crystallizing&#x2019; the investigative scene, proving its high value in forensic pathology.</p>
<p>Concurrently, studies focusing on accessible mobile technologies have also been conducted. <xref ref-type="bibr" rid="ref13">Sheshtar et al. (2025)</xref> compared the results of mobile-phone-based photogrammetry and LiDAR scanning when reconstructing indoor crime scenes. They scanned a mock indoor crime scene, comprising dummies, furniture, reference markers, and bloodstains, using an Apple iPhone 15 Pro Max with the Recon-3D application for mobile LiDAR scanning and the Pix4D solution for photogrammetry. Scans were performed under varying lighting conditions (daytime and nighttime) and scan durations (5 and 10&#x202F;min) as independent variables. The results demonstrated that both scanning methods achieved accuracy levels sufficient for forensic use. Specifically, the 5-min daytime LiDAR scan yielded the highest accuracy. However, performance limitations were observed under challenging conditions; nighttime photogrammetry deteriorated when using flash, and LiDAR scans required approximately 9&#x202F;h of cloud processing, thereby underscoring the critical influence of environmental conditions and processing time.</p>
<p>Further, <xref ref-type="bibr" rid="ref14">Yang et al. (2024)</xref> investigated the use of mobile photogrammetry for 3D wound analysis by scanning clay blocks, pig skin, and bone models using the Scaniverse 3D Scanner app on an iPhone. They also assessed the accuracy and practical applicability of the resulting depth data. All models were successfully reconstructed in under 1&#x202F;min on average, and the measurements showed no statistically significant differences from physical measurements, indicating that mobile photogrammetry could serve as a valuable supplementary tool for documenting wounds at crime scenes.</p>
<p>Based on this evidence, several crime scene units and research institutions in South Korea, including the National Police Agency and the National Forensic Service, have begun using 3D scanners to document real crime scenes during forensic casework (<xref ref-type="bibr" rid="ref12">Seoul Public News, 2016</xref>). Furthermore, in July 2025, the police announced the launch of Policelab 3.0, a next-generation initiative aimed at developing advanced scene analysis technologies integrating 3D scanning and AI from 2025 to 2030 (<xref ref-type="bibr" rid="ref8">MBN, 2025</xref>). However, despite this institutional momentum, the high cost of 3D scanner equipment and stringent security protocols have limited widespread deployment across all forensic investigation sites.</p>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>Neural radiance fields (NeRF) and 3DGS</title>
<p>Recent advances in AI technologies have led to the emergence of approaches such as NeRF and 3DGS, driving substantial shifts across the 3D scanning and graphics industries. Among these, NeRF, introduced by <xref ref-type="bibr" rid="ref9">Mildenhall et al. (2020)</xref>, is a deep neural network that predicts the volume density (<italic>&#x03C3;</italic>) and color values required for rendering 3D scenes by applying deep learning to 2D images. It processes photographs and videos captured with smartphones or ordinary cameras and generates 3D information far more rapidly than conventional photogrammetry owing to its deep-learning-based training. Instead of constructing a 3D model, it directly renders 2D images or videos from 3D representations. Moreover, by incorporating optimizations proposed in follow-up investigations, it reduces training times to enable image rendering within 30&#x202F;min to 1&#x202F;h. This line of work gained further traction with the advent of 3DGS: <xref ref-type="bibr" rid="ref5">Kerbl et al. (2023)</xref> presented a volume rendering method that directly visualizes 3D volumetric data using Gaussians, thereby circumventing the need for mesh conversion. Furthermore, because 3DGS produces high-resolution images from low-density 3D data without relying on dense point clouds or large mesh files, it enables real-time high-definition rendering and demonstrates broad application potential in areas such as virtual studios, virtual reality and extended reality platforms, animation, video content production, and gaming.</p>
<p>In a previous study, <xref ref-type="bibr" rid="ref4">Fukuda et al. (2024)</xref> quantitatively compared the strengths and limitations of NeRF and photogrammetry, noting that while conventional photogrammetry performs well for objects with clearly defined patterns or texture-rich surfaces, NeRF yields superior reconstructions for objects with monochromatic, low-texture surfaces, as well as metallic, reflective, or translucent materials.</p>
<p>Traditional photogrammetry requires extensive processing time to generate high-density point cloud data and presents challenges in managing massive datasets. Similarly, NeRF is limited by slow training speeds. In contrast, 3DGS enables rapid training and real-time rendering, making it highly suitable for forensic investigations where securing the &#x2018;critical time window&#x2019; is essential.</p>
<p>While international studies attempting to apply NeRF and 3DGS technologies to crime scene reconstruction and digital forensics are beginning to emerge, research in this specific domain remains relatively scarce.</p>
<p>For instance, <xref ref-type="bibr" rid="ref7">Malik et al. (2024)</xref> evaluated the feasibility of adopting NeRF as an alternative to photogrammetry during forensic autopsy documentation by comparing reconstructions produced by Instant NeRF and photogrammetry across diverse forensic subjects, including discolored corpses, metal containers, vehicles, and simulated crime scenes. Their results revealed that although the NeRF-based model exhibited lower overall spatial resolution, it achieved higher fidelity in reconstructing forms, colors, and textures of varied surface materials, such as reflective metal surfaces, discolored skin, and plastic components.</p>
<p>Extending the scope of NeRF applications, <xref ref-type="bibr" rid="ref11">Remondino et al. (2023)</xref> reviewed its use in crime scene video analysis, using the CCTV-based UCF Crime Dataset to compare diverse NeRF variants and advanced models focused on multi-object composition, deformation synthesis, and lighting effects.</p>
<p>Further, to evaluate the efficiency, safety, and innovative potential of 3D reconstruction techniques in capturing crime scenes, <xref ref-type="bibr" rid="ref10">Rangelov et al. (2024)</xref> performed 3D reconstructions of simulated laboratory scenes, mock crime scenes at the Dutch Police Academy, and fire scenes in collaboration with the Dutch Twente regional fire department using photogrammetry and NeRF. These reconstructions exhibited high resolution, confirming that both techniques offer adequate fidelity for future crime scene investigations.</p>
<p>Overall, the above studies indicate that 3DGS can rapidly reconstruct real-world scenes and objects in high resolution using only images and photographs. In forensic contexts, where photography plays a crucial role in preserving scene information, this capability is particularly relevant as multi-angle photos and videos captured onsite can later be used to reconstruct crime scenes using 3DGS and support investigations at any stage. Moreover, because 3DGS produces compact high-quality data, managing and preserving forensic evidence becomes more efficient. Additionally, given that an increasing number of 3D software platforms now support 3DGS data and that these data can be converted into meshes and traditional point clouds, the applicability of 3DGS is expected to expand across branches of forensic research that rely on general-purpose 3D software.</p>
</sec>
</sec>
<sec sec-type="materials|methods" id="sec5">
<label>3</label>
<title>Materials and methods</title>
<sec id="sec6">
<label>3.1</label>
<title>Experimental design and workflow</title>
<p>The 3DGS technique, as employed in this study, reconstructs 3D geometry and color information from 2D images or videos captured from multiple viewpoints. While this allows for the reconstruction of relative object shapes and proportions, it does not directly yield precise measurements, unlike laser-based 3D scanning methods. Despite this limitation, 3DGS offers a practical advantage in forensic applications, as it enables scene reconstruction using multi-angle photographs or videos captured using general-purpose cameras or mobile phones, eliminating the need for specialized equipment. Furthermore, to validate previous claims about the high resolution of 3DGS reconstructions, this study evaluates the dimensional accuracy of 3D-reconstructed scenes generated using 3DGS.</p>
<p>When reconstructing a space using 3DGS, the entire scene is captured holistically, and the resulting output is generated as a unified spatial entity rather than distinct, segmented objects. As previously discussed, this reconstructed 3D scene inherently possesses a relative scale rather than absolute metrics. To align the dimensions of the virtual scene with the physical world, this study implements a method termed &#x2018;Reference Object-based Scale Calibration&#x2019;.</p>
<p>Specifically, a reference object within the scene is selected and physically measured (Value A in <xref ref-type="fig" rid="fig1">Figure 1</xref>). This ground truth measurement is then used to align the virtual dimension of the corresponding object (Value X), thereby applying a global scale adjustment (Value Y) to the entire 3DGS model to establish absolute dimensions. Once this calibration is complete, the dimensions of other target objects&#x2014;those not physically measured&#x2014;within the same virtual space (Value Z) are expected to approximate their real-world sizes. Therefore, this study aims to quantitatively verify the accuracy of these inferred measurements (Value Z) within the calibrated virtual environment.</p>
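<p>As a minimal illustration of this calibration arithmetic, the following R sketch (R being the environment used for the quantitative analysis in this study) shows how Values A, X, Y, and Z relate. The variable values are hypothetical and serve only to demonstrate the proportional logic, not to reproduce measurements from this study.</p>
<p><code language="r"># Reference Object-based Scale Calibration: a minimal illustrative sketch.
# Value A: physically measured dimension of the reference object (mm).
# Value X: dimension of the same object in the raw 3DGS scene (relative units).
A &lt;- 220        # hypothetical tape-measure reading, mm
X &lt;- 11.08      # hypothetical virtual dimension, relative units

# Value Y: global scale factor applied to the entire 3DGS model.
Y &lt;- A / X      # ~19.86, i.e., a scale setting of roughly 1,986%

# Value Z: any other (unmeasured) virtual dimension, converted to millimeters.
z_virtual &lt;- 27.6       # hypothetical target-object dimension, relative units
Z &lt;- z_virtual * Y      # inferred real-world size, mm</code></p>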
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Scaling of a 3DGS reconstruction using a real-world reference object.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line drawing compares a bedroom in &#x201C;Real World&#x201D; on the left and &#x201C;3DGS World&#x201D; on the right, showing identical furniture layouts, but with only 3DGS World labeled by XYZ axes, illustrating spatial dimension mapping.</alt-text>
</graphic>
</fig>
<p>As illustrated in <xref ref-type="fig" rid="fig2">Figure 2</xref>, the experimental procedure was conducted in three sequential phases: Phase 1 involved data acquisition and physical measurement of objects in the real-world environment; Phase 2 entailed measuring objects in the virtual space following 3DGS reconstruction; and Phase 3 focused on the quantitative accuracy analysis of the measurement results.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Experimental workflow diagram.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart with three circles showing sequential phases. Phase 1: Data acquisition and physical measurement, ground truth collection. Phase 2: 3DGS reconstruction and virtual measurement, scale calibration. Phase 3: Quantitative accuracy analysis using MAE, RMSE, MAPE, scatter plots. Black arrows link each phase.</alt-text>
</graphic>
</fig>
<p>Phase 1 involved Data Acquisition and Physical Measurement. The procedure began with the establishment of a mock crime scene, followed by the capture of photographs and video data for 3DGS reconstruction and the physical measurement of the width, length, and height of objects within the scene. For successful 3DGS reconstruction, images must be captured from various angles while moving along a trajectory to allow the software to detect positional differences between frames; this dynamic methodology contrasts with conventional static forensic photography. Following the data capture process, a specific item was designated as the &#x2018;Reference Object,&#x2019; and its exact dimensions were directly measured. Additionally, several other objects of varying sizes were measured for comparative analysis. To complement these physical measurements, the same space was also captured using a 3D scanner to generate a high-fidelity virtual model, which served to provide ground truth data for objects that could not be physically measured on-site.</p>
<p>Phase 2 focused on 3DGS Reconstruction and Virtual Measurement. In this phase, the captured images and videos were processed to digitally reconstruct the scene. The scale of the entire scene was then calibrated by aligning the virtual dimensions of the reference object with its physical measurements obtained in Phase 1. Once this alignment was achieved, the dimensions and proportions of all other objects within the scene were adjusted accordingly to approximate their actual physical sizes. As noted in previous studies by <xref ref-type="bibr" rid="ref10">Rangelov et al. (2024)</xref> and <xref ref-type="bibr" rid="ref6">Lee (2025)</xref>, camera parameters can influence reconstruction accuracy; thus, careful axis scaling was performed to address potential deviations. Detailed software procedures and environments for this phase are described in Section 3.3.</p>
<p>Phase 3 constituted the Quantitative Accuracy Analysis. In this phase, the geometric accuracy of the 3DGS reconstruction was evaluated by comparing the physical measurements obtained from the actual scene with the corresponding measurements derived from the virtual space. To quantify the accuracy of width, length, and height dimensions, three standard metrics&#x2014;Mean Absolute Error (MAE), Root Mean Square Error (RMSE), and Mean Absolute Percentage Error (MAPE)&#x2014;were calculated within the RStudio environment. Furthermore, to enhance analytical precision, objects were categorized into groups by size or further stratified by specific characteristics such as &#x2018;Thin objects&#x2019; and &#x2018;Beyond-room Area&#x2019; objects. For these subgroups, in-depth additional analyses were conducted utilizing supplemental metrics, including Median Absolute Error (MdAE) and Normalized RMSE (NRMSE).</p>
</sec>
<sec id="sec7">
<label>3.2</label>
<title>Data acquisition and experimental setup</title>
<sec id="sec8">
<label>3.2.1</label>
<title>Shooting approach for 3DGS data collection</title>
<p>Collecting 3DGS data requires a shooting approach that differs from that of standard forensic photography. It involves capturing one or more videos or consecutive photographs with a handheld camera that is moved around the scene at upper, middle, and lower heights, without zooming in or out, so that objects are captured from various angles. <xref ref-type="bibr" rid="ref10">Rangelov et al. (2024)</xref> employed four camera movement techniques&#x2014;truck, pedestal, boom, and arc&#x2014;combined according to the characteristics of the scene. Following their approach, the camera in the present study was moved vertically and horizontally along walls and around the center of the space, while the arc technique was applied at the corners of rooms and around desks.</p>
</sec>
<sec id="sec9">
<label>3.2.2</label>
<title>Pilot study: feasibility testing in controlled environment</title>
<p>Prior to the main experiment, a pilot study was conducted in a controlled laboratory environment to validate the feasibility of the proposed &#x2018;reference object-based scale calibration&#x2019; methodology. The targets for data acquisition included a planar wall, standardized desks, and various small props placed on the desks within the laboratory.</p>
<p>For the 3DGS source data, high-resolution video footage of the desks and props arranged around a wall was acquired using a DSLR camera. The specifications of the camera equipment used for data acquisition and details regarding the recorded video data are summarized in <xref ref-type="fig" rid="fig3">Figure 3</xref>. Concurrently, to establish ground truth data for accuracy verification, the physical dimensions of the furniture and props were precisely measured using a tape measure. A delivery box placed on the desk was selected as the reference object for scale calibration, while the furniture and props listed in <xref ref-type="table" rid="tab1">Table 1</xref> were designated as the target objects for measurement verification.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Information about the video used in the preliminary experiment.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Table with four columns labeled Sample Screenshot, Camera and Lens, Data Size, and Time. The row contains a desktop scene with a white cabinet, desk, cardboard box, mug, and camera bag. Equipment listed as Sony ILCE-7CM2 with FE 24-70mm F2.8 GM II lens, video resolution of 1920 by 1080 pixels, and duration of fifty-seven seconds.</alt-text>
</graphic>
</fig>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>List of objects measured in the preliminary experiment.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">No.</th>
<th align="left" valign="top" rowspan="2">Group</th>
<th align="left" valign="top" rowspan="2">Object</th>
<th align="center" valign="top" colspan="3">Real (mm)</th>
<th align="center" valign="top" colspan="3">Virtual (mm)</th>
</tr>
<tr>
<th align="center" valign="top">Width</th>
<th align="center" valign="top">Length</th>
<th align="center" valign="top">Height</th>
<th align="center" valign="top">Width</th>
<th align="center" valign="top">Length</th>
<th align="center" valign="top">Height</th>
</tr>
</thead>
<tbody>
<tr>
<td/>
<td align="left" valign="middle">Reference Object</td>
<td align="left" valign="middle">P.O. Box</td>
<td align="center" valign="middle">162</td>
<td align="center" valign="middle">220</td>
<td align="center" valign="middle">100</td>
<td align="center" valign="middle">162</td>
<td align="center" valign="middle">220</td>
<td align="center" valign="middle">100</td>
</tr>
<tr>
<td align="left" valign="middle">1</td>
<td align="left" valign="middle">Large</td>
<td align="left" valign="middle">Pedestal</td>
<td align="center" valign="middle">400</td>
<td align="center" valign="middle">552</td>
<td align="center" valign="middle">548</td>
<td align="center" valign="middle">400</td>
<td align="center" valign="middle">548</td>
<td align="center" valign="middle">547</td>
</tr>
<tr>
<td align="left" valign="middle">2</td>
<td align="left" valign="middle">Medium</td>
<td align="left" valign="middle">Mug Cup</td>
<td align="center" valign="middle">81</td>
<td align="center" valign="middle">120</td>
<td align="center" valign="middle">98</td>
<td align="center" valign="middle">81</td>
<td align="center" valign="middle">115</td>
<td align="center" valign="middle">97</td>
</tr>
<tr>
<td align="left" valign="middle">3</td>
<td align="left" valign="middle">Medium</td>
<td align="left" valign="middle">Stapler</td>
<td align="center" valign="middle">133</td>
<td align="center" valign="middle">32</td>
<td align="center" valign="middle">60</td>
<td align="center" valign="middle">133</td>
<td align="center" valign="middle">32</td>
<td align="center" valign="middle">60</td>
</tr>
<tr>
<td align="left" valign="middle">4</td>
<td align="left" valign="middle">Medium, Thin</td>
<td align="left" valign="middle">A4 Paper</td>
<td align="center" valign="middle">210</td>
<td align="center" valign="middle">291</td>
<td align="center" valign="middle">0.2</td>
<td align="center" valign="middle">210</td>
<td align="center" valign="middle">291</td>
<td align="center" valign="middle">1</td>
</tr>
<tr>
<td align="left" valign="middle">5</td>
<td align="left" valign="middle">Small, Thin</td>
<td align="left" valign="middle">Marker A</td>
<td align="center" valign="middle">50</td>
<td align="center" valign="middle">50</td>
<td align="center" valign="middle">0.2</td>
<td align="center" valign="middle">50</td>
<td align="center" valign="middle">50</td>
<td align="center" valign="middle">1</td>
</tr>
<tr>
<td align="left" valign="middle">6</td>
<td align="left" valign="middle">Small, Thin</td>
<td align="left" valign="middle">Marker B</td>
<td align="center" valign="middle">20</td>
<td align="center" valign="middle">20</td>
<td align="center" valign="middle">0.2</td>
<td align="center" valign="middle">20</td>
<td align="center" valign="middle">20</td>
<td align="center" valign="middle">1</td>
</tr>
<tr>
<td align="left" valign="middle">7</td>
<td align="left" valign="middle">Small, Thin</td>
<td align="left" valign="middle">Marker C</td>
<td align="center" valign="middle">10</td>
<td align="center" valign="middle">10</td>
<td align="center" valign="middle">0.2</td>
<td align="center" valign="middle">10</td>
<td align="center" valign="middle">10</td>
<td align="center" valign="middle">1</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec10">
<label>3.2.3</label>
<title>Main experiment: realistic mock crime scene</title>
<p>Building on the methodology validated in the pilot study, the main experiment was conducted on May 12, 2025, at a realistic mock crime scene (<xref ref-type="fig" rid="fig4">Figure 4a</xref>) in cooperation with the Seoul Metropolitan Police Agency, to evaluate the applicability of the proposed approach in actual forensic investigation scenarios.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p><bold>(a)</bold> Simulated crime scene. <bold>(b)</bold> Actual measurements of crime scene objects.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Panel a shows a room with a mannequin lying on its back beside an overturned table near a couch, suggesting a simulated accident scene. Panel b displays a close-up of hands measuring the length of a tabletop edge using a tape measure.</alt-text>
</graphic>
</fig>
<p>After the setup was completed, multiple photographs and videos were captured using a DSLR camera to generate input data for 3DGS reconstruction. The first round of image capture began with the camera positioned near the center of the scene, recording the surrounding outer walls, while the second round concentrated on the primary incident area. To ensure adequate vertical coverage, both rounds were performed at three height levels: approximately 30&#x202F;cm, 1.2&#x202F;m, and 2&#x202F;m above the floor. The camera used for image acquisition was a Sony ILCE-7CM2. During video recording, a DJI gimbal was used to stabilize the camera and reduce distortions, whereas still images were captured without the gimbal. The video and photo recording session lasted 18&#x202F;min.</p>
<p>After 3DGS data capture, the actual dimensions of the objects were measured using a tape measure, as depicted in <xref ref-type="fig" rid="fig4">Figure 4b</xref>. Subsequently, the forensic team captured photographs of the scene in accordance with standard forensic procedures. To account for objects that could not be measured owing to the limited time available before dismantling the simulated crime scene, the area was scanned using a Leica RTC360 3D scanner owned by the Seoul Metropolitan Police Agency. Details of the photographs, videos, and scans acquired at the scene are presented in <xref ref-type="fig" rid="fig5">Figure 5</xref>. Following virtual measurements and data reconstruction, a second round of physical measurements and interviews with the forensic science team was conducted at the police agency on June 17, 2025.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Photo, video, and scan data of the main experiment.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Table showing four methods of scene documentation: photos, video, forensic photos, and 3D scan. Each row includes a sample image, camera or scanner model, data size or quality, and the number of photos or scans collected.</alt-text>
</graphic>
</fig>
<p>The objects measured at the mock crime scene are listed in <xref ref-type="table" rid="tab2">Table 2</xref>. The TV stand was selected as the reference object, and differences in measurement accuracy were analyzed by selecting items that varied in size and physical characteristics. In particular, the selected objects ranged from a room door approximately 2&#x202F;m tall to small-scale features such as bloodstains, smudges on surfaces, and stab wounds on a dummy. These were subsequently classified by size, as indicated in <xref ref-type="table" rid="tab2">Table 2</xref>. The living room and kitchen were the primary areas targeted during 3DGS image capture, while the entrance and inner room adjacent to the living room were not included. Nevertheless, portions of these peripheral spaces, including a chair in the inner room and a pair of shoes in the entrance area, appeared faintly in the 3DGS reconstruction. To assess the accuracy of reconstructions in areas not primarily targeted during image capture, the presence of this chair and these shoes in the 3DGS reconstruction was confirmed, and their dimensions were measured virtually and compared with those obtained during the second round of physical measurements.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>List of objects measured in the main experiment.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">No.</th>
<th align="left" valign="top">Group</th>
<th align="left" valign="top">Object</th>
</tr>
</thead>
<tbody>
<tr>
<td/>
<td align="left" valign="middle">Reference Object</td>
<td align="left" valign="middle">TV Stand</td>
</tr>
<tr>
<td align="left" valign="middle">1</td>
<td align="left" valign="middle">Large</td>
<td align="left" valign="middle">Room Door</td>
</tr>
<tr>
<td align="left" valign="middle">2</td>
<td align="left" valign="middle">Large</td>
<td align="left" valign="middle">Kitchen Table</td>
</tr>
<tr>
<td align="left" valign="middle">3</td>
<td align="left" valign="middle">Large</td>
<td align="left" valign="middle">Dummy Doll</td>
</tr>
<tr>
<td align="left" valign="middle">4</td>
<td align="left" valign="middle">Large</td>
<td align="left" valign="middle">Broken Table</td>
</tr>
<tr>
<td align="left" valign="middle">5</td>
<td align="left" valign="middle">Large, Beyond-room Area</td>
<td align="left" valign="middle">Room Chair</td>
</tr>
<tr>
<td align="left" valign="middle">6</td>
<td align="left" valign="middle">Medium</td>
<td align="left" valign="middle">TV Remote Controller</td>
</tr>
<tr>
<td align="left" valign="middle">7</td>
<td align="left" valign="middle">Medium</td>
<td align="left" valign="middle">Dish</td>
</tr>
<tr>
<td align="left" valign="middle">8</td>
<td align="left" valign="middle">Medium, Beyond-room Area</td>
<td align="left" valign="middle">Shoe</td>
</tr>
<tr>
<td align="left" valign="middle">9</td>
<td align="left" valign="middle">Small, Thin</td>
<td align="left" valign="middle">Stab</td>
</tr>
<tr>
<td align="left" valign="middle">10</td>
<td align="left" valign="middle">Small, Thin</td>
<td align="left" valign="middle">Stain A</td>
</tr>
<tr>
<td align="left" valign="middle">11</td>
<td align="left" valign="middle">Small, Thin</td>
<td align="left" valign="middle">Stain B</td>
</tr>
<tr>
<td align="left" valign="middle">12</td>
<td align="left" valign="middle">Small, Thin</td>
<td align="left" valign="middle">Blood Stain A</td>
</tr>
<tr>
<td align="left" valign="middle">13</td>
<td align="left" valign="middle">Small, Thin</td>
<td align="left" valign="middle">Blood Stain B</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="sec11">
<label>3.3</label>
<title>Computing environment and reconstruction setup</title>
<p>To reconstruct the photo and video data acquired from the pilot and main experiments into 3DGS models, Jawset&#x2019;s Postshot software (v0.60) was utilized. This study aims to ensure practical versatility so that forensic practitioners without specialized knowledge in 3D graphics can immediately adopt this technology in the field. Furthermore, to maintain the security of criminal investigations, the software must operate solely using the GPU of a standalone PC, rather than relying on a cloud environment. As Postshot is commercial software that performs training exclusively on a local GPU, it satisfies these requirements.</p>
<p>Regarding the training configuration, all parameters were set to their default values for the pilot study. In contrast, for the main experiment, which involved large-scale datasets, specific adjustments were made to optimize reconstruction quality. Specifically, the &#x2018;Radiance Field Profile&#x2019; was set to &#x2018;Splat3&#x2019;, and the &#x2018;Downsample images&#x2019; option was unchecked (disabled) to preserve the original resolution. The &#x2018;Max image count&#x2019; was set to match the original quantity of the dataset, with a limit of 1,000 frames for video data, while all other parameters remained at their default settings.</p>
<p>The training duration was approximately 8&#x202F;min for the pilot study data. For the main experiment, it took approximately 47&#x202F;min for the photo-based model and 4&#x202F;h and 14&#x202F;min for the video-based model.</p>
<p>The computing environments for each experiment were configured as follows: the pilot experiment was conducted on a workstation running Windows 11 Pro, equipped with an Intel Core i9-14900K (3.20&#x202F;GHz) CPU, 128&#x202F;GB RAM, and an Nvidia RTX 4090 GPU. The main experiment utilized a workstation running Windows 11 Pro, equipped with an AMD Ryzen Threadripper 3970X 32-core CPU (3.70&#x202F;GHz), 128&#x202F;GB RAM, and an Nvidia RTX 4090 GPU.</p>
<p>Upon completion of training, the 3DGS scenes were extracted in PLY format using the &#x2018;Export Scene&#x2019; function, as shown in <xref ref-type="fig" rid="fig6">Figure 6a</xref>. Since the raw 3DGS output often contains background noise or floating artifacts, a post-processing step was necessary to obtain a clean virtual scene. To this end, the extracted PLY data were imported into the web-based editor SuperSplat,<xref ref-type="fn" rid="fn0001"><sup>1</sup></xref> where unnecessary noise Gaussians were removed and the scene was refined before being saved as a Splat PLY file. While this file shares the same extension as a standard point cloud PLY, it additionally contains the specific attribute information required for Gaussian Splatting rendering. The final scene, refined via SuperSplat, was saved as a Splat PLY file sized 440,009 KB, consisting of a total of 1,909,182 splats.</p>
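<p>Although the Splat PLY format was not modified in this study, its ASCII header can be inspected to verify the splat count and the Gaussian-specific attributes it carries. The following R sketch, using a hypothetical file name, reads only the header lines of such a file:</p>
<p><code language="r"># Inspect the ASCII header of an exported Splat PLY (file name hypothetical).
# The header declares the splat count ("element vertex ...") and the per-splat
# attributes required for Gaussian Splatting rendering.
header &lt;- readLines("scene.ply", n = 5, warn = FALSE)
grep("element vertex", header, value = TRUE)  # e.g., "element vertex 1909182"</code></p>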
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption>
<p><bold>(a)</bold> Screenshot of a 3DGS output generated in the Postshot program and <bold>(b)</bold> visualization in 3ds Max after importing the 3DGS output and scaling it to match the reference object.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g006.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Panel (a) shows a 3D point cloud reconstruction of a cluttered office scene on the Postshot software interface, with camera positions overlaid. Panel (b) displays a rendered 3D model in Autodesk 3ds Max, depicting a workspace with a mannequin lying on the floor next to an overturned table.</alt-text>
</graphic>
</fig>
<p>Subsequently, the Splat PLY data was imported into 3ds Max, with the system unit configured to millimeters, as a V-Ray 7 Gaussian Splatting geometry object, as shown in <xref ref-type="fig" rid="fig6">Figure 6b</xref>. The scale and proportions of the entire geometry object were then calibrated to align precisely with the physically measured dimensions of the reference object. As 3ds Max does not natively support 3DGS, the objects were imported and rendered using the V-Ray plugin.</p>
<p>In the pilot study, the scale of the entire 3DGS scene data was calibrated based on the physical dimensions of the reference object (delivery box). The applied scaling factors were set to 1,984.5% for the X-axis, 1,990% for the Y-axis, and 1,985% for the Z-axis. Subsequently, as illustrated in <xref ref-type="fig" rid="fig7">Figures 7a</xref>,<xref ref-type="fig" rid="fig7">b</xref>, the dimensions of the props and furniture within the virtual scene were measured by creating and aligning virtual proxy objects to match the boundaries of each target item.</p>
<fig position="float" id="fig7">
<label>Figure 7</label>
<caption>
<p>Measurement of <bold>(a)</bold> furniture and <bold>(b)</bold> desktop props in the pilot-study virtual scene, performed by aligning virtual proxy objects with the boundaries of each target item.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g007.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Panel (a) shows a white rectangular cabinet next to a desk with various objects, emphasizing edges using black and white checkered markers. Panel (b) focuses on the tabletop with a brown box, white mug, paper, and office supplies, each object outlined by checkered markers indicating key geometry.</alt-text>
</graphic>
</fig>
<p>These measurements revealed that, after importing the 3DGS data into 3ds Max and scaling it using the reference object, the X-, Y-, and Z-axes were not scaled uniformly, with minor inter-axis deviations of approximately 0.25&#x2013;0.27%. This discrepancy was consistent with initial predictions made during the experimental design phase and confirmed the necessity of per-axis, reference-object-based calibration rather than uniform scaling.</p>
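<p>The same ratio logic extends to per-axis calibration. The R sketch below uses the real reference-box dimensions from <xref ref-type="table" rid="tab1">Table 1</xref> together with hypothetical raw 3DGS dimensions chosen to reproduce factors of the magnitude reported above:</p>
<p><code language="r"># Per-axis scale factors: real reference dimensions (mm, Table 1) divided by
# raw 3DGS dimensions (relative units; the virtual values here are hypothetical).
real_ref    &lt;- c(x = 162, y = 220, z = 100)
virtual_ref &lt;- c(x = 8.1633, y = 11.0550, z = 5.0378)

scale_pct &lt;- 100 * real_ref / virtual_ref
round(scale_pct, 1)  # approx. 1984.5 (X), 1990.0 (Y), 1985.0 (Z)

# Inter-axis spread (%), on the order of the deviations noted above.
100 * (max(scale_pct) / min(scale_pct) - 1)</code></p>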
<p>For the main experiment, the reconstruction process followed the same workflow established in the pilot study. Consequently, the final reconstructed output was generated as a PLY data file sized 686,091 KB, consisting of a total of 2,976,928 splats.</p>
<p>The TV stand was designated as the reference object. Based on its physical dimensions, the scale of the entire scene geometry was adjusted to 5,180% along the X-axis, 5,076.4% along the Y-axis, and 5,066.2% along the Z-axis. To measure the width, length, and height of the selected target objects within the reconstructed virtual scene, virtual proxy objects were created and aligned with the targets, as illustrated in <xref ref-type="fig" rid="fig8">Figures 8a</xref>,<xref ref-type="fig" rid="fig8">b</xref>, and their dimensions were subsequently verified.</p>
<fig position="float" id="fig8">
<label>Figure 8</label>
<caption>
<p>Virtual measurement of target objects in the main-experiment reconstruction using aligned virtual proxy objects: <bold>(a)</bold> the primary incident area and <bold>(b)</bold> the kitchen table area.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g008.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">For panel (a), a simulation of a crime scene features a mannequin lying supine on the floor beside an overturned chair, with marker labels and a computer desk in a domestic room. For panel (b), a dining table with two chairs displays labeled evidence markers next to drinking glasses, an empty bowl, utensils, a bottle, and a cup, all within a kitchenette setting.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec12">
<label>3.4</label>
<title>Accuracy assessment metrics and object categorization</title>
<sec id="sec13">
<label>3.4.1</label>
<title>Standard evaluation metrics</title>
<p>To evaluate the accuracy of the reconstruction, a quantitative analysis was conducted within the RStudio environment by comparing the physical measurements of width, length, and height obtained from the actual scene with the corresponding virtual measurements derived from the reconstructed objects. Primarily, three standard evaluation metrics were employed: Mean Absolute Error (MAE), Root Mean Square Error (RMSE), and Mean Absolute Percentage Error (MAPE).</p>
<p>Among these metrics, MAE quantifies the average absolute difference between actual and virtual measurements. In this calculation, the sign of the error was disregarded, and only its magnitude was considered. This magnitude was then expressed in physical units (centimeters or millimeters), as defined in <xref ref-type="disp-formula" rid="E1">Equation 1</xref>:</p>
<disp-formula id="E1">
<mml:math id="M1">
<mml:mi mathvariant="italic">MAE</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:munderover>
<mml:mo movablelimits="false">&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:munderover>
<mml:mo>&#x2223;</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo stretchy="true">&#x0302;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2223;</mml:mo>
</mml:math>
<label>(1)</label>
</disp-formula>
<p>Further, RMSE represents the square root of the mean squared error, making it more sensitive to larger deviations and thereby providing a more conservative estimate of accuracy than MAE. As with MAE, RMSE values were also expressed in physical units (centimeters or millimeters), as shown in <xref ref-type="disp-formula" rid="E2">Equation 2</xref>:</p>
<disp-formula id="E2">
<mml:math id="M2">
<mml:mtext mathvariant="italic">RMSE</mml:mtext>
<mml:mo>=</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:munderover>
<mml:mo movablelimits="false">&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:munderover>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo stretchy="true">&#x0302;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo stretchy="true">)</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:msqrt>
</mml:math>
<label>(2)</label>
</disp-formula>
<p>MAPE is defined as the percentage error relative to the actual value, averaged across all measurements. Because each error is weighted by the corresponding measured value, MAPE is useful for comparing relative accuracy among objects of varying sizes. It is expressed in %, and larger values indicate lower reconstruction accuracy, as expressed in <xref ref-type="disp-formula" rid="E3">Equation 3</xref>:</p>
<disp-formula id="E3">
<mml:math id="M3">
<mml:mtext mathvariant="italic">MAPE</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>100</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:munderover>
<mml:mo movablelimits="false">&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:munderover>
<mml:mo>&#x2223;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo stretchy="true">&#x0302;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mfrac>
<mml:mo>&#x2223;</mml:mo>
</mml:math>
<label>(3)</label>
</disp-formula>
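<p>Although the quantitative analysis in this study was performed in RStudio, the following minimal Python sketch of <xref ref-type="disp-formula" rid="E1">Equations 1</xref>&#x2013;<xref ref-type="disp-formula" rid="E3">3</xref> is provided for reference; it assumes paired lists of ground-truth (y) and virtual (y_hat) measurements and is not the authors&#x2019; original analysis script.</p>
<code language="python" xml:space="preserve">
import math

def mae(y, y_hat):
    """Mean Absolute Error (Equation 1), in the units of the inputs."""
    return sum(abs(a - b) for a, b in zip(y, y_hat)) / len(y)

def rmse(y, y_hat):
    """Root Mean Square Error (Equation 2), in the units of the inputs."""
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(y, y_hat)) / len(y))

def mape(y, y_hat):
    """Mean Absolute Percentage Error (Equation 3), in percent."""
    return (100.0 / len(y)) * sum(abs((a - b) / a) for a, b in zip(y, y_hat))
</code>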
</sec>
<sec id="sec14">
<label>3.4.2</label>
<title>Object categorization strategy and supplemental metrics for detailed analysis</title>
<p>In addition to analyzing the entire dataset using standard metrics, the objects were categorized into distinct groups based on their varied geometric and spatial characteristics to facilitate comparative analysis. Furthermore, for groups exhibiting specific attributes that may limit the effectiveness of standard metrics, supplemental metrics were employed to conduct more in-depth additional analyses.</p>
<p>First, objects were categorized by size to determine whether reconstruction accuracy is influenced by the scale of the object. Based on the maximum length of the object, they were classified into Small (under 10&#x202F;cm), Medium (10&#x2013;50&#x202F;cm), and Large (over 50&#x202F;cm) groups.</p>
<p>Second, objects were classified into Normal and Thin groups to compare the results for objects with minimal thickness, such as paper. Because 3DGS reconstructs geometry by overlapping soft Gaussian kernels, objects with extremely thin geometry, such as paper or bloodstains, tend to exhibit blurred boundaries. Since this characteristic can lead to errors that are disproportionately large relative to the actual thickness, objects with a height of 5&#x202F;mm or less were classified into the Thin group, while all others were categorized as Normal.</p>
<p>Finally, in the main experiment, during the process of capturing and reconstructing an entire scene using 3DGS, certain spaces and objects&#x2014;such as areas beyond a fence or objects inside a room seen through an open door&#x2014;may be partially generated despite not being the primary focus of the capture. This study aimed to verify whether these peripherally reconstructed objects possess sufficient accuracy for forensic crime scene analysis. Accordingly, these objects were categorized into a separate group labeled Beyond-room Area, and an independent analysis was conducted to evaluate the specific accuracy of this group.</p>
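<p>The categorization rules described above can be summarized in the following Python sketch, using the thresholds stated in the text; the Beyond-room Area group is assigned manually rather than by a rule, and all names are illustrative.</p>
<code language="python" xml:space="preserve">
def size_group(max_length_cm):
    """Large: over 50 cm; Medium: 10-50 cm; Small: under 10 cm."""
    if max_length_cm > 50:
        return "Large"
    if max_length_cm >= 10:
        return "Medium"
    return "Small"

def thickness_group(height_mm):
    """Thin: height of 5 mm or less; Normal: all other objects."""
    return "Normal" if height_mm > 5 else "Thin"

# Objects reconstructed outside the primary capture area (e.g., beyond a fence
# or seen through an open door) are flagged manually as "Beyond-room Area".
</code>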
<p>The Thin group tends to exhibit inflated relative errors due to its minimal thickness dimensions, which can bias standard mean-based metrics (MAE, RMSE) and limit the comprehensive assessment of data reliability. To mitigate such statistical bias, two supplemental metrics were introduced specifically for analyzing the height of objects in the Thin group.</p>
<p>First, Median Absolute Error (MdAE) calculates the median value among all absolute errors. This metric provides a stable representation of the central tendency by preventing a small number of unusually high values (outliers) from skewing the overall average, as defined in <xref ref-type="disp-formula" rid="E4">Equation 4</xref>:</p>
<disp-formula id="E4">
<mml:math id="M4">
<mml:mtext mathvariant="italic">MdAE</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext mathvariant="italic">Median</mml:mtext>
<mml:mo stretchy="true">(</mml:mo>
<mml:mo>&#x2223;</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo stretchy="true">&#x0302;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2223;</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
<label>(4)</label>
</disp-formula>
<p>Second, Normalized Root Mean Squared Error (NRMSE) was employed to facilitate objective comparison. Since thin objects possess inherently small absolute dimensions, direct comparison using standard RMSE can be statistically biased. By normalizing the RMSE against the range of the ground-truth measurements, this approach allows for a standardized evaluation of relative accuracy across all object groups, regardless of their scale, as shown in <xref ref-type="disp-formula" rid="E5">Equation 5</xref>:</p>
<disp-formula id="E5">
<mml:math id="M5">
<mml:mtext mathvariant="italic">NRMSE</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:msqrt>
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:msubsup>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo stretchy="true">&#x0302;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo stretchy="true">)</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:msqrt>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>max</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>min</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:math>
<label>(5)</label>
</disp-formula>
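<p>As with the standard metrics, a minimal Python sketch of the supplemental metrics in <xref ref-type="disp-formula" rid="E4">Equations 4</xref>, <xref ref-type="disp-formula" rid="E5">5</xref> follows; it assumes the same paired measurement lists and is provided for illustration only.</p>
<code language="python" xml:space="preserve">
import math
import statistics

def mdae(y, y_hat):
    """Median Absolute Error (Equation 4): robust to a few large outliers."""
    return statistics.median(abs(a - b) for a, b in zip(y, y_hat))

def nrmse(y, y_hat):
    """RMSE normalized by the ground-truth range (Equation 5), unitless."""
    rmse = math.sqrt(sum((a - b) ** 2 for a, b in zip(y, y_hat)) / len(y))
    return rmse / (max(y) - min(y))
</code>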
</sec>
</sec>
</sec>
<sec sec-type="results" id="sec15">
<label>4</label>
<title>Results</title>
<sec id="sec16">
<label>4.1</label>
<title>Preliminary validation from pilot study</title>
<p>Upon visual inspection of the reconstructed results, it was observed that moderately sized objects were rendered with clarity, whereas very thin objects, such as paper markers, exhibited indistinct boundaries due to the inherent characteristics of 3DGS Gaussian kernels. Additionally, Gaussian scattering artifacts were identified on reflective surfaces such as desk drawers. Since this study focuses on geometric accuracy rather than visual rendering quality, measurements were conducted by aligning tools with the center of the boundaries, regardless of edge distinctness.</p>
<p>According to the quantitative analysis presented in <xref ref-type="table" rid="tab3">Table 3</xref>, the proposed methodology achieved a high level of geometric accuracy suitable for practical applications. Specifically, the Mean Absolute Error (MAE) for the planar dimensions of Width and Length was measured at 0.25&#x202F;mm and 1.25&#x202F;mm, respectively, with Root Mean Square Error (RMSE) also maintaining low values. These quantitative results demonstrate that the reference object-based scale calibration technique effectively synchronizes the overall dimensional proportions of the virtual space with the actual scene.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Dimensional accuracy metrics from the pilot study.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th>Metric</th>
<th align="center" valign="top">Width</th>
<th align="center" valign="top">Length</th>
<th align="center" valign="top">Height</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">MAE</td>
<td align="char" valign="top" char=".">0.25&#x202F;mm</td>
<td align="char" valign="top" char=".">1.25&#x202F;mm</td>
<td align="char" valign="top" char=".">0.65&#x202F;mm</td>
</tr>
<tr>
<td align="left" valign="top">RMSE</td>
<td align="char" valign="top" char=".">0.71&#x202F;mm</td>
<td align="char" valign="top" char=".">2.29&#x202F;mm</td>
<td align="char" valign="top" char=".">0.75&#x202F;mm</td>
</tr>
<tr>
<td align="left" valign="top">MAPE</td>
<td align="char" valign="top" char=".">0.21%</td>
<td align="char" valign="top" char=".">1.65%</td>
<td align="char" valign="top" char=".">200.15%</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>However, a distinct pattern was observed in the Height dimension. Although the absolute precision was good, with an MAE of 0.65&#x202F;mm (remaining below 1&#x202F;mm), the relative metric, MAPE, showed an unusually high value of 200.15%. This discrepancy is interpreted as relative error inflation caused by the minimal physical thickness of objects like A4 paper: despite minute absolute errors, the percentage error is amplified by the extremely small denominator (the actual thickness). For instance, an absolute error of only 0.2&#x202F;mm on a sheet of office paper roughly 0.1&#x202F;mm thick already corresponds to a 200% relative error. These pilot study data suggest that while 3DGS accurately specifies the physical location of thin objects, their relative proportions may appear distorted. Consequently, these observations served as the rationale for establishing a separate &#x2018;Thin Group&#x2019; and adopting supplemental error analysis in the subsequent main experiment.</p>
</sec>
<sec id="sec17">
<label>4.2</label>
<title>Comprehensive accuracy analysis of main experiment</title>
<sec id="sec18">
<label>4.2.1</label>
<title>Qualitative inspection of reconstructed scene</title>
<p>Upon conducting a visual inspection of the 3D reconstructed mock crime scene in the main experiment, the following characteristics were identified and subsequently reflected in the analysis of the results.</p>
<p>First, certain walls in the mock crime scene were constructed of glass, a material with high reflectivity. The reconstruction of these highly reflective surfaces resulted in scattered artifacts, as visualized in <xref ref-type="fig" rid="fig9">Figure 9a</xref>. This phenomenon, consistent with observations from the pilot study, appears to be attributable to reflected imagery causing geometric errors in the reconstruction of glass, mirrors, or smooth metallic surfaces. Consequently, although bloodstains were present on these glass walls, some were rendered with poor visibility or appeared blurred due to these reflections.</p>
<fig position="float" id="fig9">
<label>Figure 9</label>
<caption>
<p>Rendered images of the reconstructed <bold>(a)</bold> glass wall and <bold>(b)</bold> blood stain.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g009.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Panel (a) shows part of a black armchair with red trim in a room with glass and wooden doors, and a mannequin or figure lying on the floor. Panel (b) displays a blurred, purple object beneath a black-and-white checkerboard reference marker on a pale background.</alt-text>
</graphic>
</fig>
<p>Similarly, regarding floor stains or minute bloodstains, the inherent nature of 3DGS&#x2014;which renders edges as blended splats&#x2014;became apparent when viewed at close range (<xref ref-type="fig" rid="fig9">Figure 9b</xref>). This characteristic made it visually challenging to define precise boundaries for extremely small or thin objects, thereby complicating accurate measurement of their height or exact dimensions. Based on these visual inspections, objects with minimal thickness were categorized into a distinct &#x2018;Thin Group&#x2019; and analyzed separately from the &#x2018;Normal Group&#x2019; in the subsequent quantitative analysis.</p>
<p>While the primary capture was focused on the living room and kitchen areas, the reconstruction results also included peripheral spaces such as the entrance, bathroom, and inner rooms, as shown in <xref ref-type="fig" rid="fig10">Figure 10</xref>. However, the reconstruction quality of these areas was relatively poor, characterized by blurriness or scattered splats, compared to the primary focus areas. To verify whether objects in these unrecorded spaces maintain a functional level of accuracy, the dimensions of the chair inside the room (<xref ref-type="fig" rid="fig10">Figure 10a</xref>) and the shoes at the entrance (<xref ref-type="fig" rid="fig10">Figure 10b</xref>) were measured using secondary on-site measurements and mock forensic photographs, and then compared with the virtual dimensions. These two objects were classified into the Beyond-room Area group for supplemental analysis.</p>
<fig position="float" id="fig10">
<label>Figure 10</label>
<caption>
<p>Rendered images of areas that were not the primary focus of data acquisition, exhibiting blurred reconstruction: <bold>(a)</bold> the bathroom interior and a chair inside the room, and <bold>(b)</bold> shoes at the entrance.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g010.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Panel a shows the interior of an apartment with a view into a bathroom featuring a toilet and a storage cabinet, as well as a desk and chair in a study area. Panel b depicts an entryway with a pair of black shoes, a white cabinet, a red fire extinguisher, and a partial view of a kitchen countertop with a stove.</alt-text>
</graphic>
</fig>
<p>Finally, the length dimensions of the &#x2018;doll&#x2019; and &#x2018;chair&#x2019;, as well as the height dimension of the &#x2018;shoe&#x2019;, were excluded from the analysis. Since the ground truth values for these specific dimensions were not obtained during the physical measurement stage, valid comparisons with the virtual measurements were not feasible.</p>
</sec>
<sec id="sec19">
<label>4.2.2</label>
<title>Overall and group-wise accuracy</title>
<p>To conduct the quantitative evaluation of the main experiment, we first analyzed the geometric accuracy of the width, length, and height dimensions for the entire set of furniture and props within the mock crime scene. For this purpose, the physically measured dimensions (Ground Truth) acquired from the actual site were compared with the virtual measurements of the corresponding objects in the 3DGS environment. Consequently, the quantitative evaluation metrics&#x2014;MAE, RMSE, and MAPE&#x2014;were calculated and are presented in <xref ref-type="table" rid="tab4">Table 4</xref>.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Quantitative evaluation of dimensional accuracy in the main experiment: overall and size-based group analysis.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Metric</th>
<th align="center" valign="top">Width</th>
<th align="center" valign="top">Length</th>
<th align="center" valign="top">Height</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">MAE</td>
<td align="char" valign="middle" char=".">3.585</td>
<td align="char" valign="middle" char=".">1.727</td>
<td align="char" valign="middle" char=".">3.1</td>
</tr>
<tr>
<td align="left" valign="top">RMSE</td>
<td align="char" valign="middle" char=".">6.066</td>
<td align="char" valign="middle" char=".">2.195</td>
<td align="char" valign="middle" char=".">6.547</td>
</tr>
<tr>
<td align="left" valign="top">MAPE (%)</td>
<td align="char" valign="middle" char=".">4.597</td>
<td align="char" valign="middle" char=".">3.258</td>
<td align="char" valign="middle" char=".">100.976</td>
</tr>
</tbody>
</table>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Group</th>
<th align="left" valign="top">Metric</th>
<th align="center" valign="top">Width</th>
<th align="center" valign="top">Length</th>
<th align="center" valign="top">Height</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top" rowspan="3">Large</td>
<td align="left" valign="top">MAE</td>
<td align="char" valign="middle" char=".">4.8</td>
<td align="char" valign="middle" char=".">2.333</td>
<td align="center" valign="middle">7</td>
</tr>
<tr>
<td align="left" valign="top">RMSE</td>
<td align="char" valign="middle" char=".">7.975</td>
<td align="char" valign="middle" char=".">2.380</td>
<td align="center" valign="middle">10.129</td>
</tr>
<tr>
<td align="left" valign="top">MAPE (%)</td>
<td align="char" valign="middle" char=".">0.454</td>
<td align="char" valign="middle" char=".">0.946</td>
<td align="center" valign="middle">0.675</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">Medium</td>
<td align="left" valign="top">MAE</td>
<td align="char" valign="middle" char=".">3.334</td>
<td align="char" valign="middle" char=".">1.667</td>
<td align="center" valign="middle">0.5</td>
</tr>
<tr>
<td align="left" valign="top">RMSE</td>
<td align="char" valign="middle" char=".">5.774</td>
<td align="char" valign="middle" char=".">2.889</td>
<td align="center" valign="middle">0.708</td>
</tr>
<tr>
<td align="left" valign="top">MAPE (%)</td>
<td align="char" valign="middle" char=".">1.754</td>
<td align="char" valign="middle" char=".">1.667</td>
<td align="center" valign="middle">4.167</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">Small</td>
<td align="left" valign="top">MAE</td>
<td align="char" valign="middle" char=".">2.52</td>
<td align="char" valign="middle" char=".">1.4</td>
<td align="center" valign="middle">0.24</td>
</tr>
<tr>
<td align="left" valign="top">RMSE</td>
<td align="char" valign="middle" char=".">3.474</td>
<td align="char" valign="middle" char=".">1.483</td>
<td align="center" valign="middle">0.268</td>
</tr>
<tr>
<td align="left" valign="top">MAPE (%)</td>
<td align="char" valign="middle" char=".">10.446</td>
<td align="char" valign="middle" char=".">5.599</td>
<td align="center" valign="middle">240</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>According to the findings, the MAEs in width, length, and height for all objects were 3.58&#x202F;mm, 1.73&#x202F;mm, and 3.1&#x202F;mm, respectively, while the corresponding RMSE values were 6.07&#x202F;mm, 2.20&#x202F;mm, and 6.55&#x202F;mm. Furthermore, the MAPE, representing relative error, was low for width and length at 4.60 and 3.26%, respectively, but exhibited an unusually high value of 100.98% for height. This suggests that specific objects within the dataset, such as bloodstains with minimal physical thickness, contributed to relative error inflation.</p>
<p>To conduct a more precise evaluation, the objects were categorized into three groups based on their size: Large, Medium, and Small (<xref ref-type="table" rid="tab4">Table 4</xref>). The analysis indicated that the overall reconstruction precision was satisfactory for practical application. The Large group demonstrated the most stable performance, with MAE values of 4.8&#x202F;mm, 2.33&#x202F;mm, and 7&#x202F;mm, and MAPE values of 0.45, 0.95, and 0.68% for width, length, and height, respectively. The Medium group also maintained high accuracy, recording MAE values of 3.33&#x202F;mm, 1.67&#x202F;mm, and 0.5&#x202F;mm, with MAPE values of 1.75, 1.67, and 4.17%.</p>
<p>A notable trend was observed in the Small group. While this group exhibited the lowest absolute error values (MAE: 2.52&#x202F;mm, 1.4&#x202F;mm, 0.24&#x202F;mm), the MAPE for height increased to 240%. Despite the low absolute error (under 1&#x202F;mm), this substantial increase in relative error implies that the Small group includes objects sensitive to scale-induced artifacts. Consequently, to rigorously clarify these characteristics, a more granular analysis focusing on object thickness and location is presented in the following section.</p>
</sec>
<sec id="sec20">
<label>4.2.3</label>
<title>Accuracy characteristics by object thickness and location</title>
<p>To deeply investigate the impact of geometric characteristics, specifically thickness, on 3D reconstruction accuracy metrics, we classified the dataset into a &#x2018;Normal Group&#x2019; and a &#x2018;Thin Group&#x2019;. We then conducted a comparative analysis of the Median Absolute Error (MdAE) and Normalized Root Mean Square Error (NRMSE) specifically for the height dimension.</p>
<p>The results indicate that the Normal Group achieved stable reconstruction performance in terms of both absolute and relative errors, with an MdAE of 2.0&#x202F;mm and an NRMSE of 0.419% (<xref ref-type="table" rid="tab5">Table 5</xref>). In contrast, the Thin Group exhibited a relatively high NRMSE of 29.814%, yet recorded an MdAE of 0.3&#x202F;mm, which is significantly lower than that of the Normal Group (2.0&#x202F;mm).</p>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>Comparative analysis of height accuracy metrics (MdAE and NRMSE) between normal and thin groups in the main experiment.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Group</th>
<th align="center" valign="top">Count</th>
<th align="center" valign="top">MdAE</th>
<th align="center" valign="top">NRMSE (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Normal group height</td>
<td align="center" valign="top">7</td>
<td align="center" valign="top">2</td>
<td align="char" valign="top" char=".">0.419</td>
</tr>
<tr>
<td align="left" valign="top">Thin group height</td>
<td align="center" valign="top">5</td>
<td align="center" valign="top">0.3</td>
<td align="char" valign="top" char=".">29.814</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>These contrasting results have significant implications. The elevated NRMSE in the Thin Group does not indicate a degradation in reconstruction performance; rather, it is attributed to &#x2018;relative error inflation,&#x2019; where even minute errors translate into high percentages relative to the extremely small physical thickness (on the order of millimeters) of the objects. Conversely, the MdAE of 0.3&#x202F;mm empirically demonstrates that the proposed 3DGS-based method possesses sub-millimeter reconstruction accuracy, achieving precise results even for thin objects. In conclusion, despite the numerical sensitivity observed in relative metrics, the absolute precision in terms of physical measurement remains consistently high.</p>
<p>Generally, 3D reconstruction techniques demonstrate high accuracy within the central capture volume where training data is abundant but often suffer from performance degradation in peripheral areas with sparse data. To assess the accuracy of objects located in these peripheral zones, a separate accuracy analysis was conducted on the objects classified into the &#x2018;Beyond-room Area&#x2019; group (specifically, &#x2018;L_Chair_R&#x2019; and &#x2018;M_Shoe_P&#x2019;). Given the limited sample size and missing ground truth data for certain dimensions in this group, a pooled analysis method was adopted, aggregating all valid measurements across width, length, and height into a single metric to ensure statistical validity.</p>
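<p>A minimal sketch of this pooling step is given below, assuming each object is stored as a mapping from dimension name to a (ground-truth, virtual) pair in millimeters, with None marking dimensions that lack ground truth; the data layout and names are hypothetical.</p>
<code language="python" xml:space="preserve">
def pooled_pairs(objects):
    """Merge all valid (ground-truth, virtual) pairs across width, length,
    and height into a single list, skipping unmeasured dimensions."""
    pairs = []
    for measurements in objects:  # e.g., {"width": (305.0, 300.5), "height": (None, 42.0)}
        for real, virtual in measurements.values():
            if real is not None and virtual is not None:
                pairs.append((real, virtual))
    return pairs

# The pooled list can then be fed to the mae/rmse/mape helpers sketched earlier.
</code>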
<p>The analysis yielded an MAE of 6.25&#x202F;mm and an RMSE of 10.308&#x202F;mm, with a MAPE of 1.86% (<xref ref-type="table" rid="tab6">Table 6</xref>). Although the absolute error values are slightly higher compared to objects within the central area, the relative error rate (MAPE) remaining below 2% indicates that high precision is maintained. In conclusion, these results confirm the robustness of the proposed 3DGS model, demonstrating its ability to stably reconstruct geometric information without significant degradation, even in boundary regions where training views may be relatively sparse.</p>
<table-wrap position="float" id="tab6">
<label>Table 6</label>
<caption>
<p>Accuracy evaluation results for beyond-room area group objects in the main experiment.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Metric</th>
<th align="center" valign="top">Data_points</th>
<th align="center" valign="top">MAE</th>
<th align="center" valign="top">RMSE</th>
<th align="center" valign="top">MAPE (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom">Beyond_room area group</td>
<td align="center" valign="bottom">4</td>
<td align="char" valign="bottom" char=".">6.25</td>
<td align="char" valign="bottom" char=".">10.308</td>
<td align="char" valign="bottom" char=".">1.860</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec21">
<label>4.2.4</label>
<title>Visual verification</title>
<p>To provide a more in-depth verification of the quantitative results, we generated scatter plots separated by object size groups (Large, Medium, Small), as shown in <xref ref-type="fig" rid="fig11">Figures 11</xref>&#x2013;<xref ref-type="fig" rid="fig13">13</xref>. Considering the significant size variations between groups, customized axis scales optimized for each group&#x2019;s data range were applied to clearly identify subtle error trends.</p>
<fig position="float" id="fig11">
<label>Figure 11</label>
<caption>
<p>Group-wise scatter plot of width analysis with custom ranges.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g011.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Three-panel scatter plot compares real and virtual width measurements for three groups: large (red, 300 to 2100 millimeters), medium (green, 10 to 280 millimeters), and small (blue, 0.1 to 135 millimeters). Each plot features a red dashed line indicating ideal agreement.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig12">
<label>Figure 12</label>
<caption>
<p>Group-wise scatter plot of length analysis with custom ranges.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g012.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Three scatterplots compare real versus virtual lengths across custom ranges for large, medium, and small groups. Each plot includes colored data points and a red dashed line indicating perfect agreement between real and virtual measurements.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig13">
<label>Figure 13</label>
<caption>
<p>Group-wise scatter plot of height analysis with custom ranges.</p>
</caption>
<graphic xlink:href="fcomp-08-1755361-g013.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Height Analysis (Custom Ranges) consists of three scatterplots comparing real versus virtual heights across large, medium, and small groups, each with a red dashed identity line and observed points closely aligned along the diagonal.</alt-text>
</graphic>
</fig>
<p>The analysis reveals that, overall, the data points across all groups are distributed in close proximity to the <italic>y</italic>&#x202F;=&#x202F;<italic>x</italic> reference line (red dashed line). This confirms that the 3DGS reconstruction model maintains geometric correspondence regardless of object size.</p>
<p>In detail, regarding the Width values in <xref ref-type="fig" rid="fig11">Figure 11</xref>, the overall linear proportional relationship was stably maintained. While slight deviations were observed in a few isolated points within the Medium and Small groups, the majority of data points were distributed in close proximity to the reference line.</p>
<p>In contrast, for both the Length values in <xref ref-type="fig" rid="fig12">Figure 12</xref> and the Height values in <xref ref-type="fig" rid="fig13">Figure 13</xref>, the data points across all groups were distributed in close proximity to the reference line, demonstrating consistent correspondence.</p>
<p>Of particular note in <xref ref-type="fig" rid="fig13">Figure 13</xref> is the distribution of Height in the Small group. It is visually evident that the data is densely clustered within a significantly smaller absolute range (0.1&#x2013;10&#x202F;mm) compared to other axes (Width, Length) or other groups. This visual evidence supports the earlier quantitative finding that the high relative error (MAPE) for the Small group&#x2019;s height, despite its low absolute error (MAE), is attributable to scale-induced artifacts inherent to the objects&#x2019; geometry.</p>
<p>Overall, these visual analysis results demonstrate that 3DGS-based reconstruction provides a reliable level of geometric consistency across objects of varying sizes and shapes, suggesting that this technology is practically viable in forensic contexts for crime scene reconstruction.</p>
</sec>
</sec>
</sec>
<sec sec-type="conclusions" id="sec22">
<label>5</label>
<title>Conclusion</title>
<p>This study empirically verified the quantitative accuracy and forensic applicability of 3D Gaussian Splatting (3DGS) technology for crime scene reconstruction. The results demonstrate that 3DGS can reconstruct 3D scenes with millimeter-level accuracy using only standard DSLR cameras or smartphones, eliminating the need for expensive laser scanners. In particular, by adopting a workflow that integrates commercial 3DGS software with standard 3D programs commonly used in practice, this study ensured high accessibility for forensic investigators while enhancing data security by performing training locally on a PC GPU. Consequently, it can be concluded that 3DGS possesses high practical value as a scalable tool capable of securing the &#x2018;critical time window&#x2019; of an investigation through immediate digital preservation of the scene, with significant potential for expansion into various forensic applications.</p>
<p>The quantitative analysis revealed that the measurement errors in the main experiment averaged between 1.73 and 3.58&#x202F;mm, falling well within the acceptable tolerance range of conventional manual measurements. In the in-depth analysis based on object characteristics, the Normal group exhibited stable accuracy. Conversely, while the Thin group showed higher relative errors due to scale-induced artifacts, its absolute error (MdAE) remained at 0.3&#x202F;mm, demonstrating sub-millimeter reconstruction accuracy. This indicates that despite potential visual blurring of edges, the physical geometric information is precisely preserved. Thus, regardless of relative error inflation, 3DGS retains sufficient evidentiary value for specifying the location and shape of thin objects.</p>
<p>From a practical perspective, limitations such as artifacts caused by reflective surfaces (glass, glossy materials) and reduced visibility of minute bloodstains were identified. Therefore, rather than relying solely on 3DGS, we recommend a hybrid approach: utilizing 3DGS to precisely reconstruct the overall spatial layout and large objects, while supplementing minute evidence with high-resolution forensic photographs or texture mapping. This methodology will serve as a powerful tool for permanently preserving scenes in cases where preservation is difficult or for verifying hypotheses through simulation in cold case investigations.</p>
<p>Although this study was conducted in a controlled indoor mock crime scene, future research should expand to verify the utility of the technology in more challenging environments, such as outdoor scenes or fire sites with variable lighting conditions. Furthermore, if the &#x2018;Reference Object-based Scale Calibration&#x2019; process proposed in this study is automated or established as a standardized manual, 3DGS is expected to become an essential standard procedure in future forensic investigations.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec23">
<title>Data availability statement</title>
<p>The datasets presented in this article are not readily available because the dataset used in this study consists of photographs and video recordings captured within a restricted area managed by the Korean National Police Agency. Due to security regulations and confidentiality requirements, these materials cannot be publicly released or shared. Access to the dataset is strictly limited, and distribution is prohibited to ensure the protection of sensitive locations and law-enforcement protocols. Requests to access the datasets should be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="sec24">
<title>Author contributions</title>
<p>SC: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Software, Writing &#x2013; original draft. TW: Conceptualization, Investigation, Methodology, Resources, Software, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="COI-statement" id="sec25">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec26">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec27">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0002">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/967229/overview">Dominic Gascho</ext-link>, University of Zurich, Switzerland</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0003">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1235358/overview">Massimiliano Esposito</ext-link>, Kore University of Enna, Italy</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3316338/overview">Dimitar Rangelov</ext-link>, University of Twente, Netherlands</p>
</fn>
</fn-group>
<fn-group>
<fn id="fn0001"><label>1</label><p><ext-link xlink:href="https://superspl.at/editor/" ext-link-type="uri">https://superspl.at/editor/</ext-link></p></fn>
</fn-group>
</back>
</article>