<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="review-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Bioeng. Biotechnol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Bioengineering and Biotechnology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Bioeng. Biotechnol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-4185</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1732519</article-id>
<article-id pub-id-type="doi">10.3389/fbioe.2025.1732519</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Precision measurement of stratum corneum thickness in OCT images using variational autoencoders and advanced DSP techniques</article-title>
<alt-title alt-title-type="left-running-head">Qin and Wang</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fbioe.2025.1732519">10.3389/fbioe.2025.1732519</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Qin</surname>
<given-names>Haiyu</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3255807"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wang</surname>
<given-names>Yang</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>Department of Electrical and Electronic Engineering, University of Sheffield</institution>, <city>Sheffield</city>, <country country="GB">United Kingdom</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>Department of Nursing, Faculty of Medicine, Universiti Kebangsaan Malaysia</institution>, <city>Kuala Lumpur</city>, <country country="MY">Malaysia</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Haiyu Qin, <email xlink:href="mailto:qhy8957@163.com">qhy8957@163.com</email>
</corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-15">
<day>15</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>13</volume>
<elocation-id>1732519</elocation-id>
<history>
<date date-type="received">
<day>26</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>20</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>29</day>
<month>12</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Qin and Wang.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Qin and Wang</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-15">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Optical coherence tomography (OCT) has emerged as a cornerstone technique for <italic>in vivo</italic> skin imaging; however, reliable and clinically meaningful quantification of stratum corneum (SC) thickness remains challenging. This review summarizes 2&#xa0;decades of methodological evolution, highlighting the transition from early manual and rule-based approaches to modern deep learning&#x2013;driven segmentation strategies. Particular emphasis is placed on recent hybrid frameworks that integrate physics-informed digital signal processing with generative deep learning models, which collectively improve boundary detection robustness, reduce annotation dependency, and enhance model interpretability. These advances have significantly expanded the clinical utility of OCT-based SC assessment, enabling more sensitive disease monitoring, improved evaluation of therapeutic and cosmetic interventions, and broader applications in dermatologic diagnostics. Finally, we outline emerging opportunities for real-time, marker-free analysis, multimodal data fusion, and the development of explainable and generalizable algorithms to support precision and personalized dermatologic care.</p>
</abstract>
<kwd-group>
<kwd>dermatologic AI applications</kwd>
<kwd>digital signal processing</kwd>
<kwd>OCT skin imaging</kwd>
<kwd>stratum corneum thickness</kwd>
<kwd>variational autoencoders</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the National Social Science Fund of China (Project: Research on Risk Prevention Mechanism of Grassroots Social Governance Intelligence, No. 21BSH001).</funding-statement>
</funding-group>
<counts>
<fig-count count="2"/>
<table-count count="3"/>
<equation-count count="0"/>
<ref-count count="67"/>
<page-count count="15"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Biosensors and Biomolecular Electronics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>The stratum corneum (SC), the outermost layer of human skin, is a thin yet functionally critical structure that governs epidermal permeability, water retention, and protection against pathogens, allergens, and xenobiotics (<xref ref-type="bibr" rid="B35">Lintzeri et al., 2022</xref>). Although typically only 10&#x2013;20&#xa0;&#xb5;m thick, the SC exerts a disproportionate influence on skin barrier function through its lipid&#x2013;protein &#x201c;brick-and-mortar&#x201d; architecture. Even micrometre-scale deviations in SC thickness can substantially compromise barrier integrity or, conversely, modulate the penetration of therapeutic and cosmetic agents. Consequently, precise and reproducible quantification of SC thickness has emerged as a key objective in clinical dermatology, transdermal drug delivery, and personal-care science (<xref ref-type="bibr" rid="B63">You et al., 2023</xref>). However, accurately delineating this ultrathin, heterogeneous layer <italic>in vivo</italic> remains a major technical challenge, limiting the translation of advanced imaging modalities into routine clinical and industrial practice (<xref ref-type="bibr" rid="B54">Wang et al., 2024</xref>).</p>
<p>Optical coherence tomography (OCT) has become a leading non-invasive tool for SC assessment due to its depth-resolved imaging capability, micrometre-scale axial resolution (1&#x2013;5&#xa0;&#xb5;m), and penetration depths approaching 1.8&#xa0;mm in keratinized tissue (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>). Modern spectral-domain, swept-source, and line-field confocal OCT (LC-OCT) systems can clearly distinguish the highly scattering SC from the underlying viable epidermis while preserving tissue integrity for longitudinal monitoring (<xref ref-type="bibr" rid="B35">Lintzeri et al., 2022</xref>). Unlike histology, OCT avoids fixation-induced shrinkage; unlike high-frequency ultrasound (<xref ref-type="bibr" rid="B39">Luan et al., 2023</xref>), it resolves fine epidermal layers without contrast agents; and unlike reflectance confocal microscopy (<xref ref-type="bibr" rid="B65">Yu et al., 2023b</xref>), it provides subsurface imaging over hundreds of micrometres at video rates. These advantages position OCT as an ideal modality for quantitative SC thickness mapping (<xref ref-type="bibr" rid="B61">Yang et al., 2024</xref>).</p>
<p>Despite these intrinsic strengths, the clinical and translational utility of OCT is constrained by a persistent bottleneck: the reliable extraction of SC boundary coordinates from raw OCT reflectivity data (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>). Manual caliper-based measurements are subjective, labor-intensive, and poorly reproducible, with inter-observer variability often exceeding 15% (<xref ref-type="bibr" rid="B22">He et al., 2024</xref>). Histological validation, frequently treated as a reference standard, introduces tissue shrinkage artifacts of 12%&#x2013;21%, undermining direct comparison with <italic>in vivo</italic> OCT measurements (<xref ref-type="bibr" rid="B28">Kerns et al., 2008</xref>). Automated segmentation approaches, including graph-search methods and convolutional neural network&#x2013;based pipelines, have improved accuracy but remain limited by high computational demands, sensitivity to device-specific signal characteristics, and a heavy reliance on large, densely annotated training datasets (<xref ref-type="bibr" rid="B28">Kerns et al., 2008</xref>). These constraints hinder deployment in point-of-care settings, multicenter clinical trials, and large-scale cosmetic testing, where robustness, efficiency, and data economy are essential (<xref ref-type="bibr" rid="B11">Chen et al., 2025</xref>).</p>
<p>At the same time, the biological and clinical importance of precise SC thickness measurement continues to intensify (<xref ref-type="bibr" rid="B19">Gambichler et al., 2006b</xref>). Deviations from physiological SC thickness are closely associated with skin disorders (<xref ref-type="bibr" rid="B60">Xu et al., 2025</xref>) such as atopic dermatitis, xerosis, ichthyoses, and psoriasis, where barrier dysfunction, rather than thickness alone, correlates with disease severity and increased transepidermal water loss (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>). Beyond pathology, controlled modulation of SC thickness through hydration, exfoliation, or formulation design is central to transdermal delivery and cosmetic efficacy; shifts of only 2&#x2013;4&#xa0;&#xb5;m can alter active ingredient penetration by tens of percent. These effects are further complicated by pronounced inter-individual and anatomical variability, with SC thickness accounting for much of the epidermal thickness range observed across body sites, ages, phototypes, and environmental conditions. Detecting such subtle, early-stage changes requires measurement strategies that are both highly sensitive and biologically consistent.</p>
<p>To overcome these challenges, a shift is needed from purely discriminative, data-hungry segmentation pipelines toward models that explicitly integrate OCT physics with data-efficient representation learning (<xref ref-type="bibr" rid="B51">Tang et al., 2024</xref>). Physics-informed digital signal processing (DSP) techniques, such as speckle reduction, depth-dependent sensitivity compensation, dispersion correction, and deconvolution, can suppress imaging artifacts and linearize OCT signals before learning, reducing the burden placed on downstream models (<xref ref-type="bibr" rid="B42">Ozcan et al., 2007</xref>). Building on this foundation, variational autoencoders (VAEs) offer a powerful generative framework for SC analysis (<xref ref-type="bibr" rid="B31">Li et al., 2025</xref>). By learning continuous, regularized latent representations of skin morphology, VAEs can disentangle biologically meaningful factors, such as SC thickness and surface roughness, from noise and device-specific variability (<xref ref-type="bibr" rid="B30">Li K. et al., 2024</xref>). Their generative nature enforces structural coherence, reduces dependence on exhaustive pixel-level annotations, and enables intrinsic uncertainty estimation&#x2014;features that are particularly valuable for clinical trust, cross-device generalization, and longitudinal monitoring.</p>
<p>This review synthesizes 2&#xa0;decades of progress in SC thickness quantification with a particular focus on the emerging convergence of advanced DSP and VAE-based generative learning. We first outline the biological and clinical rationale for accurate SC thickness mapping and critically examine the limitations of conventional and contemporary measurement approaches. We then detail DSP strategies tailored to skin OCT and VAE architectures optimized for SC boundary delineation, benchmarking their performance against classical algorithms and discriminative deep learning models. Finally, we discuss current clinical and industrial applications, unresolved methodological challenges, and future directions, outlining a roadmap toward centimetre-scale, micrometre-accurate <italic>in vivo</italic> mapping of the human skin barrier. By integrating principles from optical physics, signal processing, and representation learning, this work aims to advance precision skin barrier assessment and support the development of personalized dermatological and cosmetic interventions.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Conventional methods for thickness estimation</title>
<p>Early studies treated the SC reference standard as a straightforward length: place calipers on the OCT screen or excise a biopsy, embed it in paraffin, and read the distance between the corneocyte surface and the viable epidermis. Yet, each of these &#x201c;conventional&#x201d; routes introduces measurement error that is now well-documented (<xref ref-type="bibr" rid="B18">Gambichler et al., 2006a</xref>; <xref ref-type="bibr" rid="B35">Lintzeri et al., 2022</xref>). The manual placement of electronic calipers on individual B-scans remains the most widely used clinic-side technique because it requires no dedicated software. Unfortunately, it is both labor-intensive and operator-dependent. A recent multi-site trial that timed 1,587 manual measurements reported a mean reading time of 42&#xa0;s per frame, an impractically burdensome task when hundreds of frames are generated in a 2-min sweep (<xref ref-type="fig" rid="F1">Figure 1</xref>). The same study showed inter-observer coefficients of variation approaching 13% for forearm skin, even after a joint training session, largely because speckle noise masks the dermal-epidermal junction and encourages subjective placement of the basal line (<xref ref-type="bibr" rid="B19">Gambichler et al., 2006b</xref>). Manual protocols also require the user to down-sample three-dimensional stacks to a handful of &#x201c;representative&#x201d; slices, thereby forfeiting information on regional undulations and biasing group statistics, especially at acral or aged sites where papillary relief is pronounced (<xref ref-type="bibr" rid="B25">Jain et al., 2024</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Schematic summary of conventional SC thickness-estimation methods.</p>
</caption>
<graphic xlink:href="fbioe-13-1732519-g001.tif">
<alt-text content-type="machine-generated">Diagram illustrating four measurement methods: Manual Caliper with a stopwatch representing time sensitivity, Histology with layered samples showing variability percentages, Peak-to-Peak with graphs indicating noise edges, and Graph-Search with an arrow depicting noise sensitivity. Warning symbols highlight issues like variability and sensitivity.</alt-text>
</graphic>
</fig>
<p>Histology is often considered a gold standard, but its thickness numbers are themselves moving targets. From the moment a 4&#xa0;mm punch is taken, contractile forces and subsequent processing steps initiate cumulative shrinkage. Optical tracking and serial caliper studies indicate that the loss in length is approximately 12%&#x2013;21% and in width is 11%&#x2013;23% before the slide even reaches the microscope (<xref ref-type="bibr" rid="B28">Kerns et al., 2008</xref>). Lipid extraction during dehydration collapses intercellular spaces within the SC, while paraffin infiltration hardens the sample, exaggerating the effect. Cryosectioning reduces shrinkage but sacrifices the crisp nuclear detail dermatopathologists prefer. The net result is that histological SC often reads several micrometres thinner than its <italic>in-vivo</italic> counterpart, forcing investigators to apply empirical correction factors that vary with fixation protocol, anatomic site, and patient age (<xref ref-type="bibr" rid="B47">Sandby-M&#xf8;ller et al., 2003</xref>; <xref ref-type="bibr" rid="B28">Kerns et al., 2008</xref>; <xref ref-type="bibr" rid="B52">Tran et al., 2015</xref>).</p>
<p>To circumvent these limitations, classical image-processing pipelines emerged in the mid-2000s. The simplest exploit the bimodal intensity profile of SD-OCT A-scans: taking the first strong reflection as the air/SC interface and a second peak as the dermal-epidermal junction. While fast, this &#x201c;peak-to-peak&#x201d; heuristic fails whenever the second maximum corresponds instead to a collagen bundle or papillary tip, resulting in systematic under-reads in more than 25% of scans (<xref ref-type="bibr" rid="B18">Gambichler et al., 2006a</xref>). Edge-based algorithms refine the approach by applying Sobel or Canny operators to each frame and then fitting polynomial splines through the strongest gradient. However, they struggle in low-contrast regions and tend to propagate single-frame errors through the spline, inflating the root-mean-square (RMS) error to 8&#x2013;12&#xa0;&#xb5;m on healthy volar skin. The model-based methods improve robustness by incorporating anatomical priors. Active-contour &#x201c;snakes&#x201d; initialise near the skin surface and evolve toward energy minima that penalise curvature while rewarding high gradient magnitude. However, convergence stalls in the presence of speckle voids and requires careful tuning of elasticity parameters for each device. Graph-search formulations overcome many of these issues: they treat the B-scan as a weighted graph in which the cost of traversing a pixel is inversely proportional to its edge strength, then solve a shortest-path problem that yields globally optimal, smooth boundaries. Three-dimensional extensions that couple adjacent B-scans have reduced segmentation error by &#x223c;20% relative to 2-D approaches and generate continuous thickness maps suitable for roughness assessment (<xref ref-type="bibr" rid="B50">Srivastava et al., 2018</xref>).</p>
<p>Despite these refinements, conventional image-processing pipelines share common weaknesses. First, most rely on handcrafted thresholds or filter kernels that were calibrated on homogeneous laboratory datasets, degrading when confronted with lower signal-to-noise ratios, darker phototypes, or atypical curvature. Second, they remain computationally heavy; a typical 3-D graph-search pass over a 512 &#xd7; 1,024 &#xd7; 400 volume can take tens of seconds on a CPU. Third, repeatability seldom surpasses that of a well-trained technician: even state-of-the-art convolution-edge hybrids report mean absolute errors around 10&#xa0;&#xb5;m and Dice overlaps near 0.83 &#xb1; 0.06 across 270 clinical OCT frames, numbers acceptable for population studies but marginal for detecting the 2&#x2013;4&#xa0;&#xb5;m shifts that accompany early barrier impairment or cosmetic interventions (<xref ref-type="bibr" rid="B13">Del Amor et al., 2020</xref>).</p>
<p>Combined, these findings highlight a plateau in what purely manual or classic algorithmic strategies can deliver. They highlight the need for next-generation pipelines that integrate physics-informed preprocessing with learning-based inference, precisely the gap that advanced digital signal processing and variational autoencoders aim to address in the sections that follow.</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Fundamentals and background</title>
<p>The SC comprises 10&#x2013;20 flattened, anucleate corneocytes embedded in a lipid matrix organised as short- and long-periodicity lamellae. The matrix, which is roughly 50% ceramides, 25% cholesterol, and 15% free fatty acids by weight, forms the only continuous diffusion pathway across the barrier. At the same time, the protein-rich corneocytes (&#x201c;bricks&#x201d;) provide mechanical strength (<xref ref-type="bibr" rid="B8">Bouwstra et al., 2023</xref>). Corneocytes originate in the stratum granulosum, where lamellar bodies exocytose precursor lipids and hydrolytic enzymes. Once keratinocytes enucleate, transglutaminase cross-links involucrin, loricrin, and small proline-rich proteins to form the cornified envelope; covalently bound &#x3c9;-hydroxy-ceramides anchor the lipid lamellae to this scaffold (<xref ref-type="bibr" rid="B17">Feingold and Jiang, 2011</xref>). A downward pH gradient (&#x2248;7.0 &#x2192; 4.5) activates &#x3b2;-glucocerebrosidase and acidic sphingomyelinase for lipid maturation, then triggers kallikrein-5/-7 to cleave corneodesmosomes during desquamation. Hydration modulates the lateral spacing of lipid bilayers, causing the SC to swell or shrink by up to 30%, which directly alters optical backscatter and, hence, OCT contrast (<xref ref-type="table" rid="T1">Table 1</xref>). Clinically, perturbations in lipid ratios or corneodesmosome turnover manifest as increased transepidermal water loss, dyschromia, or scaling disorders, such as ichthyosis (<xref ref-type="bibr" rid="B8">Bouwstra et al., 2023</xref>). Optical coherence tomography relies on low-coherence interferometry, where back-reflections from tissue microstructures interfere with a reference arm to localize scatterers with an axial resolution of &#x394;z &#x2248; 0.44&#x2009;&#x3bb;<sub>0</sub><sup>2</sup>/&#x394;&#x3bb;. Using broadband sources centered at 840&#xa0;nm (for epidermal work) or 1.3&#xa0;&#xb5;m (for dermal penetration) yields an axial resolution of 1&#x2013;7&#xa0;&#xb5;m in the skin (<xref ref-type="bibr" rid="B45">Popescu et al., 2011</xref>).</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Core digital signal-processing concepts in OCT.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Task</th>
<th align="center">Why it matters for SC measurement</th>
<th align="center">Typical algorithms</th>
<th align="center">Pitfalls/solutions</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Chromatic dispersion compensation</td>
<td align="center">Residual glass- and tissue-induced dispersion broadens the axial PSF, blurring the SC boundary</td>
<td align="center">Phase-derivative resampling, sub-band phase matching, iterative optimisation of group-delay polynomials</td>
<td align="center">Literature-reported pitfall: Over-fitting introduces ringing. Author-synthesized solution: Digital approaches avoid extra bulk optics and are tunable per subject</td>
</tr>
<tr>
<td align="center">Speckle reduction</td>
<td align="center">Coherent interference among sub-resolution scatterers produces multiplicative noise that obscures the thin SC</td>
<td align="center">Angular/positional compounding, adaptive Lee and wavelet filters, non-local means, cGAN or MAS-Net deep despeckling</td>
<td align="center">Literature-reported pitfall: Trade-off between contrast loss and edge blurring. Author-synthesized solution: Deep probabilistic frameworks preserve textural fidelity while cutting speckle contrast by &#x2248;25&#x2013;30%</td>
</tr>
<tr>
<td align="center">Deconvolution/super-resolution</td>
<td align="center">Restores high-frequency detail suppressed by the system PSF, sharpening SC/vEpi interface</td>
<td align="center">Blind Richardson&#x2013;Lucy, PSF-informed Wiener filtering, self-supervised PSF-aware CNNs</td>
<td align="center">Literature-reported pitfall: Amplifies shot noise. Author-synthesized solution: Coupling with denoising autoencoders or VAE-based priors stabilises the inversion</td>
</tr>
<tr>
<td align="center">Attenuation correction and coefficient mapping</td>
<td align="center">Signal decay with depth masks distal SC on thick sites (palm/sole). Quantifying &#x3bc;_t improves thickness estimates and supplies a biomarker of hydration</td>
<td align="center">Depth-resolved exponential fitting, adaptive scattering-based compensation, and optical-attenuation-coefficient (OAC) imaging</td>
<td align="left">&#x200b;</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4">
<label>4</label>
<title>VAEs: theory and key variants</title>
<p>A variational auto-encoder (VAE) links an encoder q<sub>&#x3d5;</sub>(z|x) with a decoder p<sub>&#x3b8;</sub>(x|z) and is trained by maximising the evidence lower bound, which trades reconstruction fidelity against the Kullback&#x2013;Leibler divergence that keeps the approximate posterior close to a simple prior. Because the reparameterization trick makes the stochastic path differentiable, the network learns latent codes that, in skin OCT, capture layer topology and vessel-scattering statistics. Random sampling in this space supports both data augmentation and principled uncertainty estimates (<xref ref-type="bibr" rid="B59">Wu Z. et al., 2024</xref>). Pushing the KL term with a factor &#x3b2; larger than one (&#x3b2;-VAE) forces the network to compress more aggressively, and in doing so tends to align individual latent axes with interpretable quantities such as SC thickness, surface roughness, or hydration level, trading a slight loss in structural similarity (SSIM) for far greater explainability and controllable synthesis (<xref ref-type="bibr" rid="B2">Aronsson, 2023</xref>). If, instead of a continuous Gaussian, the model uses a learned code-book and vector quantisation (VQ-VAE), the latents become discrete; this prevents posterior collapse and sharpens fine edges, a valuable property when millimetre-wide B-scans translate sub-pixel shifts into micrometre-scale errors.</p>
<p>Building on these foundations, researchers have introduced spatial-contextual and volumetric extensions that weave attention masks or 3-D convolutions through the encoder and decoder, preserving local coherence across slices; retinal-OCT anomaly detectors employing such designs exceed 0.95 AUROC with fewer than ten thousand labelled images, and volumetric-erasing tricks further exploit inter-slice continuity to lift unsupervised segmentation scores in skin OCT and MRI alike (<xref ref-type="bibr" rid="B26">Jebril et al., 2024</xref>). Where label scarcity is acute, conditional variants (cVAEs) append class tags, physics priors, or intermediate DSP features to both ends of the network, thereby steering reconstructions toward thickness-conditioned outputs or multi-hypothesis segmentations; probabilistic U-Net and PHISeg exemplify this approach by embedding a cVAE inside a U-Net backbone, capturing aleatoric uncertainty and furnishing confidence maps around tricky structures such as hair follicles, while newer hybrids couple Hamiltonian sampling with discriminative regularisation to sharpen boundaries in ultra-small datasets (<xref ref-type="bibr" rid="B44">Petersen and Kucheryavskiy, 2025</xref>).</p>
<p>Across all these flavours, several implementation rules consistently improve performance. First, the latent dimensionality should be large enough to encode every anatomical factor of interest, thickness, scattering slope, and curvature, yet not so large that it encourages over-fitting (<xref ref-type="bibr" rid="B5">Biffi et al., 2020</xref>). Second, KL-annealing or cyclical &#x3b2;-schedules curb early posterior collapse in the presence of high speckle noise (<xref ref-type="bibr" rid="B23">Huang and Yang, 2013</xref>). Third, feeding physics-informed channels, such as attenuation-corrected intensity or dispersion-compensated phase, conditions the network toward anatomically plausible solutions and can reduce the need for pixel-accurate labels by 30%&#x2013;40%. Finally, depth-wise separable convolutions and latent-space clustering enable few-shot adaptation across different OCT devices while maintaining real-time throughput on portable hardware (<xref ref-type="bibr" rid="B27">Jin et al., 2016</xref>).</p>
</sec>
<sec id="s5">
<label>5</label>
<title>Advanced DSP techniques tailored to OCT skin imaging</title>
<p>Modern dermatologic OCT pipelines rarely pass raw interferograms directly to variational autoencoders (VAEs). Instead, they rely on a sequence of physics-aware signal-processing blocks that clean, linearize, and enrich the data, allowing the network to devote its limited capacity to modeling anatomy rather than artifacts. Below, each major block is summarised (<xref ref-type="table" rid="T2">Table 2</xref>) together with its net benefit for VAE-based thickness estimation.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Advanced DSP techniques and their contributions to VAE-based SC thickness estimation in OCT skin imaging.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">DSP technique</th>
<th align="center">Primary objective in OCT skin imaging</th>
<th align="center">Representative algorithms/methods</th>
<th align="center">Benefit to VAE-based thickness estimation</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Speckle-noise suppression</td>
<td align="center">Remove grainy multiplicative noise that hides the thin SC boundary</td>
<td align="center">Adaptive Lee/Kuwahara, wavelet (&#xe0;-trous) thresholding, blind-spot deep nets (SSN2V), aperture-phase compounding</td>
<td align="center">Literature-reported baseline: Speckle reduces PSNR by 15&#x2013;20&#xa0;dB. Author-synthesized benefit: &#x2b;2&#x2013;3&#xa0;dB PSNR; latent space no longer forced to encode speckle &#x2192; &#x2193; reconstruction loss, &#x2191; boundary precision</td>
</tr>
<tr>
<td align="center">Depth-dependent sensitivity roll-off compensation</td>
<td align="center">Correct axial intensity decay in SD/SS systems</td>
<td align="center">Mirror-based PSF calibration, polynomial/log-domain rescaling, k-space resampling</td>
<td align="center">Literature-reported baseline: Uncorrected roll-off biases thickness estimates toward shallower values. Author-synthesized benefit: Uniform dynamic range across depth and devices &#x2192; faster convergence, better cross-scanner generalisation</td>
</tr>
<tr>
<td align="center">Phase- and polarization-sensitive enhancements</td>
<td align="center">Add biomechanical and birefringence contrast to ambiguous intensity regions</td>
<td align="center">Phase-resolved elastography, full-range PS-OCT retardation mapping</td>
<td align="center">Literature-reported baseline: Intensity contrast is weak in some clinical scenarios. Author-synthesized benefit: Auxiliary channels help VAE disentangle structure vs. optical properties &#x2192; &#x223c;15% boundary-error reduction</td>
</tr>
<tr>
<td align="center">Super-resolution and deconvolution</td>
<td align="center">Restore high-frequency detail lost to the system PSF</td>
<td align="center">Blind/PSF-informed Richardson&#x2013;Lucy, Bayesian and deep-unfolded deconvolution</td>
<td align="center">Literature-reported baseline: System PSF blurs axial profile by &#x223c;1.5&#xa0;&#xb5;m. Author-synthesized benefit: Edge-spread width &#x2193; &#x2248;1.5&#xa0;&#xb5;m; VAE decoders focus on geometry, not blur compensation</td>
</tr>
<tr>
<td align="center">Time&#x2013;frequency analyses (STFT, CWT, EMD)</td>
<td align="center">Isolate non-stationary backscatter patterns tied to lipid lamellae or papillary relief</td>
<td align="center">Multi-window STFT, continuous wavelet transform, intrinsic-mode stripping (EMD)</td>
<td align="center">Literature-reported baseline: Speckle dominates non-stationary backscatter. Author-synthesized benefit: Multiscale feature maps enrich VAE input &#x2192; better latent disentanglement of thickness, curvature, scattering slope</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Speckle-noise suppression remains the single most crucial preconditioning step because multiplicative speckle not only lowers peak-signal-to-noise ratio (PSNR) by 15&#x2013;20&#xa0;dB but also generates false high-frequency texture that a VAE might erroneously encode as physiologic structure. Traditional adaptive filters, including enhanced Lee, Kuwahara, hybrid median, and adaptive Wiener, reduce speckle contrast by 25%&#x2013;35% while preserving edge strength (<xref ref-type="bibr" rid="B42">Ozcan et al., 2007</xref>).&#x200b; Wavelet-domain approaches go further: the &#xe0;-trous transform followed by scale-dependent thresholding removes granular noise while retaining the SC-epidermis gradient and increasing the structural similarity index (SSIM) by approximately 0.05 on test phantoms (<xref ref-type="bibr" rid="B42">Ozcan et al., 2007</xref>).&#x200b; Over the past 2&#xa0;years, deep unsupervised methods have overtaken handcrafted filters. The Speckle Split Noise2Void (SSN2V) framework, for instance, trains an OCT-specific blind-spot network that implicitly learns the speckle statistics from paired noisy patches, delivering a &#x223c;2.8&#xa0;dB PSNR gain without clean targets (<xref ref-type="bibr" rid="B48">Schottenhamml et al., 2023</xref>).&#x200b; Recent optical schemes, such as aperture-phase modulation with adaptive optics, physically decorrelate speckle prior to detection and can be combined with post-hoc GAN-based multiscale denoising for an additional 1&#x2013;2&#xa0;dB improvement (<xref ref-type="bibr" rid="B64">Yu et al., 2023a</xref>; <xref ref-type="bibr" rid="B12">Das et al., 2024</xref>). In VAE pipelines, a denoised magnitude-only input suppresses pixel-level randomness, allowing the latent space to capture mesoscopic parameters, such as layer thickness and scattering slope, rather than fitting speckle.</p>
<p>Depth-dependent sensitivity roll-off compensation tackles the systematic decay of signal amplitude with optical path length that plagues spectral- and swept-source OCT. Uncorrected roll-off distorts intensity-based tissue cues, biasing thickness estimates toward shallower values. Hardware options (k-clock resampling, dual-balanced detection) help, but software compensation is now the preferred route because it adapts per scan. Calibration scans from a mirror provide a reference axial point-spread function (PSF); the inverse of this curve is then applied to each A-scan, or a polynomial/log-domain model is fitted to rescale deeper pixels. Optimized numerical k-sampling in swept-source OCT reduces roll-off to 2&#x2013;3&#xa0;dB over 4&#xa0;mm, maintaining a 4.9&#xa0;&#xb5;m axial resolution across the span (<xref ref-type="bibr" rid="B24">Huang et al., 2024</xref>). Line-field systems integrate a similar polynomial correction to achieve a &#x3c;10&#xa0;dB drop over 1&#xa0;mm (<xref ref-type="bibr" rid="B10">Chen et al., 2024</xref>). Once roll-off-normalised, the dynamic range of superficial versus deep SC becomes consistent across volumes and scanners, giving VAEs a homogeneous intensity distribution that accelerates convergence and improves cross-device generalisation.</p>
<p>Phase-sensitive and polarization-sensitive enhancements add entirely new information channels. Phase-resolved OCT (&#x3d5;-OCT) registers sub-nanometre axial displacements caused by pulsatile blood flow or biomechanical waves; when mapped over time, these phase shifts reveal visco-elastic contrasts between SC and viable epidermis. Dynamic phase-sensitive optical coherence elastography, for example, tracks the speed of Rayleigh waves to grade burn severity <italic>in vivo</italic> (<xref ref-type="bibr" rid="B37">Liu et al., 2024</xref>). Polarization-sensitive OCT (PS-OCT) exploits tissue birefringence: the cornified envelope and ordered lipid lamellae of the SC show minimal birefringence, whereas the keratin network below exhibits measurable retardation. Full-range depth-encoded SS-PS-OCT now delivers high-sensitivity birefringence maps that clearly outline the SC boundary even when intensity contrast is weak (<xref ref-type="bibr" rid="B21">He et al., 2023</xref>; <xref ref-type="bibr" rid="B58">Wu T. et al., 2024</xref>).&#x200b; Feeding phase and retardation volumes&#x2014;either as auxiliary channels or as priors in a conditional VAE&#x2014;improves boundary localisation by &#x223c;15% and permits the latent space to disentangle structural features (thickness) from polarimetric ones (birefringence).</p>
<p>Super-resolution and deconvolution seek to reverse the blur introduced by the coherence gate and confocal pinhole. Blind and PSF-informed Richardson&#x2013;Lucy deconvolution sharpens the axial profile, recovering 15%&#x2013;25% of high-frequency content and reducing the edge-spread width by &#x223c;1.5&#xa0;&#xb5;m. A 2025 review catalogues emerging Bayesian and deep-unfolded deconvolution networks that explicitly model OCT noise statistics; these schemes reclaim 30% of the axial bandwidth while suppressing ringing (<xref ref-type="bibr" rid="B1">Abbasi et al., 2025</xref>).&#x200b; In parallel, temporal-PSF deconvolution using recurrent neural networks, initially developed for time-resolved fluorescence, is being adapted for OCT to achieve joint depth super-resolution and dispersion correction (<xref ref-type="bibr" rid="B43">Pandey et al., 2024</xref>).&#x200b; VAE decoders trained on deconvolved inputs no longer need to &#x201c;undo&#x201d; system blur. They can focus on subtle curvature cues, thereby reducing reconstruction error at the SC&#x2013;epidermis junction by up to 40% on synthetic phantoms.</p>
<p>Time&#x2013;frequency analyses provide an orthogonal approach by decomposing non-stationary backscatter patterns. The short-time Fourier transform (STFT) treats each A-scan as a spectro-temporal signal; varying window length tunes the trade-off between depth and spectral resolution, enabling selective enhancement of sparsely distributed high-k components associated with lipid lamellae (<xref ref-type="bibr" rid="B3">Baba, 2012</xref>).&#x200b; Continuous wavelet transforms (CWT) offer multiscale localisation; coupling CWT coefficients to a VAE gives the encoder explicit access to both fine (lamellar) and coarse (papillary) scales. Empirical mode decomposition (EMD) and its learnable derivatives iteratively strip intrinsic mode functions dominated by speckle, leaving a residue that approximates the structural signal; EMD-based denoising boosts SSIM by 0.08 over median filtering on volar skin (<xref ref-type="bibr" rid="B40">Myakinin et al., 2013</xref>; <xref ref-type="bibr" rid="B53">Velasco-Forero et al., 2022</xref>). When these spectro-temporal features are concatenated with intensity images, VAEs gain richer descriptors of layer periodicity and scattering anisotropy, yielding more geometrically faithful reconstructions and latent factors that correlate with biophysical properties (e.g., lipid order).</p>
</sec>
<sec id="s6">
<label>6</label>
<title>VAE frameworks for SC thickness measurement</title>
<p>Deep generative learning has moved VAE-based pipelines from proof-of-concept to practical tools that rival classical graph-search and U-Net segmenters for OCT skin analysis. The key design choices, network architecture, latent-space regularisation, conditioning strategy, interaction with signal-processing blocks, and data-efficiency tactics, determine whether a model captures subtle micrometre-scale boundaries or collapses into blurry reconstructions (<xref ref-type="fig" rid="F2">Figure 2</xref>).</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Schematic workflow of a VAE-based framework for SC thickness estimation in OCT skin imaging.</p>
</caption>
<graphic xlink:href="fbioe-13-1732519-g002.tif">
<alt-text content-type="machine-generated">Flowchart detailing the variational autoencoder framework for stratum corneum thickness measurement. It starts with an OCT scanner feeding into DSP preprocessing stages: speckle filter, roll-off compensation, and deconvolution. Results enter a latent space (B-VAE/cVAE), processed by a VAE encoder and decoder, producing a thickness map. Icons below represent self-supervision, transfer learning, and synthetic data.</alt-text>
</graphic>
</fig>
<sec id="s6-1">
<label>6.1</label>
<title>Convolutional architectures and latent-space regularization</title>
<p>State-of-the-art VAEs for OCT volumes employ fully convolutional encoders and decoders with residual or dense blocks, ensuring that spatial locality is preserved. At the same time, receptive fields expand sufficiently to encompass papillary undulations. Skip connections (as in VAE-UNet hybrids) preserve high-frequency edge cues that might otherwise be lost through successive down-sampling. Latent dimensions are purposely kept small, ranging from 8 to 32 for 2-D B-scans and from 64 to 128 for 3-D stacks, to force the network to compress speckle variability and accentuate anatomical invariants. Weight-norm, spectral-norm or InfoVAE-style Maximum-Mean-Discrepancy (MMD) penalties replace or complement the standard Kullback&#x2013;Leibler (KL) divergence when Gaussian assumptions prove too restrictive for highly structured OCT data, preventing posterior collapse and yielding sharper boundaries (<xref ref-type="bibr" rid="B67">Zhou et al., 2023</xref>; <xref ref-type="bibr" rid="B55">Wang et al., 2025</xref>).</p>
</sec>
<sec id="s6-2">
<label>6.2</label>
<title>&#x3b2;-VAE for disentangling tissue-layer features</title>
<p>Scaling the KL term by &#x3b2; &#x3e; 1 tilts the information bottleneck toward representation learning and away from pixel-perfect fidelity. In practice, &#x3b2; values between 3 and 8 separate latent axes that correlate almost linearly with SC thickness, scattering slope, and shadow severity, as demonstrated in a 2025 retinal-OCT study where a &#x3b2;-VAE mapped ganglion-cell-layer thinning with &#x3c;2&#xa0;&#xb5;m error while exposing disease-progression trajectories in its latent manifold (<xref ref-type="bibr" rid="B55">Wang et al., 2025</xref>).&#x200b; Because the factors emerge without explicit labels, &#x3b2;-VAE disentanglement is especially valuable for skin sites lacking extensive ground truth: latent traversal can &#x201c;dial&#x201d; SC thickness while holding speckle or motion artefacts constant, offering an intuitive quality-control handle before numerical measurement is extracted.</p>
</sec>
<sec id="s6-3">
<label>6.3</label>
<title>Conditional VAE with anatomical or physics-informed priors</title>
<p>Conditioning gives the generative model external anchors. The simplest form concatenates a coarse segmentation mask, obtained from a lightweight edge detector or from graph-search output, to the intensity image, nudging the VAE to focus its reconstruction on plausible layer geometry. More ambitious designs embed optical constraints: Physics-Informed VAE (PI-VAE) and &#x3a6;-DVAE add a differential-equation residual (e.g., Beer&#x2013;Lambert attenuation or wave-equation dispersion) to the loss, so that generated A-scans respect known depth behaviour (<xref ref-type="bibr" rid="B66">Zhong and Meidani, 2023</xref>; <xref ref-type="bibr" rid="B20">Glyn-Davies et al., 2024</xref>).&#x200b; Conditional VAEs have also been coupled to polarization channels, so that birefringence priors restrict boundary location to regions of low retardation, cutting the SC-edge mean-absolute error by &#x223c;15% compared with intensity-only models on palmar datasets.</p>
</sec>
<sec id="s6-4">
<label>6.4</label>
<title>Hybrid DSP-VAE pipelines</title>
<p>Two philosophies have emerged. Pre-enhancement applies speckle filtering, roll-off calibration, deconvolution, and sometimes phase-to-amplitude conversion before feeding the cleaned stack to a &#x201c;vanilla&#x201d; VAE. Benefits include modularity (each DSP block can be tuned independently) and faster convergence, as the network receives high-quality inputs from the outset. Experiments on 512 &#xd7; 1024&#xa0;B-scans demonstrate that wavelet despeckling &#x2b; PSF-deconvolution preprocessing reduces reconstruction loss by half and lowers boundary error to 1.3&#xa0;&#xb5;m, compared to raw-input training. End-to-end learning embeds learnable DSP surrogates, Fourier-layer dispersion correctors, and attention-based speckle suppressors within the encoder, thereby optimizing the entire chain for thickness accuracy. Although this joint training needs 3&#x2013;5&#xd7; more data and careful weight initialisation, it absorbs device-specific quirks (e.g., depth-dependent sensitivity) that fixed preprocessing cannot, yielding the best cross-scanner transfer scores reported to date: &#x3c;4% drop in Dice overlap when ported from a swept-source to a line-field OCT. Recent complex-conjugate-removal GANs integrated into these pipelines underscore the value of combining optical priors with generative inference (<xref ref-type="bibr" rid="B4">Bellemo et al., 2025</xref>).&#x200b;</p>
</sec>
</sec>
<sec id="s7">
<label>7</label>
<title>Data resources and curation</title>
<p>Public resources for skin OCT are beginning to mature, yet they still lag far behind the dozens of well-curated retinal collections that dominate the field. Currently, the most widely cited open dataset is the UIUC &#x201c;CNN-GS-skin&#x201d; corpus, which was released with the 2024 Scientific Reports paper on rapid epidermal thickness measurement. It provides 1,575&#xa0;B-scans (460 &#xd7; 1,500 px) acquired with a swept-source handheld probe from five body sites in 63 healthy volunteers; both the air/SC and the SC/dermal&#x2013;epidermal junction (DEJ) boundaries are traced by two independent raters and reconciled with a third in cases of &#x2265;3&#xa0;&#xb5;m disagreement (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>).&#x200b; Complementing the 2-D collection, the LC-OCT Healthy Epidermis volume set, published by Chauvel-Picard and colleagues, provides 30 volumetric stacks (1.2 &#xd7; 1.2 &#xd7; 0.5&#xa0;mm, with isotropic 1&#xa0;&#xb5;m voxels), along with automatically derived thickness maps that have been cross-validated against histology (<xref ref-type="bibr" rid="B9">Cappilli et al., 2024</xref>).&#x200b; Meanwhile, several manufacturers have released demo volumes, often bundled with their proprietary analysis suites, that, although limited in number, cover pathological cases such as psoriasis plaques and early basal cell carcinomas; researchers typically augment these with small, bespoke acquisitions shared on request. Consequently, the community still relies heavily on semi-public repositories, such as GitHub links or data-use agreements negotiated with principal investigators, to achieve sample sizes suitable for deep generative training.</p>
<p>Because open, diverse cutaneous data remain scarce, groups have filled the gap with physical phantoms and <italic>in silico</italic> synthesis. Silicone&#x2013;gelatin multilayer blocks laden with titanium-dioxide scatterers reproduce the optical attenuation and the 10&#x2013;20&#xa0;&#xb5;m SC seen <italic>in vivo</italic>, while 3D-bioprinted constructs incorporating synthetic melanin nanoparticles span Fitzpatrick phototypes I&#x2013;VI, allowing investigators to probe pigment-dependent contrast without recruiting human participants (<xref ref-type="bibr" rid="B36">Liu et al., 2018</xref>; <xref ref-type="bibr" rid="B62">Yim et al., 2023</xref>). At the numerical end of the spectrum, the open-source MCOCT Monte Carlo engine simulates A-scans under arbitrary refractive index, absorption, and anisotropy profiles. By stochastically varying layer thicknesses and scattering parameters, it produces thousands of &#x201c;realistic yet label-perfect&#x201d; B-scans, which are invaluable for pre-training &#x3b2;-VAEs before fine-tuning on scarce patient data (<xref ref-type="bibr" rid="B16">Erdenedalai et al., 2024</xref>).&#x200b;</p>
<p>The value of any dataset, however, hinges on the consistency of annotation protocols. In practice, most groups adopt a two-tier pipeline: junior annotators place rough polylines around the air/SC surface and the SC/DEJ interface in tools such as ITK-SNAP; senior dermatologists then refine those curves using overlaid birefringence or phase-contrast cues when available, and a final adjudicator resolves conflicts by majority vote. In the UIUC corpus, this procedure yielded an average inter-observer standard deviation of 1.7&#xa0;&#xb5;m for the SC boundary and 2.3&#xa0;&#xb5;m for the DEJ (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>).&#x200b; To streamline future efforts, recent papers recommend publishing annotation checklists that clearly outline the inclusion criteria for hair follicles, sweat ducts, and motion artifacts, along with slice-by-slice provenance, so that downstream users can quantify rater uncertainty. Alongside technical rigor, ethical, privacy, and sampling bias issues require equal attention. Unlike retinal OCT, skin images reveal body topology, tattoos, and sometimes even fingerprints, making complete anonymization impossible without aggressive cropping; institutional review board protocols therefore emphasize explicit patient consent for open release and long-term storage. Furthermore, several audits of dermatology repositories have revealed a systematic underrepresentation of Fitzpatrick phototypes IV&#x2013;VI, which in turn degrades model performance on darker skin tones. 
A 2024 JAMA Network Open study demonstrated that crowdsourced recruitment, combined with stratified sampling, can help close this gap; however, it must be paired with bias-aware training objectives if generative VAEs are to accurately replicate lesions across all phototypes (<xref ref-type="bibr" rid="B56">Ward et al., 2024</xref>).&#x200b; Finally, data custodians are urged to strip or hash device identifiers to avoid inadvertent leakage of vendor IP and to publish datasheets for datasets that document imaging parameters, subject demographics, consent language, and known caveats.</p>
</sec>
<sec id="s8">
<label>8</label>
<title>Comparative performance review</title>
<sec id="s8-1">
<label>8.1</label>
<title>Meta-analysis methodology</title>
<p>To ensure the rigor and reproducibility of comparative statistics, a systematic meta-analysis of 12 original studies (published between 2021 and 2024) was conducted following the PRISMA guidelines. The key methodological details are as follows.</p>
<sec id="s8-1-1">
<label>8.1.1</label>
<title>Study inclusion criteria</title>
<p>Eligible studies: Original research evaluating SC thickness measurement methods using OCT in human subjects (healthy or pathological skin). Exclusion criteria: Review articles, phantom-only studies, studies without extractable error metrics, and those with sample sizes &#x3c;30 subjects (to avoid small-sample bias). Data sources: PubMed, IEEE Xplore, and ScienceDirect, with keywords including &#x201c;stratum corneum thickness,&#x201d; &#x201c;optical coherence tomography,&#x201d; &#x201c;OCT segmentation,&#x201d; and &#x201c;skin barrier measurement.&#x201d;</p>
</sec>
<sec id="s8-1-2">
<label>8.1.2</label>
<title>OCT modalities and anatomical sites</title>
<p>Included OCT modalities: Spectral-domain OCT (SD-OCT, 6 studies), swept-source OCT (SS-OCT, 4 studies), and line-field confocal OCT (LC-OCT, 2 studies).</p>
</sec>
<sec id="s8-1-3">
<label>8.1.3</label>
<title>Anatomical sites</title>
<p>Forearm (8 studies, primary site for healthy skin), palm/sole (3 studies, thick SC sites), facial skin (2 studies), and mixed sites (1 study). Studies focusing on pathological sites (e.g., eczema lesions) were included only if healthy control data were provided for consistency.</p>
</sec>
<sec id="s8-1-4">
<label>8.1.4</label>
<title>Annotation protocols harmonization</title>
<p>Manual annotation: All included studies used &#x2265;2 independent raters (dermatologists or trained researchers) with inter-rater agreement verified (Cohen&#x2019;s kappa &#x2265;0.75). Disagreements (&#x3e;3&#xa0;&#xb5;m) were resolved via third-rater adjudication (consistent with the UIUC CNN-GS-skin protocol).</p>
</sec>
<sec id="s8-1-5">
<label>8.1.5</label>
<title>Automated annotation</title>
<p>For discriminative and generative models, studies were required to report training data annotation methods (e.g., manual ground truth, histology-correlated labels) and cross-validation strategies (k-fold cross-validation, k &#x3d; 5&#x2013;10).</p>
</sec>
<sec id="s8-1-6">
<label>8.1.6</label>
<title>Error metrics standardization</title>
<p>Extracted metrics: Mean absolute error (MAE), root-mean-square error (RMSE), and average symmetric surface distance (ASSD) from original studies.</p>
<p>Harmonization: RMSE and ASSD were converted to MAE using published conversion factors (RMSE &#x2248;1.25&#xd7; MAE for Gaussian-distributed errors; ASSD &#x2248;1.1&#xd7; MAE for boundary segmentation tasks) to enable direct comparison. Missing data: For studies reporting only median/quartile ranges, mean values were estimated using the method of moments for skewed distributions.</p>
</sec>
</sec>
<sec id="s8-2">
<label>8.2</label>
<title>Performance comparison of measurement methods</title>
<p>Although classical image processing, discriminative deep networks, and generative VAEs all target the same anatomic endpoint, the two boundaries that delimit the SC, their performance profiles diverge markedly once accuracy, speed, memory, and interpretability are considered in the same frame of reference. Early graph-search pipelines enriched with Sobel edges and Savitzky&#x2013;Golay smoothing still dominate many dermatology labs because they run on any CPU and require no training, yet a multicentre benchmark of 270 clinical B-scans reported a mean Dice score of 0.83 &#xb1; 0.06 and a mean absolute thickness error of 10.3&#xa0;&#xb5;m, barely above the 2&#x2013;4&#xa0;&#xb5;m physiological changes expected after barrier-repair treatments (<xref ref-type="bibr" rid="B13">Del Amor et al., 2020</xref>). Their computational burden is likewise non-trivial: a 512 &#xd7; 1,024 &#xd7; 400 volume requires 22&#x2013;35&#xa0;s on a quad-core workstation and &#x223c;400&#xa0;MB of RAM for the shortest-path solver and intermediate probability maps. Hence, these methods struggle to provide point-of-care feedback when hundreds of frames are streamed from a handheld probe. In response, U-Net derivatives have gained traction. Vanilla U-Net, trained on 1,575 swept-source images of five body sites, achieved a Dice score of 0.94 and reduced the average symmetric surface distance (ASSD) to 6.8&#xa0;&#xb5;m. At the same time, lightweight LS-Net reached a Dice score of 0.96 with a 6-ms inference time on an NVIDIA RTX A4000 and a 23-MB memory footprint (<xref ref-type="bibr" rid="B32">Liao et al., 2024</xref>; <xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>).&#x200b; Nevertheless, these discriminative models remain data-hungry; when the training data drop below &#x223c;300 annotated B-scans, the Dice score slips below 0.90, and boundary jitter re-emerges. 
Moreover, saliency maps often highlight speckle patches rather than biologically meaningful edges, leaving clinicians unsure whether the network has learned true layer physics or just surface texture.</p>
<p>CNN-GS hybrids, such as CNN-GS-skin, bridge the gap by using a patch-wise CNN to score candidate pixels before a global graph search enforces geometric plausibility. In the most extensive available head-to-head test, CNN-GS-skin preserved 94.7% thickness accuracy while shrinking execution time by 130 &#xd7; relative to the original CNN-GS, thanks to pixel skipping, pruning, and CPU-friendly quantisation (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>).&#x200b; Even so, the method still needs &#x223c;160&#xa0;MB for probability volumes and can wobble on low-contrast acral skin where the CNN mislabels papillary tips.</p>
<p>VAE-based pipelines flip the script by treating segmentation as reconstruction: &#x3b2;-VAE or VQ-VAE encoders compress despeckled, roll-off-corrected B-scans into a 16&#x2013;32-dimensional latent code, and a shallow decoder regenerates the image while implicitly outlining the layer boundaries. A DSP-augmented &#x3b2;-VAE trained with self-supervision on 6,000 synthetic B-scans and fine-tuned on only 200 real frames reported 1.3&#xa0;&#xb5;m mean boundary error and a Dice of 0.965, matching the best U-Nets with one-tenth the labels, while sustaining 50 fps on a laptop CPU (&#x2248;45&#xa0;MB parameters) (<xref ref-type="bibr" rid="B26">Jebril et al., 2024</xref>; <xref ref-type="bibr" rid="B55">Wang et al., 2025</xref>). Because latent variables align neatly with thickness, speckle level, and curvature, latent-space traversal enables clinicians to &#x201c;dial&#x201d; thickness <italic>in silico</italic> and verify that reconstructions change coherently, a feature that saliency maps from U-Nets rarely offer. Furthermore, uncertainty can be quantified through Monte-Carlo latent sampling, flagging scans that fall outside the training distribution. When runtime and memory are placed on equal footing, VAEs and pruned U-Nets both meet the 20 fps, &#x3c;100&#xa0;MB threshold required for handheld scanners; CNN-GS falls short on speed, and classical DSP lags on both fronts. Interpretability tilts toward VAEs because latent disentanglement exposes continuous, clinically intuitive factors, whereas U-Net feature maps and Grad-CAM heat-spots remain heuristic. A meta-analysis of twelve OCT-skin studies published since 2021 shows the following median absolute thickness errors: classical DSP &#x3d; 10.2&#xa0;&#xb5;m; edge-aware CNN-GS &#x3d; 3.6&#xa0;&#xb5;m; U-Net/GAN ensembles &#x3d; 5.8&#xa0;&#xb5;m (wide IQR due to dataset bias); VAE &#x3d; 1.9&#xa0;&#xb5;m. 
Bland&#x2013;Altman plots across four of those studies reveal that only the VAE curves remain within the &#xb1;2&#xa0;&#xb5;m limits of agreement across the full 5&#x2013;25&#xa0;&#xb5;m SC range, confirming their suitability for early-stage barrier diagnostics.</p>
<p>Altogether, evidence now favours DSP-preconditioned &#x3b2;- or VQ-VAEs when the goals are label efficiency, sub-micrometre precision, real-time feedback, and clinician-friendly interpretability, while resource-optimised U-Nets remain a pragmatic mid-tier choice and purely classical pipelines are relegated to legacy or low-resource settings (<xref ref-type="table" rid="T3">Table 3</xref>).</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Comparative performance review of conventional, discriminative, and generative methods.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Method category</th>
<th align="center">Median Abs. thickness error (&#xb5;m)</th>
<th align="center">Typical dice/ASSD</th>
<th align="center">Runtime and memory</th>
<th align="center">Label demand and training</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Classical DSP (graph-search, sobel/Canny)</td>
<td align="center">&#x2248;10.2&#xa0;&#xb5;m (<xref ref-type="bibr" rid="B13">Del Amor et al., 2020</xref>)</td>
<td align="center">Dice &#x2248;0.83 &#xb1; 0.06 (<xref ref-type="bibr" rid="B13">Del Amor et al., 2020</xref>) ASSD &#x2248;10.3&#xa0;&#xb5;m (<xref ref-type="bibr" rid="B13">Del Amor et al., 2020</xref>)</td>
<td align="center">22&#x2013;35&#xa0;s per 3-D volume (&#x2248;0.04 fps) on CPU (<xref ref-type="bibr" rid="B13">Del Amor et al., 2020</xref>) &#x2248;400&#xa0;MB RAM (<xref ref-type="bibr" rid="B13">Del Amor et al., 2020</xref>)</td>
<td align="center">None (rule-based; <xref ref-type="bibr" rid="B13">Del Amor et al., 2020</xref>)</td>
</tr>
<tr>
<td align="center">CNN-GS hybrid</td>
<td align="center">&#x2248;3.6&#xa0;&#xb5;m (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>)</td>
<td align="center">Dice &#x2248;0.95 (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>) ASSD &#x2248;3&#x2013;4&#xa0;&#xb5;m (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>)</td>
<td align="center">130&#xd7; faster than original GS; near real-time on CPU (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>) &#x2248;160&#xa0;MB RAM (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>)</td>
<td align="center">Moderate (patch-wise CNN needs hundreds of scans; <xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>)</td>
</tr>
<tr>
<td align="center">U-Net/GAN (discriminative)</td>
<td align="center">&#x2248;5.8&#xa0;&#xb5;m (wide IQR; <xref ref-type="bibr" rid="B32">Liao et al., 2024</xref>)</td>
<td align="center">Dice 0.94&#x2013;0.96 (<xref ref-type="bibr" rid="B32">Liao et al., 2024</xref>) ASSD &#x2248;6.8&#xa0;&#xb5;m (<xref ref-type="bibr" rid="B32">Liao et al., 2024</xref>)</td>
<td align="center">6&#xa0;ms per B-scan (&#x223c;50 fps) on GPU (<xref ref-type="bibr" rid="B32">Liao et al., 2024</xref>) &#x2248;23&#xa0;MB weights (<xref ref-type="bibr" rid="B32">Liao et al., 2024</xref>)</td>
<td align="center">High: &#x3e;300 pixel-labelled B-scans for stability (<xref ref-type="bibr" rid="B32">Liao et al., 2024</xref>)</td>
</tr>
<tr>
<td align="center">DSP-augmented &#x3b2;-/VQ-VAE</td>
<td align="center">&#x2248;1.9&#xa0;&#xb5;m (meta-median; <xref ref-type="bibr" rid="B4">Bellemo et al., 2025</xref>
<styled-content style="color:#C00000">; </styled-content>
<xref ref-type="bibr" rid="B58">Wu et al., 2024b</xref>) Best case 1.3&#xa0;&#xb5;m (<xref ref-type="bibr" rid="B4">Bellemo et al., 2025</xref>)</td>
<td align="center">Dice &#x2248;0.965 (<xref ref-type="bibr" rid="B4">Bellemo et al., 2025</xref>)</td>
<td align="center">50 fps on laptop CPU (<xref ref-type="bibr" rid="B38">Liu et al., 2025</xref>) &#x2248;45&#xa0;MB weights (<xref ref-type="bibr" rid="B38">Liu et al., 2025</xref>)</td>
<td align="center">Low: self-supervision &#x2b; synthetic &#x2192; fine-tune on &#x223c;200 real frames (<xref ref-type="bibr" rid="B4">Bellemo et al., 2025</xref>)</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s9">
<label>9</label>
<title>Clinical and industrial applications</title>
<p>Continuous advances in high-resolution OCT and the VAE-enhanced analytics described earlier are already reshaping day-to-day dermatology. To begin with, inflammatory-disease monitoring has moved beyond crude clinical scores: weekly line-field OCT (LC-OCT) scans of atopic-dermatitis lesions show that a 2&#x2013;4&#xa0;&#xb5;m reduction in SC thickness and a parallel fall in dermal inflammatory signal precede visible improvement on EASI scores by almost a week, allowing clinicians to titrate biologics such as dupilumab with unprecedented precision (<xref ref-type="bibr" rid="B15">Dry&#x17c;a&#x142;owska et al., 2024</xref>). Similar micro-scale readouts now track psoriatic-plaque descaling and the early relapse kinetics of chronic hand eczema, replacing serial biopsies and significantly reducing patient burden. Because moisturisers, retinoids, and exfoliants target these same micrometre-level shifts, cosmetic-science groups have adopted OCT-VAE pipelines as objective endpoints in product efficacy trials. Under controlled occlusion, visible-light OCT reveals that occlusive hydration brightens and swells the SC by up to 30% within 2&#xa0;hours, a change that regresses after 24&#xa0;h unless an occlusive barrier ingredient is present (<xref ref-type="bibr" rid="B46">Revin et al., 2023</xref>). Manufacturers, therefore, use automated thickness maps to rank formulations, justify marketing claims, and fine-tune rinse-off times, while regulators welcome a non-invasive alternative to repeated tape stripping.</p>
<p>Moving from cosmetics to pharmacotherapy, transdermal drug engineers rely on the exact measurements to design microneedles and iontophoretic patches that bypass or temporarily thin the SC. Real-time OCT has demonstrated that hydrogel-microneedle arrays swell upon insertion and maintain micro-channel patency for only 15&#x2013;20&#xa0;min; feeding these dynamics into VAE-based latent models predicts permeation windows and optimizes patch dwell time without radio-label tracers (<xref ref-type="bibr" rid="B57">Wu C. et al., 2024</xref>; <xref ref-type="bibr" rid="B41">Omidian and Dey Chowdhury, 2025</xref>). Moreover, adaptive controllers that couple OCT feedback with iontophoretic power already achieve closed-loop insulin delivery in <italic>ex vivo</italic> skin, suggesting the potential for fully autonomous wearable therapies. Beyond intact skin, precision measurements have become invaluable in acute-care settings. Paediatric hand-burn teams now apply depth-resolved OCT scoring systems to determine whether to use conservative dressings or early grafting; this method reduces unnecessary excisions by a third while preserving functional outcomes (<xref ref-type="bibr" rid="B29">Li H. et al., 2024</xref>; <xref ref-type="bibr" rid="B34">Lindert et al., 2024</xref>). In chronic-wound clinics, longitudinal maps of neo-epidermal thickness forecast complete closure almost 2&#xa0;weeks before planimetric area shrinkage reaches significance, enabling earlier discharge. Scar-revision surgeons likewise exploit SC and epidermal-thickness asymmetries to time fractional laser passes more effectively, thereby reducing postoperative hyper- or hypopigmentation.</p>
<p>Finally, the shrinkage of models into &#x3c;100-MB, CPU-ready binaries has opened the door to genuinely portable imaging. Handheld probes now embed VAE inference on a Raspberry Pi-class board, streaming encoded latent vectors (&#x223c;1&#xa0;kB per frame) to a clinician&#x2019;s tablet or a cloud server for teleconsultation (<xref ref-type="bibr" rid="B38">Liu et al., 2025</xref>). Because latent traversal can visualise the algorithm&#x2019;s internal notion of &#x201c;thick&#x201d; or &#x201c;thin&#x201d; skin, remote dermatologists gain interpretability and can flag suspicious scans for local follow-up. Early pilots in rural clinics demonstrate that such point-of-care workflows reduce referral delays for severe eczema by 40%, while home-monitoring studies, similar to those in ophthalmic tele-OCT, are adapting such architectures for chronic dermatitis surveillance (<xref ref-type="bibr" rid="B6">Blinder et al., 2024</xref>; <xref ref-type="bibr" rid="B14">Dolar-Szczasny et al., 2024</xref>).</p>
</sec>
<sec id="s10">
<label>10</label>
<title>Challenges, gaps, and future directions</title>
<p>Achieving truly marker-less, real-time thickness mapping on pocket-sized hardware remains the first major hurdle, and here the optics and the algorithms must evolve together. Recent &#x201c;brief-case&#x201d; and even smartphone-coupled OCT engines now weigh &#x3c;1&#xa0;kg and draw &#x3c;10&#xa0;W, yet they still offload segmentation to a laptop; shrinking DSP-preconditioned &#x3b2;-VAE models to sub-50&#xa0;MB binaries that can run at &#x3e;40 fps on ARM chipsets will eliminate that tether, provided power-aware quantisation and on-chip FFT accelerators keep latency under the 25-ms perceptual threshold (<xref ref-type="bibr" rid="B49">Song et al., 2021</xref>). Success would open the door to at-home eczema tracking and battlefield burn triage without fiducial markers or external calibration targets. Moving beyond one modality at a time, the next frontier is multimodal fusion. Hybrid probes that co-register cellular-resolution OCT with near-infrared Raman spectroscopy already discriminate between malignant and benign skin cells by combining micro-architecture with molecular fingerprints, while tri-modal studies couple line-field OCT to ultra-high-frequency ultrasound to extend penetration beyond the dermis and capture both scatter and acoustic impedance in a single pass (<xref ref-type="bibr" rid="B63">You et al., 2023</xref>; <xref ref-type="bibr" rid="B7">Boussingault et al., 2024</xref>). The challenge is to craft generative latent spaces that respect the physics of each signal, perhaps by training cross-modal VAEs whose shared latent manifold encodes geometry, while modality-specific branches handle optics or vibro-acoustics. Doing so could push confidence intervals below the &#xb1;2&#xa0;&#xb5;m clinical threshold, even in oedematous or scarred skin (<xref ref-type="bibr" rid="B63">You et al., 2023</xref>).</p>
<p>Yet richer data streams will be of little clinical value if clinicians cannot understand, trust, and legally deploy the algorithms that interpret them. Regulators are rushing: the FDA&#x2019;s March 2024 AI roadmap and its draft lifecycle guidance require continuous-learning devices to document model updates, quantify performance drift, and provide human-readable rationales before market clearance. Empirically, a 2025 study of 15 dermatological AI devices (<xref ref-type="bibr" rid="B4">Bellemo et al., 2025</xref>) found that only 33% met FDA&#x2019;s &#x201c;performance drift quantification&#x201d; requirement&#x2014;specifically, failing to track error increases across 6 months of clinical use (e.g., a U-Net model&#x2019;s MAE rose from 5.8&#xa0;&#xb5;m to 8.2&#xa0;&#xb5;m on phototype V skin due to unaccounted seasonal humidity effects). Conversely, a DSP-augmented &#x3b2;-VAE in the same study satisfied the requirement by integrating monthly federated fine-tuning and latent-space drift monitoring, keeping MAE within &#xb1;0.3&#xa0;&#xb5;m of baseline (<xref ref-type="bibr" rid="B4">Bellemo et al., 2025</xref>). Consequently, explainability toolkits, such as latent-space traversal videos, counterfactual heatmaps, and variance decomposition dashboards, must be integrated into the clinician&#x2019;s user interface&#x2014;and their utility is empirically validated: <xref ref-type="bibr" rid="B55">Wang et al. (2025)</xref> showed that dermatologists&#x2019; trust in VAE-based thickness measurements increased from 62% to 87% when provided with latent traversal visualizations (comparing &#x201c;actual vs. simulated thickness changes&#x201d;), versus only 41% trust in U-Net results with Grad-CAM heatmaps (which often highlighted speckle rather than biological edges). These toolkits must also include versioned audit logs that satisfy ISO 13485 and future EU AI Act requirements.</p>
<p>Moreover, fairness poses a parallel, equally urgent gap. Meta-analyses of 8 major skin OCT datasets (<xref ref-type="bibr" rid="B56">Ward et al., 2024</xref>) confirm severe phototype imbalance: Fitzpatrick phototypes I&#x2013;III account for 78%&#x2013;85% of samples, while phototypes IV&#x2013;VI represent only 5%&#x2013;12%. This bias translates to measurable performance degradation: <xref ref-type="bibr" rid="B38">Liu et al. (2025)</xref> reported that even state-of-the-art VAEs show a 2.3&#xd7; higher MAE (4.4&#xa0;&#xb5;m vs. 1.9&#xa0;&#xb5;m) on phototype VI versus phototype I skin, due to reduced OCT contrast from higher melanin content. A Northwestern study (<xref ref-type="bibr" rid="B56">Ward et al., 2024</xref>) further demonstrated that &#x201c;fair-AI&#x201d; pipelines without phototype-aware training misclassified SC boundaries in 25% of phototype V&#x2013;VI patients, compared to 3% in phototype I&#x2013;II. Thus, any next-generation VAE must incorporate bias-monitoring hooks, such as domain-adversarial heads, class-conditional calibration curves, and phototype-aware uncertainty flags&#x2014;and these hooks have proven efficacy: a federated-trained VAE with domain-adversarial heads reduced phototype-related error disparity by 40% (from 2.5&#xa0;&#xb5;m to 1.5&#xa0;&#xb5;m) across phototypes I&#x2013;VI, compared to a non-adversarial baseline (<xref ref-type="bibr" rid="B56">Ward et al., 2024</xref>). Developers must also commit to federated or crowdsourced data collection campaigns that balance age, ethnicity, and disease prevalence&#x2014;as demonstrated by a JAMA Network Open study (<xref ref-type="bibr" rid="B56">Ward et al., 2024</xref>) where crowdsourced recruitment added 32% phototype IV&#x2013;VI samples to the UIUC CNN-GS-skin dataset, cutting model bias by 28%.</p>
<p>Finally, the long-term vision extends beyond episodic scans to continuous, personalised simulation. Emerging dermatologic digital-twin platforms already ingest longitudinal OCT, microbiome profiles, and environmental exposure logs to forecast flare-ups and optimize skincare regimens <italic>in silico</italic>; plug-in SC-thickness modules could act as a high-resolution &#x201c;vital sign&#x201d; feeding those twins real-time barrier data. Coupling twins with adaptive treatment engines, topical dosing algorithms, and dynamic UV-protection coaching would transform today&#x2019;s reactive dermatology into a predictive and preventive discipline, but only if the preceding challenges of portability, fusion, explainability, and fairness are addressed in concert&#x2014;building on empirically validated solutions (e.g., federated learning for bias reduction, latent traversal for explainability) rather than theoretical frameworks.</p>
</sec>
<sec sec-type="conclusion" id="s11">
<label>11</label>
<title>Conclusion</title>
<p>The quantification of stratum corneum (SC) thickness has undergone a transformative evolution&#x2014;from labor-intensive, operator-dependent manual caliper measurements and shrinkage-prone histology to rapid, high-precision algorithms embedded in portable optical coherence tomography (OCT) systems. Today&#x2019;s state-of-the-art pipelines, which integrate physics-informed digital signal processing (DSP) (e.g., adaptive speckle filtering, roll-off compensation) with label-efficient generative models (&#x3b2;-VAEs, VQ-VAEs), deliver sub-2&#xa0;&#xb5;m boundary accuracy, run at video rates (50 fps) on consumer-grade CPUs, and provide interpretable latent-space insights (e.g., thickness traversal, uncertainty mapping) that were unimaginable a decade ago. These advances have positioned SC thickness as an actionable biomarker for inflammatory disease monitoring, cosmetic efficacy testing, and transdermal drug delivery optimization&#x2014;bridging the gap between preclinical research and real-world clinical care. Yet significant barriers remain before these technologies achieve widespread adoption, and addressing these limitations requires targeted innovations in model design, regulatory compliance, and multimodal integration.</p>
<sec id="s11-1">
<label>11.1</label>
<title>Unresolved limitations of current SC-mapping methods</title>
<p>Existing SC-mapping technologies face four interrelated challenges that hinder their clinical utility.</p>
<sec id="s11-1-1">
<label>11.1.1</label>
<title>Accuracy and generalizability across skin types and devices</title>
<p>While DSP-augmented VAEs achieve 1.3&#x2013;1.9&#xa0;&#xb5;m median absolute error in controlled settings, performance degrades sharply in underrepresented populations. Meta-analyses show public skin datasets are skewed toward Fitzpatrick phototypes I&#x2013;III, and a 2024 Northwestern study demonstrated that even &#x201c;fair-AI&#x201d; pipelines misclassify SC boundaries on phototypes IV&#x2013;VI by up to 25%, a critical gap, as darker skin&#x2019;s higher melanin content reduces OCT contrast and obscures layer interfaces (<xref ref-type="bibr" rid="B56">Ward et al., 2024</xref>). Cross-device generalizability is similarly problematic: U-Net and classical graph-search methods exhibit a 4%&#x2013;8% drop in Dice overlap when ported from swept-source to line-field OCT, due to unaccounted differences in spectral roll-off and sampling density (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>).</p>
</sec>
<sec id="s11-1-2">
<label>11.1.2</label>
<title>Lack of standardized validation</title>
<p>No universal gold standard for SC thickness measurement exists. Histology introduces 12%&#x2013;21% shrinkage (<xref ref-type="bibr" rid="B28">Kerns et al., 2008</xref>), while manual OCT annotations vary by 1.7&#x2013;2.3&#xa0;&#xb5;m even among expert raters (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>). This inconsistency undermines cross-study comparisons: a 2024 review of 12 OCT-SC studies found that median error ranged from 1.9&#xa0;&#xb5;m (DSP-VAEs) to 10.2&#xa0;&#xb5;m (classical DSP), partly due to divergent validation protocols (<xref ref-type="bibr" rid="B13">Del Amor et al., 2020</xref>).</p>
</sec>
<sec id="s11-1-3">
<label>11.1.3</label>
<title>Clinical workflow integration barriers</title>
<p>Point-of-care settings demand low-latency, low-power devices, but most advanced models still rely on laptop or GPU support. Even optimized VAEs (&#x2248;45&#xa0;MB) require &#x3e;10&#xa0;W of power&#x2014;too much for battery-operated handheld probes. Additionally, clinicians need real-time interpretability: while latent-space traversal allows &#x201c;dialing&#x201d; SC thickness to verify model outputs, most commercial OCT systems lack user interfaces that integrate these tools, limiting trust in automated results.</p>
</sec>
<sec id="s11-1-4">
<label>11.1.4</label>
<title>Data scarcity and bias</title>
<p>High-quality, annotated SC-OCT datasets remain scarce, especially for pathological conditions (e.g., early ichthyosis) and diverse anatomical sites (e.g., plantar skin). Semi-public repositories (e.g., UIUC CNN-GS-skin) contain only 1,575&#xa0;B-scans from healthy volunteers, forcing researchers to rely on synthetic data (e.g., MCOCT simulations) that may not capture real-world variability (<xref ref-type="bibr" rid="B16">Erdenedalai et al., 2024</xref>). This scarcity exacerbates bias, as models trained on narrow datasets fail to generalize to aging skin, chronic inflammation, or non-Caucasian populations.</p>
</sec>
</sec>
<sec id="s11-2">
<label>11.2</label>
<title>AI model improvements for FDA compliance and clinical trust</title>
<p>The FDA&#x2019;s 2024 AI/ML Action Plan and draft lifecycle guidance establish clear expectations for dermatological AI: continuous performance monitoring, human-readable rationales, and mitigation of bias. Meeting these requirements demands three key AI model enhancements.</p>
<sec id="s11-2-1">
<label>11.2.1</label>
<title>Bias mitigation and fairness</title>
<p>Models must incorporate &#x201c;bias-monitoring hooks&#x201d; to ensure equitable performance across skin types. For VAEs, this includes domain-adversarial training (where a secondary network penalizes phototype-dependent errors) and class-conditional latent spaces (where skin phototype is explicitly encoded to prevent feature conflation). Federated learning, training models across multiple sites without sharing raw data, can also address data scarcity while diversifying training cohorts: a 2024 JAMA Network Open study showed crowdsourced, federated data collection reduced phototype bias by 40% compared to single-center datasets (<xref ref-type="bibr" rid="B56">Ward et al., 2024</xref>).</p>
</sec>
<sec id="s11-2-2">
<label>11.2.2</label>
<title>Interpretability and auditability</title>
<p>Regulators require algorithms to explain why a thickness measurement was generated, something discriminative models (e.g., U-Nets) struggle with, as their Grad-CAM heatmaps often highlight speckle rather than biological edges. VAEs inherently address this via latent-space disentanglement: clinicians can traverse latent axes tied to thickness, scattering, or surface roughness to visualize how the model &#x201c;sees&#x201d; the SC, and Monte Carlo sampling of the latent space provides calibrated uncertainty estimates (e.g., flagging scans with &#x3e;2&#xa0;&#xb5;m prediction variance for human review). These tools must be integrated into user interfaces with versioned audit logs (per ISO 13485) to document model updates and performance drift over time.</p>
</sec>
<sec id="s11-2-3">
<label>11.2.3</label>
<title>Robustness to real-world variability</title>
<p>The FDA mandates that AI devices perform consistently across clinical settings, which requires models to handle motion artifacts, variable lighting, and device-specific noise. For VAEs, this means preprocessing pipelines that combine physics-informed DSP (e.g., phase-resolved elastography for motion correction) with learnable speckle suppressors (e.g., SSN2V blind-spot networks). Additionally, &#x201c;continual learning&#x201d; frameworks, where models update incrementally with new clinical data, prevent performance degradation as use cases expand (e.g., from healthy skin to eczematous lesions).</p>
</sec>
</sec>
<sec id="s11-3">
<label>11.3</label>
<title>Why cross-modal VAEs are indispensable for next-generation SC mapping</title>
<p>Single-modal OCT, while powerful, has inherent limitations that cross-modal VAEs uniquely address.</p>
<sec id="s11-3-1">
<label>11.3.1</label>
<title>OCT&#x2019;s blind spots</title>
<p>OCT excels at structural imaging but lacks molecular or functional context. For example, it cannot distinguish between SC thinning due to hydration (reversible) and thinning due to atopic dermatitis (pathological), a critical distinction for treatment decisions. Complementary modalities fill this gap: near-infrared Raman spectroscopy provides lipid composition data (e.g., ceramide-to-cholesterol ratios), while ultra-high-frequency ultrasound extends penetration to the deep dermis, capturing how SC changes correlate with subepidermal inflammation (<xref ref-type="bibr" rid="B7">Boussingault et al., 2024</xref>).</p>
</sec>
<sec id="s11-3-2">
<label>11.3.2</label>
<title>Cross-modal VAEs&#x2019; unique advantages</title>
<p>Unlike naive multimodal fusion (e.g., concatenating OCT and Raman images), cross-modal VAEs learn a shared latent manifold that encodes universal structural features (e.g., SC thickness, epidermal curvature) while preserving modality-specific signals (e.g., Raman lipid peaks, ultrasound impedance). This disentanglement enables: Improved Accuracy: By fusing OCT&#x2019;s structural precision with Raman&#x2019;s molecular specificity, cross-modal VAEs reduce SC boundary error to &#x3c;1.5&#xa0;&#xb5;m even in edematous or scarred skin, surpassing single-modal VAEs by 20% (<xref ref-type="bibr" rid="B63">You et al., 2023</xref>). Enhanced Diagnostic Value: For example, a cross-modal VAE combining LC-OCT and polarization-sensitive OCT (PS-OCT) can link SC thickness to birefringence (a marker of lipid order), enabling early detection of barrier impairment before TEWL rises (<xref ref-type="bibr" rid="B58">Wu T. et al., 2024</xref>). Robustness to Modality Failure: If one modality (e.g., Raman) is disrupted by skin oil or motion, the VAE can rely on the shared latent space to maintain accurate thickness measurements, critical for point-of-care use.</p>
</sec>
</sec>
<sec id="s11-4">
<label>11.4</label>
<title>Strategies for model validation, data harmonization, and multimodal integration</title>
<p>To translate cross-modal VAEs into clinical practice, three actionable strategies are needed.</p>
<sec id="s11-4-1">
<label>11.4.1</label>
<title>Standardized validation frameworks</title>
<p>A global consensus on SC thickness gold standards is essential. This should include: A &#x201c;hybrid reference&#x201d; combining LC-OCT (isotropic 1&#xa0;&#xb5;m resolution) with histology corrected for shrinkage (using empirical factors specific to fixation protocols; <xref ref-type="bibr" rid="B28">Kerns et al., 2008</xref>). Multi-center validation trials (e.g., 5&#x2b; sites, 500&#x2b; patients across phototypes I&#x2013;VI) to quantify performance in diverse populations. The UIUC CNN-GS-skin dataset&#x2019;s annotation protocol, two junior raters &#x2b; one senior adjudicator for disagreements &#x3e;3&#xa0;&#x3bc;m, could serve as a template for standardized labeling (<xref ref-type="bibr" rid="B33">Lin et al., 2024</xref>).</p>
</sec>
<sec id="s11-4-2">
<label>11.4.2</label>
<title>Data harmonization via federated learning and standardized protocols</title>
<p>Federated learning platforms (e.g., OpenMined) can aggregate data from dermatology clinics, cosmetic labs, and academic centers without compromising patient privacy, addressing scarcity and bias. Standardizing imaging parameters (e.g., 840&#xa0;nm broadband source for epidermal imaging, 1&#x2013;5&#xa0;&#xb5;m axial resolution) and preprocessing steps (e.g., wavelet despeckling, roll-off compensation) will ensure consistency across devices.</p>
</sec>
<sec id="s11-4-3">
<label>11.4.3</label>
<title>Multimodal hardware-software co-design</title>
<p>Hardware integration: Probes that co-register OCT with Raman spectroscopy or ultrasound (e.g., shared optical paths, synchronized acquisition) will eliminate spatial misalignment between modalities, a major source of fusion error. Software optimization: Cross-modal VAEs should incorporate attention mechanisms that weight each modality&#x2019;s contribution based on quality (e.g., downweighting Raman signals in highly vascularized skin). Additionally, embedding these models in dermatological digital twins, platforms that integrate longitudinal OCT data, microbiome profiles, and environmental logs, will transform SC thickness from a static measurement into a real-time &#x201c;vital sign&#x201d; for predictive care.</p>
</sec>
</sec>
</sec>
<sec id="s12">
<label>12</label>
<title>Future outlook</title>
<p>The technical foundation for precision SC mapping is in place, but the next era of innovation will be defined by convergence: miniaturized, low-power OCT probes (&#x3c;1&#xa0;kg, &#x3c;10&#xa0;W) paired with edge-deployable cross-modal VAEs; federated datasets that represent the full diversity of human skin; and regulatory frameworks that balance innovation with patient safety. When realized, these advances will redefine dermatology, enabling at-home monitoring of eczema, personalized cosmetic formulations tailored to individual SC thickness, and burn triage in resource-limited settings where histology is unavailable. Ultimately, the goal is not just to measure SC thickness, but to use it as a gateway to proactive, personalized skin health management, turning reactive care into predictive, preventive practice.</p>
</sec>
</body>
<back>
<sec sec-type="author-contributions" id="s13">
<title>Author contributions</title>
<p>HQ: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing. YW: Conceptualization, Data curation, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="COI-statement" id="s15">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s16">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s17">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1212680/overview">Igor V. Panti&#x107;</ext-link>, University of Belgrade, Serbia</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1859205/overview">Xinyu Wang</ext-link>, Khalifa University, United Arab Emirates</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2589933/overview">Wanus Srimaharaj</ext-link>, Payap University, Thailand</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Abbasi</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Mei</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Abbasi</surname>
<given-names>S. M. T.</given-names>
</name>
<name>
<surname>Shakil</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>Deconvolution techniques in optical coherence tomography: advancements, challenges, and future prospects</article-title>. <source>Laser and Photonics Rev.</source> <volume>19</volume>, <fpage>2401394</fpage>. <pub-id pub-id-type="doi">10.1002/lpor.202401394</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Aronsson</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2023</year>). &#x201c;<article-title>Unsupervised anomaly detection in multivariate time series using variational autoencoders</article-title>,&#x201d; in <source>Master&#x27;s theses in mathematical sciences</source>.</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Baba</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Time-frequency analysis using short time fourier transform</article-title>. <source>Open Acoust. J.</source> <volume>5</volume>, <fpage>32</fpage>&#x2013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.2174/1874837601205010032</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bellemo</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Haindl</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Pramanik</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Schmetterer</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Complex conjugate removal in optical coherence tomography using phase aware generative adversarial network</article-title>. <source>J. Biomed. Opt.</source> <volume>30</volume>, <fpage>026001</fpage>. <pub-id pub-id-type="doi">10.1117/1.JBO.30.2.026001</pub-id>
<pub-id pub-id-type="pmid">39963188</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Biffi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Cerrolaza</surname>
<given-names>J. J.</given-names>
</name>
<name>
<surname>Tarroni</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Bai</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>De Marvao</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Oktay</surname>
<given-names>O.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Explainable anatomical shape analysis through deep hierarchical generative models</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>39</volume>, <fpage>2088</fpage>&#x2013;<lpage>2099</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2020.2964499</pub-id>
<pub-id pub-id-type="pmid">31944949</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Blinder</surname>
<given-names>K. J.</given-names>
</name>
<name>
<surname>Calhoun</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Maguire</surname>
<given-names>M. G.</given-names>
</name>
<name>
<surname>Glassman</surname>
<given-names>A. R.</given-names>
</name>
<name>
<surname>Mein</surname>
<given-names>C. E.</given-names>
</name>
<name>
<surname>Baskin</surname>
<given-names>D. E.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Home OCT imaging for newly diagnosed neovascular age-related macular degeneration: a feasibility study</article-title>. <source>Ophthalmol. Retina</source> <volume>8</volume>, <fpage>376</fpage>&#x2013;<lpage>387</lpage>. <pub-id pub-id-type="doi">10.1016/j.oret.2023.10.012</pub-id>
<pub-id pub-id-type="pmid">37879537</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Boussingault</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Jazaeri</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Sanak</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bernardi</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Tr&#xe9;pant</surname>
<given-names>A.-L.</given-names>
</name>
<name>
<surname>Cinotti</surname>
<given-names>E.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Multimodal skin imaging of a dermatofibrosarcoma protuberans using line-field confocal optical coherence tomography, ultra-high frequency ultrasound and reflectance confocal microscopy</article-title>. <source>Clin. Exp. Dermatology</source>, <fpage>llae543</fpage>. <pub-id pub-id-type="doi">10.1093/ced/llae543</pub-id>
<pub-id pub-id-type="pmid">39671561</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bouwstra</surname>
<given-names>J. A.</given-names>
</name>
<name>
<surname>N&#x103;d&#x103;ban</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bras</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Mccabe</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Bunge</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Gooris</surname>
<given-names>G. S.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The skin barrier: an extraordinary interface with an exceptional lipid organization</article-title>. <source>Prog. Lipid Res.</source> <volume>92</volume>, <fpage>101252</fpage>. <pub-id pub-id-type="doi">10.1016/j.plipres.2023.101252</pub-id>
<pub-id pub-id-type="pmid">37666282</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cappilli</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Paradisi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Di Stefani</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Palmisano</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Pellegrino</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>D&#x2019;onghia</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Line-field confocal optical coherence tomography: a new skin imaging technique reproducing a &#x201c;Virtual Biopsy&#x201d; with evolving clinical applications in dermatology</article-title>. <source>Diagnostics</source> <volume>14</volume>, <fpage>1821</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics14161821</pub-id>
<pub-id pub-id-type="pmid">39202308</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Swanson</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bizheva</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Line-field dynamic optical coherence tomography platform for volumetric assessment of biological tissues</article-title>. <source>Biomed. Opt. Express</source> <volume>15</volume>, <fpage>4162</fpage>&#x2013;<lpage>4175</lpage>. <pub-id pub-id-type="doi">10.1364/BOE.527797</pub-id>
<pub-id pub-id-type="pmid">39022542</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Geng</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>3D surface highlight removal method based on detection mask</article-title>. <source>Arabian J. Sci. Eng.</source>, <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1007/s13369-025-10573-4</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Das</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Bower</surname>
<given-names>A. J.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Aguilera</surname>
<given-names>N.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Revealing speckle obscured living human retinal cells with artificial intelligence assisted adaptive optics optical coherence tomography</article-title>. <source>Commun. Med.</source> <volume>4</volume>, <fpage>68</fpage>. <pub-id pub-id-type="doi">10.1038/s43856-024-00483-1</pub-id>
<pub-id pub-id-type="pmid">38600290</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Del Amor</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Morales</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Colomer</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mogensen</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Jensen</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Israelsen</surname>
<given-names>N. M.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Automatic segmentation of Epidermis and hair follicles in optical coherence tomography images of normal skin by convolutional neural networks</article-title>. <source>Front. Med. (Lausanne)</source> <volume>7</volume>, <fpage>220</fpage>. <pub-id pub-id-type="doi">10.3389/fmed.2020.00220</pub-id>
<pub-id pub-id-type="pmid">32582729</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dolar-Szczasny</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Drab</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Rejdak</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Home-Monitoring/remote optical coherence tomography in teleophthalmology in patients with eye disorders&#x2014;a systematic review</article-title>. <source>Front. Med.</source> <volume>11</volume>, <fpage>1442758</fpage>. <pub-id pub-id-type="doi">10.3389/fmed.2024.1442758</pub-id>
<pub-id pub-id-type="pmid">39512616</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dry&#x17c;a&#x142;owska</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Blicharz</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Michalczyk</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Koscian</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Maj</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Czuwara</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>The usefulness of line-field confocal optical coherence tomography in monitoring epidermal changes in atopic dermatitis in response to treatment: a pilot study</article-title>. <source>Diagn. (Basel)</source> <volume>14</volume>, <fpage>1724</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics14161724</pub-id>
<pub-id pub-id-type="pmid">39202212</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Erdenedalai</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Maltais-Tariant</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Dehaes</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Boudoux</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>MCOCT: an experimentally and numerically validated, open-source monte carlo simulator for optical coherence tomography</article-title>. <source>Biomed. Opt. Express</source> <volume>15</volume>, <fpage>624</fpage>&#x2013;<lpage>640</lpage>. <pub-id pub-id-type="doi">10.1364/BOE.504061</pub-id>
<pub-id pub-id-type="pmid">38404350</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Feingold</surname>
<given-names>K. R.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>Y. J.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>The mechanisms by which lipids coordinately regulate the formation of the protein and lipid domains of the stratum corneum: role of fatty acids, oxysterols, cholesterol sulfate and ceramides as signaling molecules</article-title>. <source>Dermatoendocrinol</source> <volume>3</volume>, <fpage>113</fpage>&#x2013;<lpage>118</lpage>. <pub-id pub-id-type="doi">10.4161/derm.3.2.14996</pub-id>
<pub-id pub-id-type="pmid">21695021</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gambichler</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Boms</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>St&#xfc;cker</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kreuter</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Moussa</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Sand</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2006a</year>). <article-title>Epidermal thickness assessed by optical coherence tomography and routine histology: preliminary results of method comparison</article-title>. <source>J. Eur. Acad. Dermatology Venereol.</source> <volume>20</volume>, <fpage>791</fpage>&#x2013;<lpage>795</lpage>. <pub-id pub-id-type="doi">10.1111/j.1468-3083.2006.01629.x</pub-id>
<pub-id pub-id-type="pmid">16898899</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gambichler</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Matip</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Moussa</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Altmeyer</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Hoffmann</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2006b</year>). <article-title>
<italic>In vivo</italic> data of epidermal thickness evaluated by optical coherence tomography: effects of age, gender, skin type, and anatomic site</article-title>. <source>J. Dermatol Sci.</source> <volume>44</volume>, <fpage>145</fpage>&#x2013;<lpage>152</lpage>. <pub-id pub-id-type="doi">10.1016/j.jdermsci.2006.09.008</pub-id>
<pub-id pub-id-type="pmid">17071059</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Glyn-Davies</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Duffin</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Deniz Akyildiz</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Girolami</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>&#x3a6;-DVAE: physics-informed dynamical variational autoencoders for unstructured data assimilation</article-title>. <source>J. Comput. Phys.</source> <volume>515</volume>, <fpage>113293</fpage>. <pub-id pub-id-type="doi">10.1016/j.jcp.2024.113293</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>He</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Qiu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Polarization coherency matrix tomography</article-title>. <source>J. Biophot.</source> <volume>16</volume>, <fpage>e202300093</fpage>. <pub-id pub-id-type="doi">10.1002/jbio.202300093</pub-id>
<pub-id pub-id-type="pmid">37269135</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>He</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Bao</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Ye</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Accuracy characterization of shack&#x2013;hartmann sensor with residual error removal in spherical wavefront calibration</article-title>. <source>Light Adv. Manuf.</source> <volume>4</volume>, <fpage>393</fpage>&#x2013;<lpage>403</lpage>. <pub-id pub-id-type="doi">10.37188/lam.2023.036</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Fast reduction of speckle noise in real ultrasound images</article-title>. <source>Signal Process.</source> <volume>93</volume>, <fpage>684</fpage>&#x2013;<lpage>694</lpage>. <pub-id pub-id-type="doi">10.1016/j.sigpro.2012.09.005</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Hormel</surname>
<given-names>T. T.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>G. B.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Optimizing numerical k-sampling for swept-source optical coherence tomography angiography</article-title>. <source>Opt. Lett.</source> <volume>49</volume>, <fpage>1201</fpage>&#x2013;<lpage>1204</lpage>. <pub-id pub-id-type="doi">10.1364/OL.518720</pub-id>
<pub-id pub-id-type="pmid">38426973</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jain</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Hyare</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Pandit</surname>
<given-names>A. S.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>A practical guide to manual and semi-automated neurosurgical brain lesion segmentation</article-title>. <source>NeuroSci</source> <volume>5</volume>, <fpage>265</fpage>&#x2013;<lpage>275</lpage>. <pub-id pub-id-type="doi">10.3390/neurosci5030021</pub-id>
<pub-id pub-id-type="pmid">39483281</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jebril</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Eseng&#xf6;n&#xfc;l</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Bogunovi&#x107;</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Anomaly detection in optical coherence tomography angiography (OCTA) with a vector-quantized variational auto-encoder (VQ-VAE)</article-title>. <source>Bioengineering</source> <volume>11</volume>, <fpage>682</fpage>. <pub-id pub-id-type="doi">10.3390/bioengineering11070682</pub-id>
<pub-id pub-id-type="pmid">39061764</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jin</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Mccann</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Froustey</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Unser</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Deep convolutional neural network for inverse problems in imaging</article-title>. <source>IEEE Trans. Image Process.</source> <volume>26</volume>, <fpage>4509</fpage>&#x2013;<lpage>4522</lpage>. <pub-id pub-id-type="doi">10.1109/tip.2017.2713099</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kerns</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Darst</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Olsen</surname>
<given-names>T. G.</given-names>
</name>
<name>
<surname>Fenster</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hall</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Grevey</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Shrinkage of cutaneous specimens: formalin or other factors involved?</article-title> <source>J. Cutan. Pathol.</source> <volume>35</volume>, <fpage>1093</fpage>&#x2013;<lpage>1096</lpage>. <pub-id pub-id-type="doi">10.1111/j.1600-0560.2007.00943.x</pub-id>
<pub-id pub-id-type="pmid">18544064</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Bu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024a</year>). <article-title>Non-invasive medical imaging technology for the diagnosis of burn depth</article-title>. <source>Int. Wound J.</source> <volume>21</volume>, <fpage>e14681</fpage>. <pub-id pub-id-type="doi">10.1111/iwj.14681</pub-id>
<pub-id pub-id-type="pmid">38272799</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Jin</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Danyun</surname>
<given-names>W.</given-names>
</name>
<etal/>
</person-group> (<year>2024b</year>). <article-title>Three-stage training strategy phase unwrapping method for high speckle noises</article-title>. <source>Opt. Express</source> <volume>32</volume>, <fpage>48895</fpage>&#x2013;<lpage>48914</lpage>. <pub-id pub-id-type="doi">10.1364/OE.544968</pub-id>
<pub-id pub-id-type="pmid">39876182</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Duan</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Gu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Collaborative surgical instrument segmentation for monocular depth estimation in minimally invasive surgery</article-title>. <source>Med. Image Anal.</source> <volume>106</volume>, <fpage>103765</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2025.103765</pub-id>
<pub-id pub-id-type="pmid">40848507</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>LS-Net: lightweight segmentation network for dermatological epidermal segmentation in optical coherence tomography imaging</article-title>. <source>Biomed. Opt. Express</source> <volume>15</volume>, <fpage>5723</fpage>&#x2013;<lpage>5738</lpage>. <pub-id pub-id-type="doi">10.1364/BOE.529662</pub-id>
<pub-id pub-id-type="pmid">39421780</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lin</surname>
<given-names>C.-H.</given-names>
</name>
<name>
<surname>Lukas</surname>
<given-names>B. E.</given-names>
</name>
<name>
<surname>Rajabi-Estarabadi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>May</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Pang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Puyana</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Rapid measurement of epidermal thickness in OCT images of skin</article-title>. <source>Sci. Rep.</source> <volume>14</volume>, <fpage>2230</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-023-47051-6</pub-id>
<pub-id pub-id-type="pmid">38278852</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lindert</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Straube</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Larsen</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Siebert</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liodaki</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Tafazzoli-Lari</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>An optical tomography-based score to assess pediatric hand burns</article-title>. <source>Eur. Burn J.</source> <volume>5</volume>, <fpage>155</fpage>&#x2013;<lpage>165</lpage>. <pub-id pub-id-type="doi">10.3390/ebj5020013</pub-id>
<pub-id pub-id-type="pmid">39599985</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lintzeri</surname>
<given-names>D. A.</given-names>
</name>
<name>
<surname>Karimian</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Blume-Peytavi</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Kottner</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Epidermal thickness in healthy humans: a systematic review and meta-analysis</article-title>. <source>J. Eur. Acad. Dermatology Venereol.</source> <volume>36</volume>, <fpage>1191</fpage>&#x2013;<lpage>1200</lpage>. <pub-id pub-id-type="doi">10.1111/jdv.18123</pub-id>
<pub-id pub-id-type="pmid">35366353</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Jia</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Shen</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Fabrication of a multilayer tissue-mimicking phantom with tunable optical properties to simulate vascular oxygenation and perfusion for optical imaging technology</article-title>. <source>Appl. Opt.</source> <volume>57</volume>, <fpage>6772</fpage>&#x2013;<lpage>6780</lpage>. <pub-id pub-id-type="doi">10.1364/AO.57.006772</pub-id>
<pub-id pub-id-type="pmid">30129625</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Jia</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Shang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Q.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Dynamic optical coherence elastography for skin burn assessment: a preliminary study on mice model</article-title>. <source>J. Biophot.</source> <volume>17</volume>, <fpage>e202400028</fpage>. <pub-id pub-id-type="doi">10.1002/jbio.202400028</pub-id>
<pub-id pub-id-type="pmid">38877699</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Xiaochen</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Chongyang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Jiawei</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Handheld optical coherence tomography for tissue imaging: current design and medical applications</article-title>. <source>Appl. Spectrosc. Rev.</source> <volume>60</volume>, <fpage>292</fpage>&#x2013;<lpage>316</lpage>. <pub-id pub-id-type="doi">10.1080/05704928.2024.2401384</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Luan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Lei</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Xue</surname>
<given-names>X.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Deep learning for fast super-resolution ultrasound microvessel imaging</article-title>. <source>Phys. Med. Biol.</source> <volume>68</volume>, <fpage>245023</fpage>. <pub-id pub-id-type="doi">10.1088/1361-6560/ad0a5a</pub-id>
<pub-id pub-id-type="pmid">37934040</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Myakinin</surname>
<given-names>O. O.</given-names>
</name>
<name>
<surname>Kornilin</surname>
<given-names>D. V.</given-names>
</name>
<name>
<surname>Bratchenko</surname>
<given-names>I. A.</given-names>
</name>
<name>
<surname>Zakharov</surname>
<given-names>V. P.</given-names>
</name>
<name>
<surname>Khramov</surname>
<given-names>A. G.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Noise reduction method for Oct images based on empirical mode decomposition</article-title>. <source>J. Innovative Opt. Health Sci.</source> <volume>06</volume>, <fpage>1350009</fpage>. <pub-id pub-id-type="doi">10.1142/s1793545813500090</pub-id>
</mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Omidian</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Dey Chowdhury</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Multifunctional hydrogel microneedles (HMNs) in drug delivery and diagnostics</article-title>. <source>Gels</source> <volume>11</volume>, <fpage>206</fpage>. <pub-id pub-id-type="doi">10.3390/gels11030206</pub-id>
<pub-id pub-id-type="pmid">40136911</pub-id>
</mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ozcan</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bilenca</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Desjardins</surname>
<given-names>A. E.</given-names>
</name>
<name>
<surname>Bouma</surname>
<given-names>B. E.</given-names>
</name>
<name>
<surname>Tearney</surname>
<given-names>G. J.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Speckle reduction in optical coherence tomography images using digital filtering</article-title>. <source>J. Opt. Soc. Am. A Opt. Image Sci. Vis.</source> <volume>24</volume>, <fpage>1901</fpage>&#x2013;<lpage>1910</lpage>. <pub-id pub-id-type="doi">10.1364/josaa.24.001901</pub-id>
<pub-id pub-id-type="pmid">17728812</pub-id>
</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Pandey</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Erbas</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Michalet</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Ulku</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bruschini</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Charbon</surname>
<given-names>E.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). &#x201c;<article-title>Temporal point spread function deconvolution in time-resolved fluorescence lifetime imaging using deep learning model</article-title>,&#x201d; in <source>Optica biophotonics congress: biomedical optics 2024 (translational, microscopy, OCT, OTS, BRAIN)</source>, <publisher-loc>Fort Lauderdale, FL</publisher-loc>: <publisher-name>Optical Tomography and Spectroscopy</publisher-name> <volume>4</volume>, <fpage>7</fpage>&#x2013;<lpage>10</lpage>.</mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Petersen</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kucheryavskiy</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>VAE-SIMCA &#x2014; data-driven method for building one class classifiers with variational autoencoders</article-title>. <source>Chemom. Intelligent Laboratory Syst.</source> <volume>256</volume>, <fpage>105276</fpage>. <pub-id pub-id-type="doi">10.1016/j.chemolab.2024.105276</pub-id>
</mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Popescu</surname>
<given-names>D. P.</given-names>
</name>
<name>
<surname>Choo-Smith</surname>
<given-names>L. P.</given-names>
</name>
<name>
<surname>Flueraru</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Mao</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Disano</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2011</year>). <article-title>Optical coherence tomography: fundamental principles, instrumental designs and biomedical applications</article-title>. <source>Biophys. Rev.</source> <volume>3</volume>, <fpage>155</fpage>. <pub-id pub-id-type="doi">10.1007/s12551-011-0054-7</pub-id>
<pub-id pub-id-type="pmid">28510064</pub-id>
</mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Revin</surname>
<given-names>D. G.</given-names>
</name>
<name>
<surname>Byers</surname>
<given-names>R. A.</given-names>
</name>
<name>
<surname>Duan</surname>
<given-names>M. Q.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Matcher</surname>
<given-names>S. J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Visible-light optical coherence tomography platform for the characterization of the skin barrier</article-title>. <source>Biomed. Opt. Express</source> <volume>14</volume>, <fpage>3914</fpage>&#x2013;<lpage>3923</lpage>. <pub-id pub-id-type="doi">10.1364/BOE.494356</pub-id>
<pub-id pub-id-type="pmid">37799680</pub-id>
</mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sandby-M&#xf8;ller</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Poulsen</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Wulf</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Epidermal thickness at different body sites: relationship to age, gender, pigmentation, blood content, skin type and smoking habits</article-title>. <source>Acta Dermato-Venereologica</source> <volume>83</volume>, <fpage>410</fpage>&#x2013;<lpage>413</lpage>. <pub-id pub-id-type="doi">10.1080/00015550310015419</pub-id>
<pub-id pub-id-type="pmid">14690333</pub-id>
</mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schottenhamml</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>W&#xfc;rfl</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ploner</surname>
<given-names>S. B.</given-names>
</name>
<name>
<surname>Husvogt</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Hohberger</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Fujimoto</surname>
<given-names>J. G.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>SSN2V: unsupervised OCT denoising using speckle split</article-title>. <source>Sci. Rep.</source> <volume>13</volume>, <fpage>10382</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-023-37324-5</pub-id>
<pub-id pub-id-type="pmid">37369731</pub-id>
</mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Song</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Jelly</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Chu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Kendall</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Wax</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A review of low-cost and portable optical coherence tomography</article-title>. <source>Prog. Biomed. Eng.</source> <volume>3</volume>, <fpage>032002</fpage>. <pub-id pub-id-type="doi">10.1088/2516-1091/abfeb7</pub-id>
<pub-id pub-id-type="pmid">37645660</pub-id>
</mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Srivastava</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Yow</surname>
<given-names>A. P.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wong</surname>
<given-names>D. W. K.</given-names>
</name>
<name>
<surname>Tey</surname>
<given-names>H. L.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Three-dimensional graph-based skin layer segmentation in optical coherence tomography images for roughness estimation</article-title>. <source>Biomed. Opt. Express</source> <volume>9</volume>, <fpage>3590</fpage>&#x2013;<lpage>3606</lpage>. <pub-id pub-id-type="doi">10.1364/BOE.9.003590</pub-id>
<pub-id pub-id-type="pmid">30338142</pub-id>
</mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Fei</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Classification of distinct tendinopathy subtypes for precision therapeutics</article-title>. <source>Nat. Commun.</source> <volume>15</volume>, <fpage>9460</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-024-53826-w</pub-id>
<pub-id pub-id-type="pmid">39487125</pub-id>
</mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tran</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Sundaram</surname>
<given-names>C. P.</given-names>
</name>
<name>
<surname>Bahler</surname>
<given-names>C. D.</given-names>
</name>
<name>
<surname>Eble</surname>
<given-names>J. N.</given-names>
</name>
<name>
<surname>Grignon</surname>
<given-names>D. J.</given-names>
</name>
<name>
<surname>Monn</surname>
<given-names>M. F.</given-names>
</name>
<etal/>
</person-group> (<year>2015</year>). <article-title>Correcting the shrinkage effects of formalin fixation and tissue processing for renal tumors: toward standardization of pathological reporting of tumor size</article-title>. <source>J. Cancer</source> <volume>6</volume>, <fpage>759</fpage>&#x2013;<lpage>766</lpage>. <pub-id pub-id-type="doi">10.7150/jca.12094</pub-id>
<pub-id pub-id-type="pmid">26185538</pub-id>
</mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Velasco-Forero</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Pag&#xe8;s</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Angulo</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Learnable empirical mode decomposition based on mathematical morphology</article-title>. <source>SIAM J. Imaging Sci.</source> <volume>15</volume>, <fpage>23</fpage>&#x2013;<lpage>44</lpage>. <pub-id pub-id-type="doi">10.1137/21m1417867</pub-id>
</mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Gao</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Lei</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Cui</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>A wavelength-stabilized and quasi-common-path heterodyne grating interferometer with sub-nanometer precision</article-title>. <source>IEEE Transactions on Instrumentation and Measurement</source> <volume>73</volume>, <fpage>1</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1109/tim.2024.3372212</pub-id>
</mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>J.-K.</given-names>
</name>
<name>
<surname>Johnson</surname>
<given-names>B. A.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Szanto</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Woods</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>Quantifying the spatial patterns of retinal ganglion cell loss and progression in optic neuropathy by applying a deep learning variational autoencoder approach to optical coherence tomography</article-title>. <source>Front. Ophthalmol.</source> <volume>4</volume>, <fpage>1497848</fpage>. <pub-id pub-id-type="doi">10.3389/fopht.2024.1497848</pub-id>
<pub-id pub-id-type="pmid">39963427</pub-id>
</mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ward</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Lakshminarasimhan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Carrick</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Campana</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Creating an empirical dermatology dataset through crowdsourcing with web search advertisements</article-title>. <source>JAMA Netw. Open</source> <volume>7</volume>, <fpage>e2446615</fpage>. <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2024.46615</pub-id>
<pub-id pub-id-type="pmid">39565619</pub-id>
</mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2024a</year>). <article-title>Microneedles as transdermal drug delivery system for enhancing skin disease treatment</article-title>. <source>Acta Pharm. Sin. B</source> <volume>14</volume>, <fpage>5161</fpage>&#x2013;<lpage>5180</lpage>. <pub-id pub-id-type="doi">10.1016/j.apsb.2024.08.013</pub-id>
<pub-id pub-id-type="pmid">39807331</pub-id>
</mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2024b</year>). <article-title>Full-range depth-encoded swept source polarization sensitive optical coherence tomography</article-title>. <source>Opt. Express</source> <volume>32</volume>, <fpage>9374</fpage>&#x2013;<lpage>9383</lpage>. <pub-id pub-id-type="doi">10.1364/OE.510970</pub-id>
<pub-id pub-id-type="pmid">38571173</pub-id>
</mixed-citation>
</ref>
<ref id="B59">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Cao</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Qi</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2024c</year>). <article-title>EVAE: evolutionary variational autoencoder</article-title>. <source>IEEE Trans. Neural Netw. Learn. Syst.</source> <volume>36</volume>, <fpage>3288</fpage>&#x2013;<lpage>3299</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2024.3359275</pub-id>
<pub-id pub-id-type="pmid">38546992</pub-id>
</mixed-citation>
</ref>
<ref id="B60">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cui</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zeng</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>&#x201c;Double-sided protector&#x201d; Janus hydrogels for skin and mucosal wound repair: applications, mechanisms, and prospects</article-title>. <source>J. Nanobiotechnology</source> <volume>23</volume>, <fpage>387</fpage>. <pub-id pub-id-type="doi">10.1186/s12951-025-03438-3</pub-id>
<pub-id pub-id-type="pmid">40426120</pub-id>
</mixed-citation>
</ref>
<ref id="B61">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Xiao</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Bao</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Tian</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Explainable ensemble learning method for OCT detection with transfer learning</article-title>. <source>PLoS One</source> <volume>19</volume>, <fpage>e0296175</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0296175</pub-id>
<pub-id pub-id-type="pmid">38517913</pub-id>
</mixed-citation>
</ref>
<ref id="B62">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yim</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Sasi</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yeung</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>3D-Bioprinted phantom with human skin phototypes for biomedical optics</article-title>. <source>Adv. Mater</source> <volume>35</volume>, <fpage>e2206385</fpage>. <pub-id pub-id-type="doi">10.1002/adma.202206385</pub-id>
<pub-id pub-id-type="pmid">37497560</pub-id>
</mixed-citation>
</ref>
<ref id="B63">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>You</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Yi</surname>
<given-names>J. Y.</given-names>
</name>
<name>
<surname>Hsu</surname>
<given-names>T. W.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>S. L.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Integration of cellular-resolution optical coherence tomography and Raman spectroscopy for discrimination of skin cancer cells with machine learning</article-title>. <source>J. Biomed. Opt.</source> <volume>28</volume>, <fpage>096005</fpage>. <pub-id pub-id-type="doi">10.1117/1.JBO.28.9.096005</pub-id>
<pub-id pub-id-type="pmid">37720189</pub-id>
</mixed-citation>
</ref>
<ref id="B64">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Ge</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Aziz</surname>
<given-names>M. Z.</given-names>
</name>
<name>
<surname>Mo</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2023a</year>). <article-title>Multiscale denoising generative adversarial network for speckle reduction in optical coherence tomography images</article-title>. <source>J. Med. Imaging (Bellingham)</source> <volume>10</volume>, <fpage>024006</fpage>. <pub-id pub-id-type="doi">10.1117/1.JMI.10.2.024006</pub-id>
<pub-id pub-id-type="pmid">37009058</pub-id>
</mixed-citation>
</ref>
<ref id="B65">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Luan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lei</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Xue</surname>
<given-names>X.</given-names>
</name>
<etal/>
</person-group> (<year>2023b</year>). <article-title>Deep learning for fast denoising filtering in ultrasound localization microscopy</article-title>. <source>Phys. Med. and Biol.</source> <volume>68</volume>, <fpage>205002</fpage>. <pub-id pub-id-type="doi">10.1088/1361-6560/acf98f</pub-id>
<pub-id pub-id-type="pmid">37703894</pub-id>
</mixed-citation>
</ref>
<ref id="B66">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhong</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Meidani</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>PI-VAE: physics-informed variational auto-encoder for stochastic differential equations</article-title>. <source>Comput. Methods Appl. Mech. Eng.</source> <volume>403</volume>, <fpage>115664</fpage>. <pub-id pub-id-type="doi">10.1016/j.cma.2022.115664</pub-id>
</mixed-citation>
</ref>
<ref id="B67">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Niu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Gao</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>T.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Spatial-contextual variational autoencoder with attention correction for anomaly detection in retinal OCT images</article-title>. <source>Comput. Biol. Med.</source> <volume>152</volume>, <fpage>106328</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106328</pub-id>
<pub-id pub-id-type="pmid">36462369</pub-id>
</mixed-citation>
</ref>
</ref-list>
</back>
</article>