<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Med.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Med.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-858X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fmed.2026.1752016</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>A multi-model fusion approach incorporating conventional radiological and machine learning features across age spectrum for periorbital fat status prediction</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes"><name><surname>Wang</surname> <given-names>Meng</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref><xref ref-type="aff" rid="aff2"><sup>2</sup></xref><xref ref-type="author-notes" rid="fn2001"><sup>&#x2020;</sup></xref><xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes"><name><surname>Han</surname> <given-names>Yudi</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref><xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author"><name><surname>Li</surname> <given-names>Li</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author"><name><surname>Lu</surname> <given-names>Xi</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref><xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author"><name><surname>Jia</surname> <given-names>Yiqing</given-names></name><xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Guo</surname> <given-names>Lingli</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref><xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1869120"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Han</surname> <given-names>Yan</given-names></name><xref ref-type="aff" rid="aff1"><sup>1</sup></xref><xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1403757"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Plastic and Reconstructive Surgery, The First Medical Center, Chinese PLA General Hospital</institution>, <city>Beijing</city>, <country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>Central Medical Branch of PLA General Hospital</institution>, <city>Beijing</city>, <country country="CN">China</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Emergency Medicine, The Sixth Medical Center, Chinese PLA General Hospital</institution>, <city>Beijing</city>, <country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Lingli Guo, <email xlink:href="mailto:guo_linglidoctor@163.com">guo_linglidoctor@163.com</email>; Yan Han, <email xlink:href="mailto:13720086335@163.com">13720086335@163.com</email></corresp>
<fn fn-type="other" id="fn2001">
<label>&#x2020;</label>
<p>ORCID: Meng Wang, <uri xlink:href="https://orcid.org/0000-0002-5079-5150">orcid.org/0000-0002-5079-5150</uri></p>
</fn>
<fn fn-type="equal" id="fn0001">
<label>&#x2020;</label>
<p>These authors share first authorship</p>
</fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-25">
<day>25</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>13</volume>
<elocation-id>1752016</elocation-id>
<history>
<date date-type="received">
<day>22</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>03</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>10</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Wang, Han, Li, Lu, Jia, Guo and Han.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Wang, Han, Li, Lu, Jia, Guo and Han</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-25">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Objectives</title>
<p>To develop an ensemble learning model fusing conventional radiomics (CR) and machine learning (ML) features to assess periorbital fat status across the entire age spectrum.</p>
</sec>
<sec>
<title>Methods</title>
<p>Retrospective analysis was conducted on preoperative cranial and facial MRI data of meningioma patients. Patients were categorized into youth, middle-aged, and senior groups and allocated to training and test sets through stratified random sampling. CR and ML features of fat in three periorbital regions were extracted to develop an ensemble learning model, with its clinical application value subsequently evaluated.</p>
</sec>
<sec>
<title>Results</title>
<p>237 patients were enrolled: 165 in the training set and 72 in the test set. The training set comprised 19 youth cases (28.5&#x202F;&#x00B1;&#x202F;5.0, 7 male), 41 middle-aged cases (42.9&#x202F;&#x00B1;&#x202F;4.7, 9 male), and 105 senior cases (60.0&#x202F;&#x00B1;&#x202F;6.5, 26 male). The test set included 8 youth cases (28.6&#x202F;&#x00B1;&#x202F;5.6, 4 male), 18 middle-aged cases (43.9&#x202F;&#x00B1;&#x202F;4.1, 6 male), and 46 senior cases (58.8&#x202F;&#x00B1;&#x202F;6.7, 10 male). The ensemble learning model outperformed the CR model, the ML model, and the CR-ML fusion model on the test set, achieving an AUC-macro of 0.833 (95% CI: 0.737&#x2013;0.902), an F1-score of 0.614, an accuracy (Acc) of 0.597, and a positive predictive value (PPV) of 0.690. Ensemble learning models demonstrated optimal comprehensive capabilities in multi-classification tasks, enhancing generalization and robustness.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>Our ensemble learning model achieved non-invasive and reliable assessment of periorbital fat status across the entire age spectrum, enriching the evaluation methodology for rejuvenation surgery.</p>
</sec>
</abstract>
<kwd-group>
<kwd>machine learning</kwd>
<kwd>MRI</kwd>
<kwd>periorbital fat</kwd>
<kwd>radiomics</kwd>
<kwd>stacking ensemble learning</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="5"/>
<equation-count count="2"/>
<ref-count count="33"/>
<page-count count="13"/>
<word-count count="7232"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Dermatology</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>The eyes and periorbital areas, as central components of the midface region, represent the visual focus of the face and are one of the primary targets for facial rejuvenation surgery. With advancing age, the periorbital tissues&#x2014;including skin, fascia, fat, muscle, and bone&#x2014;undergo varying degrees of alteration across the entire age spectrum (<xref ref-type="bibr" rid="ref1 ref2 ref3">1&#x2013;3</xref>). In the midface, aging characteristics predominantly correlate with soft tissue changes (<xref ref-type="bibr" rid="ref4">4</xref>). Consequently, comprehensively understanding the trajectory of age-related soft tissue changes guides clinicians performing periorbital rejuvenation procedures. For instance, it enables precise preoperative prediction of treatment strategies tailored to specific age groups, such as determining the volume of periorbital fat to be excised or grafted, or planning localized injections of fillers and nutraceuticals (<xref ref-type="bibr" rid="ref5 ref6 ref7">5&#x2013;7</xref>). Previous studies on periorbital fat morphology are predominantly based on cadaveric dissection (<xref ref-type="bibr" rid="ref8">8</xref>, <xref ref-type="bibr" rid="ref9">9</xref>). This methodology, however, is constrained by limited sample sizes and postmortem tissue alterations, and thus fails to reflect accurately the characteristics of living tissues.</p>
<p>Imaging data of the mid facial region (CT, MRI) represent clinical data reflecting the true status of various tissues (<xref ref-type="bibr" rid="ref10">10</xref>, <xref ref-type="bibr" rid="ref11">11</xref>). However, they do not permit direct assessment of tissue conditions due to the absence of quantitative data and intuitive features. In recent years, numerous researchers have attempted to quantify facial characteristics to indirectly evaluate changes in deep tissues. Examples include: using 3D facial photography to measure periorbital volume changes as a proxy for periorbital fat volume alterations (<xref ref-type="bibr" rid="ref12">12</xref>, <xref ref-type="bibr" rid="ref13">13</xref>); applying scoring systems to assess periorbital tissue status (<xref ref-type="bibr" rid="ref14">14</xref>); and utilizing grayscale values from periorbital photographs to evaluate fat grafting efficacy (<xref ref-type="bibr" rid="ref15">15</xref>). Studies have also employed tomographic imaging (CT, MRI) data to investigate relationships between periorbital or facial fat and aging. Nevertheless, these studies rely on overly simplistic and limited metrics&#x2014;such as thickness at different anatomical levels, maximum width, volume, and positional changes of periorbital fat (<xref ref-type="bibr" rid="ref16 ref17 ref18">16&#x2013;18</xref>). While such research offers preliminary insights into age-related periorbital fat dynamics, comprehensive imaging studies across the full age spectrum with large sample sizes and intelligent analytical approaches remain rarely explored.</p>
<p>Development of a foundational age-prediction model required extraction of conventional radiomics (CR) and machine learning (ML) features from periorbital fat on MRI scans across the entire age spectrum. We enhanced the model&#x2019;s comprehensive capability through feature fusion and ensemble learning methodologies. As a preliminary exploratory investigation, this research primarily aimed to establish a clinical prediction model fusing conventional imaging and ML techniques. The objective was to improve clinicians&#x2019; assessment proficiency regarding periorbital fat status in patients of various age groups, thereby providing guidance for periocular rejuvenation therapies.</p>
</sec>
<sec sec-type="materials|methods" id="sec2">
<label>2</label>
<title>Materials and methods</title>
<sec id="sec3">
<label>2.1</label>
<title>Patient population and selection</title>
<p>The study obtained approval from the ethics committee of the First Medical Center of Chinese PLA General Hospital (No. 2025&#x2013;071). As a single-center retrospective study, it waived the requirement for informed consent forms.</p>
<p>Imaging data were retrospectively collected from patients who underwent cranial and facial MRI scans for meningioma at the First Medical Center of Chinese PLA General Hospital between January 2014 and December 2024. All cases were stratified by age group (youth group: &#x2265; 18 and &#x003C; 35&#x202F;years; middle-aged group: &#x2265; 35 and &#x003C; 60&#x202F;years; senior group: &#x2265; 60&#x202F;years) and randomly divided into training and test sets in a 7:3 ratio using stratified random sampling. A total of 237 patients were included (<xref ref-type="fig" rid="fig1">Figure 1</xref>), with 27 in the youth group, 59 in the middle-aged group, and 151 in the senior group (<xref ref-type="table" rid="tab1">Table 1</xref>, <xref ref-type="table" rid="tab2">Table 2</xref>).</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>The flow chart for patient screening.</p>
</caption>
<graphic xlink:href="fmed-13-1752016-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart showing patient selection for a meningioma study from 2782 initial cases, detailing exclusion criteria at each step, resulting in 237 patients divided into a training set of 165 and a test set of 72.</alt-text>
</graphic>
</fig>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Baseline characteristics of patients in each group.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Category</th>
<th align="center" valign="top" colspan="2">Youth group (<italic>n</italic> =&#x202F;27)</th>
<th align="center" valign="top" colspan="2">Middle-aged group (<italic>n</italic> =&#x202F;59)</th>
<th align="center" valign="top" colspan="2">Senior group (<italic>n</italic> =&#x202F;151)</th>
<th align="center" valign="top"><italic>p</italic> value</th>
</tr>
<tr>
<th align="center" valign="top">Female</th>
<th align="center" valign="top">Male</th>
<th align="center" valign="top">Female</th>
<th align="center" valign="top">Male</th>
<th align="center" valign="top">Female</th>
<th align="center" valign="top">Male</th>
<th/>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Patient</td>
<td align="center" valign="middle">16 (6.8)</td>
<td align="center" valign="middle">11 (4.6)</td>
<td align="center" valign="middle">45 (19.0)</td>
<td align="center" valign="middle">14 (5.9)</td>
<td align="center" valign="middle">115 (48.5)</td>
<td align="center" valign="middle">36 (15.2)</td>
<td/>
</tr>
<tr>
<td align="left" valign="middle">Age (year)</td>
<td align="center" valign="middle">28.2&#x202F;&#x00B1;&#x202F;5.3</td>
<td align="center" valign="middle">29.1&#x202F;&#x00B1;&#x202F;5.0</td>
<td align="center" valign="middle">43.5&#x202F;&#x00B1;&#x202F;4.7</td>
<td align="center" valign="middle">42.4&#x202F;&#x00B1;&#x202F;3.7</td>
<td align="center" valign="middle">59.5&#x202F;&#x00B1;&#x202F;6.4</td>
<td align="center" valign="middle">59.8&#x202F;&#x00B1;&#x202F;7.2</td>
<td/>
</tr>
<tr>
<td align="left" valign="middle">BMI (kg/m<sup>2</sup>)</td>
<td align="center" valign="middle">22.8&#x202F;&#x00B1;&#x202F;3.7</td>
<td align="center" valign="middle">25.0&#x202F;&#x00B1;&#x202F;2.6</td>
<td align="center" valign="middle">24.2&#x202F;&#x00B1;&#x202F;2.6</td>
<td align="center" valign="middle">24.6&#x202F;&#x00B1;&#x202F;2.7</td>
<td align="center" valign="middle">24.6&#x202F;&#x00B1;&#x202F;2.4</td>
<td align="center" valign="middle">25.3&#x202F;&#x00B1;&#x202F;1.9</td>
<td align="center" valign="middle">0.3295<sup>#</sup></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Data are shown as number (percentage) or mean &#x00B1; SD; SD, standard deviation; BMI body mass index. <sup>#</sup>Analysis results of data differences among various age groups (Kruskal-Wallis H test).</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Baseline characteristics of patients in the training set and test set.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Category</th>
<th align="center" valign="top">Training set (<italic>n</italic> =&#x202F;165)</th>
<th align="center" valign="top">Test set (<italic>n</italic> =&#x202F;72)</th>
<th align="center" valign="top"><italic>p</italic> value</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Youth group</td>
<td align="center" valign="middle">19</td>
<td align="center" valign="middle">8</td>
<td/>
</tr>
<tr>
<td align="left" valign="middle">&#x2003;Sex (male)</td>
<td align="center" valign="middle">7 (36.8)</td>
<td align="center" valign="middle">4 (50.0)</td>
<td/>
</tr>
<tr>
<td align="left" valign="middle">&#x2003;Age (years)</td>
<td align="center" valign="middle">28.5&#x202F;&#x00B1;&#x202F;5.0</td>
<td align="center" valign="middle">28.6&#x202F;&#x00B1;&#x202F;5.6</td>
<td align="center" valign="middle">0.669</td>
</tr>
<tr>
<td align="left" valign="middle">&#x2003;BMI (kg/m<sup>2</sup>)</td>
<td align="center" valign="middle">24.4&#x202F;&#x00B1;&#x202F;3.1</td>
<td align="center" valign="middle">22.2&#x202F;&#x00B1;&#x202F;3.7</td>
<td align="center" valign="middle">0.106</td>
</tr>
<tr>
<td align="left" valign="middle">Middle-aged group</td>
<td align="center" valign="middle">41</td>
<td align="center" valign="middle">18</td>
<td/>
</tr>
<tr>
<td align="left" valign="middle">&#x2003;Sex (male)</td>
<td align="center" valign="middle">9 (22.0)</td>
<td align="center" valign="middle">6 (33.3)</td>
<td/>
</tr>
<tr>
<td align="left" valign="middle">&#x2003;Age (years)</td>
<td align="center" valign="middle">42.9&#x202F;&#x00B1;&#x202F;4.7</td>
<td align="center" valign="middle">43.9&#x202F;&#x00B1;&#x202F;4.1</td>
<td align="center" valign="middle">0.552</td>
</tr>
<tr>
<td align="left" valign="middle">&#x2003;BMI (kg/m<sup>2</sup>)</td>
<td align="center" valign="middle">24.2&#x202F;&#x00B1;&#x202F;2.6</td>
<td align="center" valign="middle">24.5&#x202F;&#x00B1;&#x202F;2.6</td>
<td align="center" valign="middle">0.731</td>
</tr>
<tr>
<td align="left" valign="middle">Senior group</td>
<td align="center" valign="middle">105</td>
<td align="center" valign="middle">46</td>
<td/>
</tr>
<tr>
<td align="left" valign="middle">&#x2003;Sex (male)</td>
<td align="center" valign="middle">26 (24.8)</td>
<td align="center" valign="middle">10 (21.7)</td>
<td/>
</tr>
<tr>
<td align="left" valign="middle">&#x2003;Age (years)</td>
<td align="center" valign="middle">60.0&#x202F;&#x00B1;&#x202F;6.5</td>
<td align="center" valign="middle">58.8&#x202F;&#x00B1;&#x202F;6.7</td>
<td align="center" valign="middle">0.278</td>
</tr>
<tr>
<td align="left" valign="middle">&#x2003;BMI (kg/m<sup>2</sup>)</td>
<td align="center" valign="middle">24.8&#x202F;&#x00B1;&#x202F;2.2</td>
<td align="center" valign="middle">24.7&#x202F;&#x00B1;&#x202F;2.5</td>
<td align="center" valign="middle">0.838</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Data are shown as number (percentage) or mean &#x00B1; SD; SD, standard deviation; BMI body mass index. The <italic>t</italic> test is employed to analyze the differences between distinct data sets.</p>
</table-wrap-foot>
</table-wrap>
<p>All patients met the following inclusion criteria: (1) age &#x2265;18&#x202F;years; (2) no history of head or facial surgery, with primary disease excluding periorbital fat involvement; (3) absence of malignant tumors or immune system disorders; (4) no prior radiotherapy, chemotherapy, or glucocorticoid therapy; (5) no history of craniofacial dysplasia; (6) cranial and facial MRI scans with 1-mm slice thickness, covering a range from the skull vertex superiorly to the upper incisor plane inferiorly.</p>
<p>Patients were excluded based on the following criteria: (1) body mass index (BMI)&#x202F;&#x003C;&#x202F;18.5 or &#x2265; 28.0; (2) MRI images with poor resolution or indistinct boundaries, precluding accurate regions of interest (ROI) annotation; (3) incomplete coverage of target regions in cranial and facial MRI scans; (4) absence of T1-weighted sequences in cranial and facial MRI protocols; (5) history of facial or periorbital deformities and prior surgeries.</p>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>Collection of clinical information and MRI images</title>
<p>Clinical information and MRI images were collected retrospectively from the hospital&#x2019;s electronic medical record system and imaging database. Patients diagnosed with meningioma by the neurosurgery department underwent preoperative high-resolution head and facial T1-weighted imaging, primarily with a slice thickness of 1&#x202F;mm, meeting the study&#x2019;s requirements. Identical MRI equipment and unchanging imaging parameters ensured data stability and reliability for this investigation.</p>
</sec>
<sec id="sec5">
<label>2.3</label>
<title>MRI acquisition</title>
<p>Preoperative patients routinely underwent cranial and facial MRI scanning using high-resolution T1-weighted imaging. The scanning was performed with the following parameters: slice thickness of 1&#x202F;mm, interslice gap of 1&#x202F;mm, matrix size of 260&#x202F;&#x00D7;&#x202F;260, and an average of 2 signal acquisitions. The imaging was conducted on a 1.5&#x202F;T high-field superconducting magnet (Siemens Espree, Erlangen, Germany) equipped with a 32-channel phased-array body coil.</p>
</sec>
<sec id="sec6">
<label>2.4</label>
<title>Region of interest segmentation</title>
<p>Based on reviewing previous research data (<xref ref-type="bibr" rid="ref19">19</xref>), three anatomical regions of periorbital fat were identified (<xref ref-type="fig" rid="fig2">Figure 2A</xref>, <xref ref-type="fig" rid="fig3">Figure 3</xref>): retro-orbicularis oculi fat (ROOF), sub-orbicularis oculi fat (SOOF) and deep medial cheek fat (DMCF). Two senior plastic surgeons (each with over 10&#x202F;years of clinical experience) independently annotated the ROIs on all imaging data using 3D Slicer software (version 5.7.0). For complex or ambiguous images, annotations were guided by one expert plastic surgeon (with over 20&#x202F;years of clinical experience). To ensure reproducibility, the two senior plastic surgeons concurrently annotated imaging data from 50 randomly selected patients; an intraclass correlation coefficient (ICC) evaluation was subsequently performed (<xref ref-type="bibr" rid="ref20">20</xref>). Metrics with ICC values below the reliability threshold (defined as ICC&#x202F;&#x2264;&#x202F;0.75) were excluded as unstable indicators.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Flowchart of this study which contains three main steps. Extract conventional radiomics (CR) feature data and machine learning (ML) feature data <bold>(A)</bold>. Dimensionality reduction and feature fusion of CR and ML data respectively; development and evaluation of nine distinct models <bold>(B)</bold>. Fusion of CR and ML features; development of a stacking ensemble learning model utilizing the prediction probabilities from the nine models <bold>(C)</bold>.</p>
</caption>
<graphic xlink:href="fmed-13-1752016-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Three-panel scientific workflow diagram: Panel A outlines MRI image acquisition, region of interest selection over facial regions, continuous annotation, feature extraction (radiomics and machine learning), and database creation. Panel B shows feature selection, dimensionality reduction, feature fusion, and model development using various machine learning algorithms. Panel C depicts feature fusion, prediction probability, and ensemble learning via stacking.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Consecutive coronal T1-weighted images of the face and three ROIs: ROOF (green), SOOF (yellow), and DMCF (pink).</p>
</caption>
<graphic xlink:href="fmed-13-1752016-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Nine-panel MRI image grid of a human head in coronal sections, with three structures highlighted: SOOF in green, DMCF in pink, and ROOF in yellow. Labels and arrows indicate each structure&#x2019;s location in the upper right panel.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec7">
<label>2.5</label>
<title>Images preprocessing</title>
<p>All craniofacial T1-weighted MRI images underwent N4 bias field correction using the Python (version v3.9.23) programming language to mitigate potential artifacts arising from local magnetic field inhomogeneities. Subsequently, coordinate system standardization was performed on all imaging data to ensure accurate feature extraction.</p>
</sec>
<sec id="sec8">
<label>2.6</label>
<title>Conventional radiological features</title>
<p>The open-source library &#x201C;PyRadiomics&#x201D; (version v3.1.0) was employed to extract radiomics feature data from segmented ROIs using the Python programming language. Prior to feature extraction, all images were preprocessed: images were resampled to a uniform voxel size of 1&#x202F;&#x00D7;&#x202F;1&#x202F;&#x00D7;&#x202F;1&#x202F;mm<sup>3</sup> using nearest-neighbor interpolation, and grey-level data were discretized into 25 bins. Ultimately, a total of 1,688 radiomics features were extracted, including 14 shape-based features, 324 first-order statistical features, 432 gray level co-occurrence matrix (GLCM) features, 288 gray level run length matrix (GLRLM) features, 288 gray level size zone matrix (GLSZM) features, 252 gray level dependence matrix (GLDM) features, and 90 neighboring gray tone difference matrix (NGTDM) features.</p>
</sec>
<sec id="sec9">
<label>2.7</label>
<title>Machine learning features and model development</title>
<p>Consistent with conventional radiomics, the Python programming language was employed to extract machine learning features from segmented medical images. A 3D ResNet18 backbone network was utilized, optionally embedding squeeze-and-excitation (SE) modules after residual blocks. The SE mechanism compressed spatial information through global average pooling, generated channel-wise weights via fully-connected layers (compression ratio: 16), and recalibrated features using sigmoid activation to enhance discriminative channel responses. Input MRI scans (including images and masks) were uniformly resampled to 1&#x202F;&#x00D7;&#x202F;1&#x202F;&#x00D7;&#x202F;1&#x202F;mm<sup>3</sup> voxels. ROIs were extracted and cropped to minimum bounding boxes, with intensity values normalized to the 0&#x2013;1 range using min-max scaling. ROI volumes were padded or cropped to 32&#x202F;&#x00D7;&#x202F;32&#x202F;&#x00D7;&#x202F;32 tensors (zero-padded for undersized volumes), retaining only masked regions. Features were extracted through global average pooling and fully-connected layers, yielding a 512-dimensional output vector without preserved spatial dimensions.</p>
<p>Using CR and ML features extracted from the training set, we conducted separate training procedures for nine different ML models (<xref ref-type="fig" rid="fig2">Figure 2B</xref>). Given the sample size and distribution characteristics of the study cohort, five-fold cross-validation with five repeats was employed to enhance model stability during validation. Procedure: (1) Features with ICC&#x202F;&#x003E;&#x202F;0.75 were retained due to high stability. All datasets were standardized using the Z-score method. (2) Analysis of variance (ANOVA) screened features exhibiting statistically significant differences (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.05) across youth, middle-aged, and senior groups. (3) Pearson and Spearman correlation analyses were applied to normally and non-normally distributed features, respectively, to remove redundant features with correlation coefficients (<italic>r</italic>)&#x202F;&#x003E;&#x202F;0.9. (4) Synthetic minority over-sampling technique (SMOTE) was employed to balance sample sizes in the two non-senior subgroups, aligning them with the senior group (<italic>n</italic>&#x202F;=&#x202F;105). (5) Random forest (RF) feature selection was implemented to retain only features with importance scores meeting or exceeding the mean value. (6) Least absolute shrinkage and selection operator (LASSO) regression was used for feature screening, with the penalty coefficient (<italic>&#x03BB;</italic>) determined by minimum mean square error (MSE). (7) Principal component analysis (PCA) was synchronously applied to reduce dimensionality of selected features in the training, internal validation, and test sets. 
(8) Nine ML models were trained on the training set, with performance rigorously evaluated on an independent test set: multiclass logistic regression (MLR), neural network (NN), support vector machine (SVM), multilayer perceptron (MLP), random forest (RF), gradient boosting machine (GBM), light gradient boosting machine (Light GBM), na&#x00EF;ve Bayes, and extreme gradient boosting (XGBoost).</p>
</sec>
<sec id="sec10">
<label>2.8</label>
<title>Data fusion and stacking ensemble learning models</title>
<p>MRI feature data from each patient were extracted from three ROIs (<xref ref-type="fig" rid="fig2">Figure 2C</xref>): ROOF, SOOF, and DMCF. These regions generated three distinct sets of feature data. Initially, the filtered CR features from these groups were fused through direct concatenation to construct a CR model (identical methodology was applied to the ML model). Subsequently, the screened features from CR and ML were fused through direct concatenation to construct a CR-ML fusion model. Finally, the predicted probabilities generated by the CR-ML fusion model were utilized as training data to build a stacking ensemble learning framework, with a logistic regression classifier employed as the meta-learner.</p>
</sec>
<sec id="sec11">
<label>2.9</label>
<title>Statistical analysis</title>
<p>All data analyses were conducted using open-source libraries in Python. The ICC was employed to evaluate feature reproducibility, with ICC&#x202F;&#x003E;&#x202F;0.75 indicating good consistency. Model discriminative performance was assessed using the internal validation set and testing set through the following metrics: area under the curve (AUC), AUC macro-average (AUC-macro), 95% confidence interval (CI), accuracy (Acc), positive predictive value (PPV), sensitivity (Sen), F1-score, and confusion matrices. Owing to the imbalanced class distribution in the study sample, AUC-macro (calculated via interpolation) was selected as the primary evaluation metric due to its minimal susceptibility to sample distribution bias and superior stability (<xref ref-type="bibr" rid="ref21">21</xref>). Based on the macro-average ROC curve, the AUC-macro (Macro_AUC) is calculated using the trapezoidal rule:</p>
<disp-formula id="E1">
<mml:math id="M1">
<mml:mtext mathvariant="italic">Macro</mml:mtext>
<mml:mo>_</mml:mo>
<mml:mi mathvariant="italic">AUC</mml:mi>
<mml:mo>=</mml:mo>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn mathvariant="bold">1</mml:mn>
</mml:mrow>
<mml:mi>M</mml:mi>
</mml:msubsup>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>&#x22C5;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mtext mathvariant="italic">macro</mml:mtext>
<mml:mo>_</mml:mo>
<mml:mi mathvariant="italic">tpr</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>+</mml:mo>
<mml:mtext mathvariant="italic">macro</mml:mtext>
<mml:mo>_</mml:mo>
<mml:mi mathvariant="italic">tpr</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="true">)</mml:mo>
</mml:mrow>
<mml:mn mathvariant="bold">2</mml:mn>
</mml:mfrac>
</mml:math>
</disp-formula>
<p><inline-formula>
<mml:math id="M2">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M3">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> are adjacent false positive rate (FPR) grid points. <inline-formula>
<mml:math id="M4">
<mml:mtext mathvariant="italic">macro</mml:mtext>
<mml:mo>_</mml:mo>
<mml:mi mathvariant="italic">tpr</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M5">
<mml:mtext mathvariant="italic">macro</mml:mtext>
<mml:mo>_</mml:mo>
<mml:mi mathvariant="italic">tpr</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
</inline-formula> are the macro-average true positive rate (TPR) values at the corresponding FPR points. <inline-formula>
<mml:math id="M6">
<mml:mi>M</mml:mi>
</mml:math>
</inline-formula> is the total number of points in the FPR grid.</p>
<disp-formula id="E2">
<mml:math id="M7">
<mml:mtext mathvariant="italic">macro</mml:mtext>
<mml:mo>_</mml:mo>
<mml:mi mathvariant="italic">tpr</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mspace width="0em"/>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn mathvariant="bold">1</mml:mn>
<mml:mi>K</mml:mi>
</mml:mfrac>
<mml:msubsup>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn mathvariant="bold">1</mml:mn>
</mml:mrow>
<mml:mi>K</mml:mi>
</mml:msubsup>
<mml:mi mathvariant="italic">tp</mml:mi>
<mml:msub>
<mml:mi>r</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mspace width="0em"/>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
</disp-formula>
<p><inline-formula>
<mml:math id="M8">
<mml:mi>K</mml:mi>
</mml:math>
</inline-formula> is the total number of classes. <inline-formula>
<mml:math id="M9">
<mml:mi mathvariant="italic">tp</mml:mi>
<mml:msub>
<mml:mi>r</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
<mml:mspace width="0em"/>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
</inline-formula> is the true positive rate for the i-th class at FPR point <inline-formula>
<mml:math id="M10">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>&#x200B;, obtained through linear interpolation. <inline-formula>
<mml:math id="M11">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>j</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> is the j-th point in the FPR grid, where <inline-formula>
<mml:math id="M12">
<mml:mi>j</mml:mi>
</mml:math>
</inline-formula> = 0, 1, &#x2026;, <inline-formula>
<mml:math id="M13">
<mml:mi>M</mml:mi>
</mml:math>
</inline-formula>, typically with <inline-formula>
<mml:math id="M14">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
</mml:math>
</inline-formula> = 0 and <inline-formula>
<mml:math id="M15">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>M</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>&#x200B;&#x202F;=&#x202F;1.</p>
<p>The F1-score, which integrates PPV and Sen, was designated a secondary metric as it may overestimate model performance in imbalanced data; other metrics served as supplementary indicators. To enable precise model comparison, all evaluation results were reported to three decimal places. For normally distributed data with homogeneity of variance, independent samples <italic>t</italic> tests and one-way ANOVA were applied; otherwise, non-parametric tests (Kruskal&#x2013;Wallis <italic>H</italic> test) were utilized, with statistical significance defined as <italic>p</italic>&#x202F;&#x003C;&#x202F;0.05.</p>
</sec>
</sec>
<sec sec-type="results" id="sec12">
<label>3</label>
<title>Results</title>
<sec id="sec13">
<label>3.1</label>
<title>Patient characteristics</title>
<p>The baseline characteristics of the study participants are presented in <xref ref-type="table" rid="tab1">Tables 1</xref>, <xref ref-type="table" rid="tab2">2</xref>. A total of 237 patients were enrolled: 27 in the young group (11 male), 59 in the middle-aged group (14 male), and 151 in the senior group (36 male). Through stratified sampling, the cohort was divided into a training set (including an internal validation set) of 165 cases: young group (<italic>n</italic>&#x202F;=&#x202F;19, 28.5&#x202F;&#x00B1;&#x202F;5.0&#x202F;years, 7 male), middle-aged group (<italic>n</italic>&#x202F;=&#x202F;41, 42.9&#x202F;&#x00B1;&#x202F;4.7&#x202F;years, 9 male), and senior group (<italic>n</italic>&#x202F;=&#x202F;105, 60.0&#x202F;&#x00B1;&#x202F;6.5&#x202F;years, 26 male); along with a testing set of 72 cases: young group (<italic>n</italic>&#x202F;=&#x202F;8, 28.6&#x202F;&#x00B1;&#x202F;5.6&#x202F;years, 4 male), middle-aged group (<italic>n</italic>&#x202F;=&#x202F;18, 43.9&#x202F;&#x00B1;&#x202F;4.1&#x202F;years, 6 male), and senior group (<italic>n</italic>&#x202F;=&#x202F;46, 58.8&#x202F;&#x00B1;&#x202F;6.7&#x202F;years, 10 male). Within identical age strata, no statistically significant differences in BMI were observed between the training and test sets (<italic>p</italic>&#x202F;&#x003E;&#x202F;0.05). Similarly, no statistically significant BMI differences existed across distinct age groups (<italic>p</italic>&#x202F;&#x003E;&#x202F;0.05).</p>
</sec>
<sec id="sec14">
<label>3.2</label>
<title>Conventional radiological model</title>
<p>Following feature selection and SMOTE oversampling for the ROOF, SOOF, and DMCF regions respectively, the feature dimensionality of the ROOF and DMCF regions was reduced to 5 <italic>via</italic> PCA, while the SOOF region was reduced to 8 features. Each patient thus contributed a total of 18 features. All models demonstrated stable performance on the training set (<xref ref-type="table" rid="tab3">Table 3</xref>). In the test set, the Naive Bayes model exhibited optimal results with an AUC-macro of 0.757 (95% CI, 0.628&#x2013;0.856) and an F1-score of 0.598.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Predictive performance of CR model, ML model and fusion model (CR-ML).</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Model</th>
<th align="center" valign="top" colspan="4">Training set</th>
<th align="center" valign="top" colspan="4">Test set</th>
</tr>
<tr>
<th align="center" valign="top">Acc (Sen)&#x002A;</th>
<th align="center" valign="top">PPV</th>
<th align="center" valign="top">F1</th>
<th align="center" valign="top">AUC-macro (95% CI)</th>
<th align="center" valign="top">Acc (Sen)&#x002A;</th>
<th align="center" valign="top">PPV</th>
<th align="center" valign="top">F1</th>
<th align="center" valign="top">AUC-macro (95% CI)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top" colspan="9">MLR</td>
</tr>
<tr>
<td align="left" valign="top">CR</td>
<td align="center" valign="top">0.768</td>
<td align="center" valign="top">0.768</td>
<td align="center" valign="top">0.767</td>
<td align="center" valign="top">0.927 (0.902&#x2013;0.946)</td>
<td align="center" valign="top">0.583</td>
<td align="center" valign="top">0.712</td>
<td align="center" valign="top">0.620</td>
<td align="center" valign="top">0.748 (0.623&#x2013;0.843)</td>
</tr>
<tr>
<td align="left" valign="top">ML</td>
<td align="center" valign="top">0.730</td>
<td align="center" valign="top">0.729</td>
<td align="center" valign="top">0.728</td>
<td align="center" valign="top">0.883 (0.85&#x2013;0.909)</td>
<td align="center" valign="top">0.486</td>
<td align="center" valign="top">0.635</td>
<td align="center" valign="top">0.504</td>
<td align="center" valign="top">0.771 (0.651&#x2013;0.862)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML</td>
<td align="center" valign="top">0.819</td>
<td align="center" valign="top">0.818</td>
<td align="center" valign="top">0.816</td>
<td align="center" valign="top">0.943 (0.92&#x2013;0.96)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.721</td>
<td align="center" valign="top">0.619</td>
<td align="center" valign="top">0.806 (0.703&#x2013;0.884)</td>
</tr>
<tr>
<td align="left" valign="top" colspan="9">NN</td>
</tr>
<tr>
<td align="left" valign="top">CR</td>
<td align="center" valign="top">0.917</td>
<td align="center" valign="top">0.92</td>
<td align="center" valign="top">0.917</td>
<td align="center" valign="top">0.982 (0.967&#x2013;0.990)</td>
<td align="center" valign="top">0.583</td>
<td align="center" valign="top">0.700</td>
<td align="center" valign="top">0.614</td>
<td align="center" valign="top">0.749 (0.639&#x2013;0.837)</td>
</tr>
<tr>
<td align="left" valign="top">ML</td>
<td align="center" valign="top">0.895</td>
<td align="center" valign="top">0.899</td>
<td align="center" valign="top">0.893</td>
<td align="center" valign="top">0.980 (0.969&#x2013;0.987)</td>
<td align="center" valign="top">0.458</td>
<td align="center" valign="top">0.666</td>
<td align="center" valign="top">0.476</td>
<td align="center" valign="top">0.706 (0.593&#x2013;0.804)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML</td>
<td align="center" valign="top">0.933</td>
<td align="center" valign="top">0.933</td>
<td align="center" valign="top">0.933</td>
<td align="center" valign="top">0.990 (0.980&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.694</td>
<td align="center" valign="top">0.615</td>
<td align="center" valign="top">0.819 (0.720&#x2013;0.893)</td>
</tr>
<tr>
<td align="left" valign="top" colspan="9">XGBoost</td>
</tr>
<tr>
<td align="left" valign="top">CR</td>
<td align="center" valign="top">0.813</td>
<td align="center" valign="top">0.818</td>
<td align="center" valign="top">0.811</td>
<td align="center" valign="top">0.929 (0.902&#x2013;0.946)</td>
<td align="center" valign="top">0.458</td>
<td align="center" valign="top">0.642</td>
<td align="center" valign="top">0.496</td>
<td align="center" valign="top">0.748 (0.651&#x2013;0.824)</td>
</tr>
<tr>
<td align="left" valign="top">ML</td>
<td align="center" valign="top">0.727</td>
<td align="center" valign="top">0.728</td>
<td align="center" valign="top">0.720</td>
<td align="center" valign="top">0.900 (0.87&#x2013;0.921)</td>
<td align="center" valign="top">0.417</td>
<td align="center" valign="top">0.604</td>
<td align="center" valign="top">0.419</td>
<td align="center" valign="top">0.721 (0.614&#x2013;0.807)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML</td>
<td align="center" valign="top">0.797</td>
<td align="center" valign="top">0.801</td>
<td align="center" valign="top">0.794</td>
<td align="center" valign="top">0.932 (0.91&#x2013;0.949)</td>
<td align="center" valign="top">0.528</td>
<td align="center" valign="top">0.689</td>
<td align="center" valign="top">0.548</td>
<td align="center" valign="top">0.789 (0.692&#x2013;0.861)</td>
</tr>
<tr>
<td align="left" valign="top" colspan="9">RF</td>
</tr>
<tr>
<td align="left" valign="top">CR</td>
<td align="center" valign="top">0.832</td>
<td align="center" valign="top">0.835</td>
<td align="center" valign="top">0.829</td>
<td align="center" valign="top">0.958 (0.941&#x2013;0.972)</td>
<td align="center" valign="top">0.528</td>
<td align="center" valign="top">0.709</td>
<td align="center" valign="top">0.570</td>
<td align="center" valign="top">0.727 (0.605&#x2013;0.822)</td>
</tr>
<tr>
<td align="left" valign="top">ML</td>
<td align="center" valign="top">0.867</td>
<td align="center" valign="top">0.870</td>
<td align="center" valign="top">0.863</td>
<td align="center" valign="top">0.966 (0.952&#x2013;0.978)</td>
<td align="center" valign="top">0.417</td>
<td align="center" valign="top">0.586</td>
<td align="center" valign="top">0.422</td>
<td align="center" valign="top">0.717 (0.619&#x2013;0.800)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML</td>
<td align="center" valign="top">0.902</td>
<td align="center" valign="top">0.909</td>
<td align="center" valign="top">0.900</td>
<td align="center" valign="top">0.977 (0.965&#x2013;0.987)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.725</td>
<td align="center" valign="top">0.612</td>
<td align="center" valign="top">0.779 (0.691&#x2013;0.851)</td>
</tr>
<tr>
<td align="left" valign="top" colspan="9">SVM</td>
</tr>
<tr>
<td align="left" valign="top">CR</td>
<td align="center" valign="top">0.933</td>
<td align="center" valign="top">0.934</td>
<td align="center" valign="top">0.933</td>
<td align="center" valign="top">0.99 (0.98&#x2013;0.995)</td>
<td align="center" valign="top">0.569</td>
<td align="center" valign="top">0.652</td>
<td align="center" valign="top">0.594</td>
<td align="center" valign="top">0.741 (0.648&#x2013;0.825)</td>
</tr>
<tr>
<td align="left" valign="top">ML</td>
<td align="center" valign="top">0.933</td>
<td align="center" valign="top">0.933</td>
<td align="center" valign="top">0.933</td>
<td align="center" valign="top">0.989 (0.979&#x2013;0.994)</td>
<td align="center" valign="top">0.500</td>
<td align="center" valign="top">0.592</td>
<td align="center" valign="top">0.522</td>
<td align="center" valign="top">0.727 (0.616&#x2013;0.817)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.995 (0.987&#x2013;0.998)</td>
<td align="center" valign="top">0.556</td>
<td align="center" valign="top">0.634</td>
<td align="center" valign="top">0.579</td>
<td align="center" valign="top">0.739 (0.637&#x2013;0.829)</td>
</tr>
<tr>
<td align="left" valign="top" colspan="9">GBM</td>
</tr>
<tr>
<td align="left" valign="top">CR</td>
<td align="center" valign="top">0.737</td>
<td align="center" valign="top">0.737</td>
<td align="center" valign="top">0.735</td>
<td align="center" valign="top">0.893 (0.867&#x2013;0.917)</td>
<td align="center" valign="top">0.472</td>
<td align="center" valign="top">0.633</td>
<td align="center" valign="top">0.508</td>
<td align="center" valign="top">0.706 (0.583&#x2013;0.796)</td>
</tr>
<tr>
<td align="left" valign="top">ML</td>
<td align="center" valign="top">0.775</td>
<td align="center" valign="top">0.776</td>
<td align="center" valign="top">0.774</td>
<td align="center" valign="top">0.901 (0.872&#x2013;0.925)</td>
<td align="center" valign="top">0.361</td>
<td align="center" valign="top">0.502</td>
<td align="center" valign="top">0.391</td>
<td align="center" valign="top">0.682 (0.573&#x2013;0.773)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML</td>
<td align="center" valign="top">0.797</td>
<td align="center" valign="top">0.797</td>
<td align="center" valign="top">0.796</td>
<td align="center" valign="top">0.915 (0.889&#x2013;0.936)</td>
<td align="center" valign="top">0.500</td>
<td align="center" valign="top">0.626</td>
<td align="center" valign="top">0.530</td>
<td align="center" valign="top">0.716 (0.599&#x2013;0.816)</td>
</tr>
<tr>
<td align="left" valign="top" colspan="9">Light GBM</td>
</tr>
<tr>
<td align="left" valign="top">CR</td>
<td align="center" valign="top">0.775</td>
<td align="center" valign="top">0.780</td>
<td align="center" valign="top">0.769</td>
<td align="center" valign="top">0.925 (0.901&#x2013;0.944)</td>
<td align="center" valign="top">0.472</td>
<td align="center" valign="top">0.669</td>
<td align="center" valign="top">0.510</td>
<td align="center" valign="top">0.715 (0.603&#x2013;0.806)</td>
</tr>
<tr>
<td align="left" valign="top">ML</td>
<td align="center" valign="top">0.810</td>
<td align="center" valign="top">0.809</td>
<td align="center" valign="top">0.805</td>
<td align="center" valign="top">0.936 (0.914&#x2013;0.953)</td>
<td align="center" valign="top">0.403</td>
<td align="center" valign="top">0.554</td>
<td align="center" valign="top">0.412</td>
<td align="center" valign="top">0.676 (0.568&#x2013;0.773)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML</td>
<td align="center" valign="top">0.844</td>
<td align="center" valign="top">0.854</td>
<td align="center" valign="top">0.841</td>
<td align="center" valign="top">0.954 (0.935&#x2013;0.969)</td>
<td align="center" valign="top">0.528</td>
<td align="center" valign="top">0.667</td>
<td align="center" valign="top">0.561</td>
<td align="center" valign="top">0.767 (0.668&#x2013;0.85)</td>
</tr>
<tr>
<td align="left" valign="top" colspan="9">MLP</td>
</tr>
<tr>
<td align="left" valign="top">CR</td>
<td align="center" valign="top">0.787</td>
<td align="center" valign="top">0.785</td>
<td align="center" valign="top">0.784</td>
<td align="center" valign="top">0.898 (0.866&#x2013;0.922)</td>
<td align="center" valign="top">0.528</td>
<td align="center" valign="top">0.730</td>
<td align="center" valign="top">0.579</td>
<td align="center" valign="top">0.737 (0.634&#x2013;0.82)</td>
</tr>
<tr>
<td align="left" valign="top">ML</td>
<td align="center" valign="top">0.730</td>
<td align="center" valign="top">0.732</td>
<td align="center" valign="top">0.730</td>
<td align="center" valign="top">0.877 (0.843&#x2013;0.906)</td>
<td align="center" valign="top">0.458</td>
<td align="center" valign="top">0.589</td>
<td align="center" valign="top">0.477</td>
<td align="center" valign="top">0.682 (0.565&#x2013;0.793)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML</td>
<td align="center" valign="top">0.740</td>
<td align="center" valign="top">0.742</td>
<td align="center" valign="top">0.737</td>
<td align="center" valign="top">0.872 (0.834&#x2013;0.901)</td>
<td align="center" valign="top">0.542</td>
<td align="center" valign="top">0.646</td>
<td align="center" valign="top">0.577</td>
<td align="center" valign="top">0.713 (0.59&#x2013;0.818)</td>
</tr>
<tr>
<td align="left" valign="top" colspan="9">Naive Bayes</td>
</tr>
<tr>
<td align="left" valign="top">CR</td>
<td align="center" valign="top">0.749</td>
<td align="center" valign="top">0.750</td>
<td align="center" valign="top">0.747</td>
<td align="center" valign="top">0.891 (0.858&#x2013;0.917)</td>
<td align="center" valign="top">0.556</td>
<td align="center" valign="top">0.736</td>
<td align="center" valign="top">0.598</td>
<td align="center" valign="top">0.757 (0.628&#x2013;0.856)</td>
</tr>
<tr>
<td align="left" valign="top">ML</td>
<td align="center" valign="top">0.702</td>
<td align="center" valign="top">0.699</td>
<td align="center" valign="top">0.696</td>
<td align="center" valign="top">0.868 (0.834&#x2013;0.897)</td>
<td align="center" valign="top">0.361</td>
<td align="center" valign="top">0.501</td>
<td align="center" valign="top">0.365</td>
<td align="center" valign="top">0.624 (0.506&#x2013;0.733)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML</td>
<td align="center" valign="top">0.778</td>
<td align="center" valign="top">0.780</td>
<td align="center" valign="top">0.774</td>
<td align="center" valign="top">0.911 (0.881&#x2013;0.934)</td>
<td align="center" valign="top">0.542</td>
<td align="center" valign="top">0.732</td>
<td align="center" valign="top">0.558</td>
<td align="center" valign="top">0.775 (0.665&#x2013;0.856)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>AUC-macro, AUC macro-average; AUC, the area under the receiver operating characteristic curves; Acc, accuracy; PPV, positive predictive value; Sen, sensitivity; F1, F1-score; CI, confidence interval; CR, conventional radiological; ML, machine learning; MLR, multiclass logistic regression; MLP, multilayer perceptron; NN, neural network; RF, random forest; GBM, gradient boosting machine; XGBoost, extreme gradient boosting; Light GBM, light gradient boosting machine. &#x002A;Sen is calculated in multi-class problems using a weighted average, which is mathematically equal to Acc.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec15">
<label>3.3</label>
<title>ML feature model</title>
<p>Identical to the CR model, each patient contributed a total of 18 ML features. Within the training set evaluation metrics, all models exhibited stable performance, with results which were comparable to those of the CR model (<xref ref-type="table" rid="tab3">Table 3</xref>). In the testing set, the optimal model was MLR, attaining an AUC-macro of 0.771 (95% CI, 0.651&#x2013;0.862) and an F1-score of 0.504. The predictive capability of models developed using ML features was comparable to that of the CR model; the optimal model in the testing set showed an increase of 0.014 (1.8%) in AUC-macro but exhibited a decrease of 0.095 (&#x2212;15.8%) in F1-score.</p>
</sec>
<sec id="sec16">
<label>3.4</label>
<title>Feature fusion model</title>
<p>To further enhance model performance, we directly concatenated CR and ML feature data to develop a fusion model. In the evaluation results of the CR-ML fusion model (<xref ref-type="table" rid="tab3">Table 3</xref>), the training set metrics remained stable. In the test set, the NN model performed optimally, achieving an AUC-macro of 0.819 (95% CI: 0.720&#x2013;0.893) and an F1-score of 0.615. Following data fusion, the performance of most models (seven models) improved to varying degrees. Compared to the CR and ML models, the optimal model&#x2019;s test set AUC-macro increased by 0.062 (8.2%) and 0.048 (6.2%), respectively, and the F1-score increased by 0.017 (2.8%) and 0.111 (22.1%), respectively.</p>
</sec>
<sec id="sec17">
<label>3.5</label>
<title>Stacking ensemble learning model</title>
<p>A stacking ensemble learning model was developed using the probability data from both the training and test sets of the CR-ML fusion model, with a logistic regression model selected as the meta-learner. The base models produced nine sets of predicted probability data, and all possible combinations were exhaustively evaluated. As shown in <xref ref-type="table" rid="tab4">Table 4</xref>, the evaluation metrics of the top 10 model combinations were compiled according to their test set AUC-macro ranking. In the training set, all models exhibited excellent performance; in the test set, the stacking model fusing three base models (GBM, Light GBM and NN) achieved optimal performance, with an AUC-macro of 0.833 (95% CI: 0.737&#x2013;0.902), an F1-score of 0.614, an Acc of 0.597, and a positive predictive value (PPV) of 0.690. Compared to the optimal CR-ML fusion models (<xref ref-type="table" rid="tab5">Table 5</xref>), the top-performing model demonstrated an increase in test set AUC-macro by 0.014 (1.7%), while other metrics remained relatively stable.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Multi-model stacking ensemble learning evaluation (Top 10).</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Model composition</th>
<th align="center" valign="top" colspan="4">Training set</th>
<th align="center" valign="top" colspan="4">Test set</th>
</tr>
<tr>
<th align="center" valign="top">Acc (Sen)&#x002A;</th>
<th align="center" valign="top">PPV</th>
<th align="center" valign="top">F1</th>
<th align="center" valign="top">AUC-macro (95% CI)</th>
<th align="center" valign="top">Acc (Sen)&#x002A;</th>
<th align="center" valign="top">PPV</th>
<th align="center" valign="top">F1</th>
<th align="center" valign="top">AUC-macro (95% CI)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">GBM, Light GBM, NN</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.991 (0.981&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.690</td>
<td align="center" valign="top">0.614</td>
<td align="center" valign="top">0.833(0.737&#x2013;0.902)</td>
</tr>
<tr>
<td align="left" valign="top">Light GBM, NN</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.991(0.981&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.690</td>
<td align="center" valign="top">0.614</td>
<td align="center" valign="top">0.833(0.733&#x2013;0.904)</td>
</tr>
<tr>
<td align="left" valign="top">Light GBM, MLP, NN, Naive bayes</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.990(0.979&#x2013;0.995)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.713</td>
<td align="center" valign="top">0.617</td>
<td align="center" valign="top">0.827(0.728&#x2013;0.900)</td>
</tr>
<tr>
<td align="left" valign="top">GBM, Light GBM, MLP, NN, Naive bayes</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.990(0.978&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.713</td>
<td align="center" valign="top">0.617</td>
<td align="center" valign="top">0.826(0.728&#x2013;0.903)</td>
</tr>
<tr>
<td align="left" valign="top">GBM, Light GBM, MLP, NN, Naive bayes, XGBoost</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.990(0.980&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.713</td>
<td align="center" valign="top">0.617</td>
<td align="center" valign="top">0.826(0.725&#x2013;0.899)</td>
</tr>
<tr>
<td align="left" valign="top">Light GBM, MLP, NN, Naive bayes, XGBoost</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.990(0.979&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.713</td>
<td align="center" valign="top">0.617</td>
<td align="center" valign="top">0.826(0.725&#x2013;0.896)</td>
</tr>
<tr>
<td align="left" valign="top">MLP, NN, Naive bayes</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.989(0.978&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.709</td>
<td align="center" valign="top">0.616</td>
<td align="center" valign="top">0.825 (0.725&#x2013;0.898)</td>
</tr>
<tr>
<td align="left" valign="top">MLP, NN, Naive bayes, XGBoost</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.989(0.978&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.709</td>
<td align="center" valign="top">0.616</td>
<td align="center" valign="top">0.825(0.730&#x2013;0.896)</td>
</tr>
<tr>
<td align="left" valign="top">Light GBM, NN, XGBoost</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.946</td>
<td align="center" valign="top">0.992 (0.983&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.690</td>
<td align="center" valign="top">0.614</td>
<td align="center" valign="top">0.825(0.718&#x2013;0.895)</td>
</tr>
<tr>
<td align="left" valign="top">GBM, MLP, NN, Naive bayes</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.943</td>
<td align="center" valign="top">0.989 (0.979&#x2013;0.996)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.709</td>
<td align="center" valign="top">0.616</td>
<td align="center" valign="top">0.825(0.726&#x2013;0.904)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>AUC-macro, AUC macro-average; AUC, the area under the receiver operating characteristic curves; Acc, accuracy; PPV, positive predictive value; Sen, sensitivity; F1, F1-score; CI, confidence interval; CR, conventional radiomics; ML, machine learning; MLP, multilayer perceptron; NN, convolutional neural network; GBM, gradient boosting machine; XGBoost, extreme gradient boosting; Light GBM, light gradient boosting machine. &#x002A;Sen is calculated in multi-class problems using a weighted average, which is mathematically equal to Acc.</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>Evaluation metrics of optimal models by method on test set.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Model composition</th>
<th align="center" valign="top">Acc (sen)&#x002A;</th>
<th align="center" valign="top">PPV</th>
<th align="center" valign="top">F1</th>
<th align="center" valign="top">AUC-macro (95% CI)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">CR (Naive bayes)</td>
<td align="center" valign="top">0.556</td>
<td align="center" valign="top">0.736</td>
<td align="center" valign="top">0.598</td>
<td align="center" valign="top">0.757 (0.628&#x2013;0.856)</td>
</tr>
<tr>
<td align="left" valign="top">ML (MLR)</td>
<td align="center" valign="top">0.486</td>
<td align="center" valign="top">0.635</td>
<td align="center" valign="top">0.504</td>
<td align="center" valign="top">0.771 (0.651&#x2013;0.862)</td>
</tr>
<tr>
<td align="left" valign="top">CR-ML (NN)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.694</td>
<td align="center" valign="top">0.615</td>
<td align="center" valign="top">0.819 (0.720&#x2013;0.893)</td>
</tr>
<tr>
<td align="left" valign="top">Stacking (GBM, Light GBM, NN)</td>
<td align="center" valign="top">0.597</td>
<td align="center" valign="top">0.690</td>
<td align="center" valign="top">0.614</td>
<td align="center" valign="top">0.833 (0.737&#x2013;0.902)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>AUC-macro, AUC macro-average; AUC, the area under the receiver operating characteristic curves; Acc, accuracy; PPV, positive predictive value; Sen, sensitivity; F1, F1-score; CI, confidence interval; CR, conventional radiomics; ML, machine learning; MLR, multiclass logistic regression; NN, convolutional neural network; GBM, gradient boosting machine; Light GBM, light gradient boosting machine. &#x002A;Sen is calculated in multi-class problems using a weighted average, which is mathematically equal to Acc.</p>
</table-wrap-foot>
</table-wrap>
</sec>
</sec>
<sec sec-type="discussion" id="sec18">
<label>4</label>
<title>Discussion</title>
<p>All models developed independently from CR and ML features alone exhibited limited discriminative capacity, falling below the threshold for reliable clinical deployment (<xref ref-type="fig" rid="fig4">Figures 4A</xref>&#x2013;<xref ref-type="fig" rid="fig4">D</xref>). We observed that CR and ML models exhibited complementary strengths in discriminating periorbital fat compartments (different age groups): as shown in <xref ref-type="fig" rid="fig4">Figures 4B</xref>,<xref ref-type="fig" rid="fig4">D</xref>, both models showed uneven predictive performance across classes in the test set: The CR model outperformed in Class 2 (senior group) with an AUC of 0.781, whereas the ML model excelled in Class 0 (youth group) with an AUC of 0.871. Fusion of these datasets was hypothesized to enhance overall predictive capability. Among CR-ML fusion models, the NN model yielded optimal test-set performance (<xref ref-type="fig" rid="fig4">Figure 4F</xref>, <xref ref-type="table" rid="tab3">Table 3</xref>). Key metrics improved significantly: AUC-macro, 0.819 (95% CI: 0.720&#x2013;0.893); F1-score, 0.615; accuracy, 0.597; PPV, 0.694, collectively demonstrating robust discriminative power. Furthermore, other fusion models exhibited performance gains to varying degrees.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>The performance of the optimal model: receiver operating characteristic (ROC) curves and confusion matrices of conventional radiomics (CR) model (Naive Bayes) training set <bold>(A)</bold> and test set <bold>(B)</bold>. ROC curves and confusion matrices of machine learning (ML) model (multiclass logistic regression) training set <bold>(C)</bold> and test set <bold>(D)</bold>. ROC curves and confusion matrices of fusion model (convolutional neural network) training set <bold>(E)</bold> and test set <bold>(F)</bold>. ROC curves and confusion matrices of stacking model training set <bold>(G)</bold> and test set <bold>(H)</bold>. Class 0 represents the youth group. Class 1 represents the middle-aged group. Class 2 represents the senior group. Comprehensive evaluation of the optimal models <bold>(I)</bold>.</p>
</caption>
<graphic xlink:href="fmed-13-1752016-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Eight panels labeled A through H each show a receiver operating characteristic (ROC) curve on the left and a confusion matrix heatmap on the right for multi-class classification models, including AUC scores for each class and macro averages. Panel I presents a horizontal bar chart comparing four models&#x2014;Stacking, CR+ML, ML, and CR&#x2014;across four performance metrics: AUC-macro, F1, PPV, and accuracy, with colored bars for each metric and a legend at the bottom right.</alt-text>
</graphic>
</fig>
<p>Stacking ensemble learning, as a stacked generalization model, enhanced generalization capability and stability by combining multiple base models, thus improving prediction performance (<xref ref-type="bibr" rid="ref22">22</xref>). As shown in <xref ref-type="table" rid="tab4">Table 4</xref>, the optimal model combination (GBM, Light GBM, and NN) achieved a test set AUC-macro of 0.833 (0.737&#x2013;0.902) and an F1-score of 0.614. The AUC values for each class (Class 0, 1, 2) showed improvement (<xref ref-type="fig" rid="fig4">Figures 4G</xref>,<xref ref-type="fig" rid="fig4">H</xref>), and the predictive capability across all three classes was more balanced, stable, and better suited for the requirements of practical clinical application. From <xref ref-type="fig" rid="fig4">Figure 4I</xref>, it is evident that the comprehensive performance of the stacking model surpassed that of the other three models, yielding the highest cumulative values for AUC-macro, F1-score, Acc, and PPV. Further analysis of the magnitude of performance improvement in the stacking model revealed an AUC-macro increase of 0.014 (1.7%) compared to the best CR-ML fusion model (NN). In ML, an AUC improvement &#x003E;0.01 is typically considered significant (<xref ref-type="bibr" rid="ref23">23</xref>), while the F1-score, Acc, and PPV remained stable.</p>
<p>The noninvasive, data-driven, and intelligent assessment of periorbital fat status represents a future direction for guiding facial rejuvenation surgery and serves as a critical indicator for evaluating surgical outcomes. Facial fat exhibits regional distribution patterns, with varying degrees of age-related changes across different areas, which is why this study extracted and analyzed features from the three primary periorbital fat compartments&#x2014;ROOF, SOOF, and DMCF&#x2014;separately (<xref ref-type="bibr" rid="ref24 ref25 ref26">24&#x2013;26</xref>). Previous studies indicated that facial fat volume increased with BMI but showed no statistically significant differences based on gender or age (<xref ref-type="bibr" rid="ref27">27</xref>). Conversely, other research identified age, gender, and BMI as significant factors influencing midfacial fat volume (<xref ref-type="bibr" rid="ref28">28</xref>). These conclusions, derived from traditional measurement metrics, displayed both consistencies and contradictions, likely due to insufficient data mining of deep-layer fat characteristics&#x2014;a gap this study aimed to address. Furthermore, all collected cases were of Asian ethnicity (with 97.0% being Han Chinese), offering relatively controlled population variability and ensuring study reliability due to the typically abundant periorbital fat in this group (<xref ref-type="bibr" rid="ref29">29</xref>). Acquiring more comprehensive metrics may yield more accurate and nuanced results. CR features, renowned for their interpretability, are widely used in other medical fields (e.g., malignant tumor differentiation, disease prognosis) (<xref ref-type="bibr" rid="ref30">30</xref>, <xref ref-type="bibr" rid="ref31">31</xref>). Their integration with ML methods achieved robust predictive efficacy in this context. Current research on periorbital fat assessment using ML combined with imaging remains exploratory, with limited reference study designs. 
Multiclass studies are particularly scarce owing to their complexity and high costs (<xref ref-type="bibr" rid="ref32">32</xref>).</p>
<p>As a multi-class classification model (three-class), optimizing and enhancing model performance presented a considerable challenge. The final model demonstrated robust capability in evaluating periorbital fat, thereby providing valuable insights for future research. Nevertheless, several inherent limitations should be acknowledged: First, the retrospective design involved a limited sample size with unavoidable selection bias, making more granular age stratification beyond three groups unfeasible. Second, the absence of standardized facial photographic documentation restricted phenotypic correlation analysis. Third, the lack of multi-center imaging datasets precluded rigorous validation of the model&#x2019;s generalization capability across diverse populations and equipment. Periorbital aging is a process of coordinated degradation involving &#x201C;bone&#x2013;muscle&#x2013;ligament&#x2013;fat.&#x201D; Clarifying the aging characteristics of these tissues is a key objective in rejuvenation surgery. Within a limited timeframe, we aim to investigate changes in one specific tissue type rather than pursuing a comprehensive analysis of the overall aging process. Future efforts will focus on expanding datasets, refining feature engineering for periorbital fat and other anatomical substructures, and validating robustness through external cohorts. Ultimately, we aim to translate this model into a clinical decision-support tool integrated with electronic health records.</p>
<p>We attempted to investigate the periorbital skin, muscle, and fat as a unified composite (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figures S3, S4</xref>); however, the performance of the developed CR, ML, CR&#x202F;+&#x202F;ML, and Stacking ensemble learning models failed to surpass that of the models based solely on the three fat compartments (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S5</xref>; <xref ref-type="supplementary-material" rid="SM1">Supplementary Tables S1, S2</xref>). Among these, the MLP (CR&#x202F;+&#x202F;ML) model emerged as the best-performing combination, yielding a macro-average AUC of 0.771 (95% CI: 0.683&#x2013;0.847) and an F1 score of 0.614. We attribute this to the fact that distinct anatomical regions may exhibit heterogeneous characteristics across different age groups; consequently, region-specific feature extraction contributes to enhanced model accuracy. Furthermore, given the high sensitivity of periorbital adipose tissue to aging, the utilization of multiple Regions of Interest (ROIs) for multimodal model development serves to improve the discriminative power of the models.</p>
<p>Artificial intelligence possesses the intrinsic capacity to capture latent, yet critical, feature information, thereby assisting in the resolution of significant clinical challenges. For instance, a study in the field of endocrinology demonstrated the feasibility of identifying prediabetic patients using solely a single-lead electrocardiogram (Lead I) (<xref ref-type="bibr" rid="ref33">33</xref>). This approach achieved an area under the receiver operating characteristic curve (AUROC) of 0.844 (sensitivity: 0.823; specificity: 0.702) in an external validation cohort.</p>
<p>Four types of models were developed sequentially: two single-feature models (CR and ML), the feature fusion model, and the stacking ensemble learning model. The optimal ensemble learning model can assess the status of periorbital fat across the entire adult age spectrum, providing a radiological perspective on whether the periorbital fat status aligns with the normal status expected for the patient&#x2019;s age group. If the evaluation indicated that a patient&#x2019;s periorbital fat had prematurely advanced to the next age group, this finding suggested a higher necessity for the patient to undergo periorbital rejuvenation treatment. Moreover, the model holds promise as a reference for pre- and post-operative evaluation of periorbital rejuvenation treatments (<xref ref-type="fig" rid="fig5">Figure 5</xref>). Improvement in the assessed age group based on periorbital fat status can reflect the efficacy of periorbital rejuvenation surgery.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>The application of the ensemble learning model in facial rejuvenation surgery.</p>
</caption>
<graphic xlink:href="fmed-13-1752016-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart illustration showing the process from cranial and facial MRI data through an ensemble learning model, classifying individuals as youth, middle-aged, or senior, leading to evaluation of surgical outcomes before and after rejuvenation surgery.</alt-text>
</graphic>
</fig>
</sec>
<sec sec-type="conclusions" id="sec19">
<label>5</label>
<title>Conclusion</title>
<p>The prediction models developed from both the CR features and ML features of periorbital fat successfully discriminated populations across three distinct age groups. Fusing CR and ML features enhanced the model&#x2019;s discriminatory capability between these age groups. Subsequently, the prediction probabilities generated by the CR-ML fusion model were utilized to construct a stacking ensemble learning model, which further improved the discriminatory accuracy across age strata. Continued refinement of training data and parameter optimization will provide clinicians with a straightforward and efficient tool to evaluate periorbital fat status. This model is anticipated to become a pivotal metric for assessing periorbital fat dynamics, thereby offering robust clinical support for periorbital rejuvenation surgeries.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec20">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="sec21">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the ethics committee of the First Medical Center of Chinese PLA General Hospital. The studies were conducted in accordance with the local legislation and institutional requirements. Written informed consent for participation was not required from the participants or the participants&#x2019; legal guardians/next of kin in accordance with the national legislation and institutional requirements. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec sec-type="author-contributions" id="sec22">
<title>Author contributions</title>
<p>MW: Data curation, Conceptualization, Validation, Methodology, Writing &#x2013; review &#x0026; editing, Investigation, Writing &#x2013; original draft, Software, Formal analysis, Visualization. YuH: Formal analysis, Validation, Data curation, Writing &#x2013; review &#x0026; editing, Investigation, Conceptualization. LL: Investigation, Writing &#x2013; review &#x0026; editing, Conceptualization, Data curation. XL: Investigation, Data curation, Conceptualization, Writing &#x2013; review &#x0026; editing. YJ: Data curation, Writing &#x2013; review &#x0026; editing. LG: Supervision, Methodology, Conceptualization, Investigation, Project administration, Writing &#x2013; review &#x0026; editing. YaH: Conceptualization, Writing &#x2013; review &#x0026; editing, Investigation, Project administration, Methodology, Supervision.</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>The facial images in the visualized images in the article have been obtained with the consent of the individuals concerned. We would like to express our gratitude to all the partners who have supported this research.</p>
</ack>
<sec sec-type="COI-statement" id="sec23">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec24">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec25">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec26">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/fmed.2026.1752016/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/fmed.2026.1752016/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>E</given-names></name> <name><surname>Xinhao</surname> <given-names>L</given-names></name> <name><surname>Hengshu</surname> <given-names>Z</given-names></name></person-group>. <article-title>Analysis of aging-related changes in the lower eyelid tissue structure in Han Chinese women</article-title>. <source>J Plast Reconstr Aesthet Surg</source>. (<year>2022</year>) <volume>75</volume>:<fpage>3420</fpage>&#x2013;<lpage>8</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bjps.2022.04.095</pub-id>, <pub-id pub-id-type="pmid">35715308</pub-id></mixed-citation></ref>
<ref id="ref2"><label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Miotti</surname> <given-names>G</given-names></name> <name><surname>De Marco</surname> <given-names>L</given-names></name> <name><surname>Quaglia</surname> <given-names>D</given-names></name> <name><surname>Grando</surname> <given-names>M</given-names></name> <name><surname>Salati</surname> <given-names>C</given-names></name> <name><surname>Spadea</surname> <given-names>L</given-names></name> <etal/></person-group>. <article-title>Fat or fillers: the dilemma in eyelid surgery</article-title>. <source>World J Clin Cases</source>. (<year>2024</year>) <volume>12</volume>:<fpage>2951</fpage>&#x2013;<lpage>65</lpage>. doi: <pub-id pub-id-type="doi">10.12998/wjcc.v12.i17.2951</pub-id>, <pub-id pub-id-type="pmid">38898854</pub-id></mixed-citation></ref>
<ref id="ref3"><label>3.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>JW</given-names></name> <name><surname>Han</surname> <given-names>JW</given-names></name> <name><surname>Kim</surname> <given-names>YK</given-names></name></person-group>. <article-title>Difference in midface rejuvenation strategy between east Asians and Caucasians based on analysis of age-related changes in the orbit and midcheek using computed tomography</article-title>. <source>Aesthet Plast Surg</source>. (<year>2019</year>) <volume>43</volume>:<fpage>1547</fpage>&#x2013;<lpage>52</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00266-019-01478-3</pub-id>, <pub-id pub-id-type="pmid">31468136</pub-id></mixed-citation></ref>
<ref id="ref4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ugradar</surname> <given-names>S</given-names></name> <name><surname>Kim</surname> <given-names>JS</given-names></name> <name><surname>Massry</surname> <given-names>G</given-names></name></person-group>. <article-title>A review of midface aging</article-title>. <source>Ophthalmic Plast Reconstr Surg</source>. (<year>2023</year>) <volume>39</volume>:<fpage>123</fpage>&#x2013;<lpage>31</lpage>. doi: <pub-id pub-id-type="doi">10.1097/IOP.0000000000002282</pub-id>, <pub-id pub-id-type="pmid">36700849</pub-id></mixed-citation></ref>
<ref id="ref5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Larsson</surname> <given-names>JC</given-names></name> <name><surname>Chen</surname> <given-names>T-Y</given-names></name> <name><surname>Lao</surname> <given-names>WW</given-names></name></person-group>. <article-title>Integrating fat graft with blepharoplasty to rejuvenate the Asian periorbita</article-title>. <source>Plast Reconstr Surg Glob Open</source>. (<year>2019</year>) <volume>7</volume>:<fpage>e2365</fpage>. doi: <pub-id pub-id-type="doi">10.1097/GOX.0000000000002365</pub-id>, <pub-id pub-id-type="pmid">31772873</pub-id></mixed-citation></ref>
<ref id="ref6"><label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>Q</given-names></name> <name><surname>Guo</surname> <given-names>L</given-names></name> <name><surname>Zhu</surname> <given-names>Y</given-names></name> <name><surname>Song</surname> <given-names>B</given-names></name> <name><surname>Zeng</surname> <given-names>X</given-names></name> <name><surname>Liang</surname> <given-names>Z</given-names></name> <etal/></person-group>. <article-title>Prospective comparative clinical study: efficacy evaluation of collagen combined with hyaluronic acid injections for tear trough deformity</article-title>. <source>J Cosmet Dermatol</source>. (<year>2024</year>) <volume>23</volume>:<fpage>1613</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jocd.16211</pub-id>, <pub-id pub-id-type="pmid">38299745</pub-id></mixed-citation></ref>
<ref id="ref7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lim</surname> <given-names>Y-K</given-names></name> <name><surname>Jung</surname> <given-names>C-J</given-names></name> <name><surname>Lee</surname> <given-names>M-Y</given-names></name> <name><surname>Moon</surname> <given-names>I-J</given-names></name> <name><surname>Won</surname> <given-names>C-H</given-names></name></person-group>. <article-title>The evaluation of efficacy and safety of a radiofrequency hydro-injector device for the skin around the eye area</article-title>. <source>J Clin Med</source>. (<year>2021</year>) <volume>10</volume>:<fpage>2582</fpage>. doi: <pub-id pub-id-type="doi">10.3390/jcm10122582</pub-id>, <pub-id pub-id-type="pmid">34208109</pub-id></mixed-citation></ref>
<ref id="ref8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>X</given-names></name> <name><surname>Wang</surname> <given-names>H</given-names></name></person-group>. <article-title>Anatomical study and clinical observation of retro-orbicularis oculi fat (ROOF)</article-title>. <source>Aesthet Plast Surg</source>. (<year>2020</year>) <volume>44</volume>:<fpage>89</fpage>&#x2013;<lpage>92</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00266-019-01530-2</pub-id>, <pub-id pub-id-type="pmid">31696242</pub-id></mixed-citation></ref>
<ref id="ref9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schenck</surname> <given-names>TL</given-names></name> <name><surname>Koban</surname> <given-names>KC</given-names></name> <name><surname>Schlattau</surname> <given-names>A</given-names></name> <name><surname>Frank</surname> <given-names>K</given-names></name> <name><surname>Sykes</surname> <given-names>JM</given-names></name> <name><surname>Targosinski</surname> <given-names>S</given-names></name> <etal/></person-group>. <article-title>The functional anatomy of the superficial fat compartments of the face: a detailed imaging study</article-title>. <source>Plast Reconstr Surg</source>. (<year>2018</year>) <volume>141</volume>:<fpage>1351</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1097/PRS.0000000000004364</pub-id>, <pub-id pub-id-type="pmid">29750762</pub-id></mixed-citation></ref>
<ref id="ref10"><label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Park</surname> <given-names>CC</given-names></name> <name><surname>Nguyen</surname> <given-names>P</given-names></name> <name><surname>Hernandez</surname> <given-names>C</given-names></name> <name><surname>Bettencourt</surname> <given-names>R</given-names></name> <name><surname>Ramirez</surname> <given-names>K</given-names></name> <name><surname>Fortney</surname> <given-names>L</given-names></name> <etal/></person-group>. <article-title>Magnetic resonance elastography <italic>vs</italic> transient elastography in detection of fibrosis and noninvasive measurement of steatosis in patients with biopsy-proven nonalcoholic fatty liver disease</article-title>. <source>Gastroenterology</source>. (<year>2017</year>) <volume>152</volume>:<fpage>598</fpage>&#x2013;<lpage>607.e2</lpage>. doi: <pub-id pub-id-type="doi">10.1053/j.gastro.2016.10.026</pub-id>, <pub-id pub-id-type="pmid">27911262</pub-id></mixed-citation></ref>
<ref id="ref11"><label>11.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nur</surname> <given-names>WFH</given-names></name> <name><surname>Ferriastuti</surname> <given-names>W</given-names></name> <name><surname>Soeprijanto</surname> <given-names>B</given-names></name></person-group>. <article-title>The correlation between apparent diffusion coefficient value on MRI and the pathology consistency of meningioma</article-title>. <source>Biomol Health Sci J</source>. (<year>2020</year>) <volume>3</volume>:<fpage>101</fpage>. doi: <pub-id pub-id-type="doi">10.20473/bhsj.v3i2.22171</pub-id></mixed-citation></ref>
<ref id="ref12"><label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Miller</surname> <given-names>TR</given-names></name></person-group>. <article-title>Long-term 3-dimensional volume assessment after fat repositioning lower blepharoplasty</article-title>. <source>JAMA Facial Plast Surg</source>. (<year>2016</year>) <volume>18</volume>:<fpage>108</fpage>&#x2013;<lpage>13</lpage>. doi: <pub-id pub-id-type="doi">10.1001/jamafacial.2015.2184</pub-id>, <pub-id pub-id-type="pmid">26847158</pub-id></mixed-citation></ref>
<ref id="ref13"><label>13.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Miranda</surname> <given-names>RE</given-names></name> <name><surname>Matayoshi</surname> <given-names>S</given-names></name></person-group>. <article-title>Vectra 3D simulation in lower eyelid blepharoplasty: how accurate is it?</article-title> <source>Aesthet Plast Surg</source>. (<year>2022</year>) <volume>46</volume>:<fpage>1241</fpage>&#x2013;<lpage>50</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00266-021-02661-1</pub-id>, <pub-id pub-id-type="pmid">34786617</pub-id></mixed-citation></ref>
<ref id="ref14"><label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tuin</surname> <given-names>AJ</given-names></name> <name><surname>Schepers</surname> <given-names>RH</given-names></name> <name><surname>Spijkervet</surname> <given-names>FKL</given-names></name> <name><surname>Vissink</surname> <given-names>A</given-names></name> <name><surname>Jansma</surname> <given-names>J</given-names></name></person-group>. <article-title>Volumetric effect and patient satisfaction after facial fat grafting</article-title>. <source>Plast Reconstr Surg</source>. (<year>2022</year>) <volume>150</volume>:<fpage>307e</fpage>&#x2013;<lpage>18e</lpage>. doi: <pub-id pub-id-type="doi">10.1097/PRS.0000000000009337</pub-id>, <pub-id pub-id-type="pmid">35666147</pub-id></mixed-citation></ref>
<ref id="ref15"><label>15.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>X</given-names></name> <name><surname>Li</surname> <given-names>J</given-names></name> <name><surname>Ma</surname> <given-names>J</given-names></name></person-group>. <article-title>Tear trough deformity correction with autologous fat grafting evidenced by linear gray scale analysis</article-title>. <source>J Craniofac Surg</source>. (<year>2025</year>) <volume>36</volume>:<fpage>e714</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1097/SCS.0000000000011519</pub-id>, <pub-id pub-id-type="pmid">40540693</pub-id></mixed-citation></ref>
<ref id="ref16"><label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Foissac</surname> <given-names>R</given-names></name> <name><surname>Camuzard</surname> <given-names>O</given-names></name> <name><surname>Piereschi</surname> <given-names>S</given-names></name> <name><surname>Staccini</surname> <given-names>P</given-names></name> <name><surname>Andreani</surname> <given-names>O</given-names></name> <name><surname>Georgiou</surname> <given-names>C</given-names></name> <etal/></person-group>. <article-title>High-resolution magnetic resonance imaging of aging upper face fat compartments</article-title>. <source>Plast Reconstr Surg</source>. (<year>2017</year>) <volume>139</volume>:<fpage>829</fpage>&#x2013;<lpage>37</lpage>. doi: <pub-id pub-id-type="doi">10.1097/PRS.0000000000003173</pub-id></mixed-citation></ref>
<ref id="ref17"><label>17.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cevik Cenkeri</surname> <given-names>H</given-names></name> <name><surname>Sarigul Guduk</surname> <given-names>S</given-names></name> <name><surname>Derin Cicek</surname> <given-names>E</given-names></name></person-group>. <article-title>Aging changes of the superficial fat compartments of the midface over time: a magnetic resonance imaging study</article-title>. <source>Dermatologic Surg</source>. (<year>2020</year>) <volume>46</volume>:<fpage>1600</fpage>&#x2013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1097/DSS.0000000000002646</pub-id></mixed-citation></ref>
<ref id="ref18"><label>18.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Paluch</surname> <given-names>&#x0141;</given-names></name> <name><surname>Pietruski</surname> <given-names>P</given-names></name> <name><surname>Kwiek</surname> <given-names>B</given-names></name> <name><surname>Noszczyk</surname> <given-names>B</given-names></name> <name><surname>Ambroziak</surname> <given-names>M</given-names></name></person-group>. <article-title>Age-related changes in elastographically determined strain of the facial fat compartments: a new frontier of research on face aging processes</article-title>. <source>Adv Dermatol Allergol</source>. (<year>2020</year>) <volume>37</volume>:<fpage>353</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.5114/ada.2018.79778</pub-id>, <pub-id pub-id-type="pmid">32792875</pub-id></mixed-citation></ref>
<ref id="ref19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cotofana</surname> <given-names>S</given-names></name> <name><surname>Lachman</surname> <given-names>N</given-names></name></person-group>. <article-title>Anatomy of the facial fat compartments and their relevance in aesthetic surgery</article-title>. <source>JDDG J Dtsch Dermatol Ges</source>. (<year>2019</year>) <volume>17</volume>:<fpage>399</fpage>&#x2013;<lpage>413</lpage>. doi: <pub-id pub-id-type="doi">10.1111/ddg.13737</pub-id>, <pub-id pub-id-type="pmid">30698919</pub-id></mixed-citation></ref>
<ref id="ref20"><label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Koo</surname> <given-names>TK</given-names></name> <name><surname>Li</surname> <given-names>MY</given-names></name></person-group>. <article-title>A guideline of selecting and reporting intraclass correlation coefficients for reliability research</article-title>. <source>J Chiropr Med</source>. (<year>2016</year>) <volume>15</volume>:<fpage>155</fpage>&#x2013;<lpage>63</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jcm.2016.02.012</pub-id>, <pub-id pub-id-type="pmid">27330520</pub-id></mixed-citation></ref>
<ref id="ref21"><label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Y</given-names></name> <name><surname>Wu</surname> <given-names>G</given-names></name> <name><surname>Wang</surname> <given-names>B</given-names></name> <name><surname>Pang</surname> <given-names>T</given-names></name> <name><surname>Sun</surname> <given-names>H</given-names></name> <name><surname>Yin</surname> <given-names>Y</given-names></name></person-group>. <article-title>Towards macro-AUC oriented imbalanced multi-label continual learning</article-title>. <source>Proc AAAI Conf Artif Intell</source>. (<year>2024</year>) <volume>39</volume>:<fpage>22614</fpage>&#x2013;<lpage>22</lpage>. doi: <pub-id pub-id-type="doi">10.48550/arXiv.2412.18231</pub-id></mixed-citation></ref>
<ref id="ref22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ghasemian</surname> <given-names>A</given-names></name> <name><surname>Hosseinmardi</surname> <given-names>H</given-names></name> <name><surname>Galstyan</surname> <given-names>A</given-names></name> <name><surname>Airoldi</surname> <given-names>EM</given-names></name> <name><surname>Clauset</surname> <given-names>A</given-names></name></person-group>. <article-title>Stacking models for nearly optimal link prediction in complex networks</article-title>. <source>Proc Natl Acad Sci USA</source>. (<year>2020</year>) <volume>117</volume>:<fpage>23393</fpage>&#x2013;<lpage>400</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.1914950117</pub-id>, <pub-id pub-id-type="pmid">32887799</pub-id></mixed-citation></ref>
<ref id="ref23"><label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ling</surname> <given-names>T</given-names></name> <name><surname>Zuo</surname> <given-names>Z</given-names></name> <name><surname>Huang</surname> <given-names>M</given-names></name> <name><surname>Ma</surname> <given-names>J</given-names></name> <name><surname>Wu</surname> <given-names>L</given-names></name></person-group>. <article-title>Stacking classifiers based on integrated machine learning model: fusion of CT radiomics and clinical biomarkers to predict lymph node metastasis in locally advanced gastric cancer patients after neoadjuvant chemotherapy</article-title>. <source>BMC Cancer</source>. (<year>2025</year>) <volume>25</volume>:<fpage>834</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12885-025-14259-w</pub-id>, <pub-id pub-id-type="pmid">40329193</pub-id></mixed-citation></ref>
<ref id="ref24"><label>24.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lohakitsatian</surname> <given-names>P</given-names></name> <name><surname>Tunlayadechanont</surname> <given-names>P</given-names></name> <name><surname>Tantitham</surname> <given-names>T</given-names></name></person-group>. <article-title>Decoding periorbital aging: a multilayered analysis of anatomical changes</article-title>. <source>Aesthet Plast Surg</source>. (<year>2025</year>) <volume>49</volume>:<fpage>664</fpage>&#x2013;<lpage>71</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00266-024-04590-1</pub-id>, <pub-id pub-id-type="pmid">39779502</pub-id></mixed-citation></ref>
<ref id="ref25"><label>25.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sarigul Guduk</surname> <given-names>S</given-names></name> <name><surname>Cevik Cenkeri</surname> <given-names>H</given-names></name> <name><surname>Derin Cicek</surname> <given-names>E</given-names></name> <name><surname>Kus</surname> <given-names>S</given-names></name></person-group>. <article-title>Evaluation of aging changes of the superficial fat compartments of the midface over time: a computed tomography study</article-title>. <source>J Cosmet Dermatol</source>. (<year>2022</year>) <volume>21</volume>:<fpage>1430</fpage>&#x2013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jocd.14292</pub-id>, <pub-id pub-id-type="pmid">34129735</pub-id></mixed-citation></ref>
<ref id="ref26"><label>26.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rohrich</surname> <given-names>RJ</given-names></name> <name><surname>Avashia</surname> <given-names>YJ</given-names></name> <name><surname>Savetsky</surname> <given-names>IL</given-names></name></person-group>. <article-title>Prediction of facial aging using the facial fat compartments</article-title>. <source>Plast Reconstr Surg</source>. (<year>2021</year>) <volume>147</volume>:<fpage>38S</fpage>&#x2013;<lpage>42S</lpage>. doi: <pub-id pub-id-type="doi">10.1097/PRS.0000000000007624</pub-id>, <pub-id pub-id-type="pmid">33347073</pub-id></mixed-citation></ref>
<ref id="ref27"><label>27.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Estler</surname> <given-names>A</given-names></name> <name><surname>Gr&#x00F6;zinger</surname> <given-names>G</given-names></name> <name><surname>Estler</surname> <given-names>E</given-names></name> <name><surname>Hepp</surname> <given-names>T</given-names></name> <name><surname>Feng</surname> <given-names>Y-S</given-names></name> <name><surname>Daigeler</surname> <given-names>A</given-names></name> <etal/></person-group>. <article-title>Quantification of facial fat compartment variations: a three-dimensional morphometric analysis of the cheek</article-title>. <source>Plast Reconstr Surg</source>. (<year>2023</year>) <volume>152</volume>:<fpage>617e</fpage>&#x2013;<lpage>27e</lpage>. doi: <pub-id pub-id-type="doi">10.1097/PRS.0000000000010357</pub-id>, <pub-id pub-id-type="pmid">36877747</pub-id></mixed-citation></ref>
<ref id="ref28"><label>28.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tower</surname> <given-names>JI</given-names></name> <name><surname>Seifert</surname> <given-names>K</given-names></name> <name><surname>Paskhover</surname> <given-names>B</given-names></name></person-group>. <article-title>Patterns of superficial mid facial fat volume distribution differ by age and body mass index</article-title>. <source>Aesthet Plast Surg</source>. (<year>2019</year>) <volume>43</volume>:<fpage>83</fpage>&#x2013;<lpage>90</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00266-018-1249-0</pub-id>, <pub-id pub-id-type="pmid">30283987</pub-id></mixed-citation></ref>
<ref id="ref29"><label>29.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Manta</surname> <given-names>AI</given-names><suffix>FRCOphth</suffix></name> <name><surname>Demer</surname> <given-names>JL</given-names></name></person-group>. <article-title>Magnetic resonance imaging demonstrates differences in brow and upper eyelid fat and muscle layers between east Asians and Caucasians</article-title>. <source>Ophthalmic Plast Reconstr Surg</source>. (<year>2025</year>) <volume>41</volume>:<fpage>535</fpage>&#x2013;<lpage>8</lpage>. doi: <pub-id pub-id-type="doi">10.1097/IOP.0000000000002904</pub-id>, <pub-id pub-id-type="pmid">40919988</pub-id></mixed-citation></ref>
<ref id="ref30"><label>30.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sandoval</surname> <given-names>V</given-names></name> <name><surname>Chuang</surname> <given-names>Z</given-names></name> <name><surname>Power</surname> <given-names>N</given-names></name> <name><surname>Chin</surname> <given-names>JLK</given-names></name></person-group>. <article-title>Artificial intelligence for prostate cancer histopathology diagnostics</article-title>. <source>Can Urol Assoc J</source>. (<year>2022</year>) <volume>16</volume>:<fpage>439</fpage>&#x2013;<lpage>41</lpage>. doi: <pub-id pub-id-type="doi">10.5489/cuaj.7918</pub-id></mixed-citation></ref>
<ref id="ref31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>D&#x2019;Ascenzo</surname> <given-names>F</given-names></name> <name><surname>De Filippo</surname> <given-names>O</given-names></name> <name><surname>Gallone</surname> <given-names>G</given-names></name> <name><surname>Mittone</surname> <given-names>G</given-names></name> <name><surname>Deriu</surname> <given-names>MA</given-names></name> <name><surname>Iannaccone</surname> <given-names>M</given-names></name> <etal/></person-group>. <article-title>Machine learning-based prediction of adverse events following an acute coronary syndrome (PRAISE): a modelling study of pooled datasets</article-title>. <source>Lancet</source>. (<year>2021</year>) <volume>397</volume>:<fpage>199</fpage>&#x2013;<lpage>207</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0140-6736(20)32519-8</pub-id></mixed-citation></ref>
<ref id="ref32"><label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hughes</surname> <given-names>CML</given-names></name> <name><surname>Zhang</surname> <given-names>Y</given-names></name> <name><surname>Pourhossein</surname> <given-names>A</given-names></name> <name><surname>Jurasova</surname> <given-names>T</given-names></name></person-group>. <article-title>A comparative analysis of binary and multi-class classification machine learning algorithms to detect current frailty status using the English longitudinal study of aging (ELSA)</article-title>. <source>Front Aging</source>. (<year>2025</year>) <volume>6</volume>:<fpage>1501168</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fragi.2025.1501168</pub-id>, <pub-id pub-id-type="pmid">40330071</pub-id></mixed-citation></ref>
<ref id="ref33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Koga</surname> <given-names>D</given-names></name> <name><surname>Kaneda</surname> <given-names>R</given-names></name> <name><surname>Komiya</surname> <given-names>C</given-names></name> <name><surname>Ohno</surname> <given-names>S</given-names></name> <name><surname>Takeuchi</surname> <given-names>A</given-names></name> <name><surname>Hara</surname> <given-names>K</given-names></name> <etal/></person-group>. <article-title>Artificial intelligence identifies individuals with prediabetes using single-lead electrocardiograms</article-title>. <source>Cardiovasc Diabetol</source>. (<year>2025</year>) <volume>24</volume>:<fpage>415</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12933-025-02982-4</pub-id>, <pub-id pub-id-type="pmid">41214697</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0002">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2207985/overview">Na Luo</ext-link>, Third Affiliated Hospital of Chongqing Medical University, China</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0003">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3292075/overview">Zhi Yang</ext-link>, Third Affiliated Hospital of Chongqing Medical University, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3325487/overview">Wenping Wang</ext-link>, Chongqing Medical University, China</p>
</fn>
</fn-group>
</back>
</article>