<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="brief-report">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Hum. Neurosci.</journal-id>
<journal-title>Frontiers in Human Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Hum. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5161</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnhum.2025.1661214</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Human Neuroscience</subject>
<subj-group>
<subject>Brief Research Report</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Tuning into flavor: predicting coffee sensory attributes from EEG with boosted-tree regression models</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Bilucaglia</surname> <given-names>Marco</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1132815/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Bellati</surname> <given-names>Mara</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2675437/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Fici</surname> <given-names>Alessandro</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1099523/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Russo</surname> <given-names>Vincenzo</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/94084/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Zito</surname> <given-names>Margherita</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/343165/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Behavior and Brain Laboratory IULM &#x02013; Neuromarketing Research Center, Universit&#x000E0; IULM</institution>, <addr-line>Milan</addr-line>, <country>Italy</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Business, Law, Economics and Consumer Behaviour &#x0201C;Carlo A. Ricciardi&#x0201D;, Universit&#x000E0; IULM</institution>, <addr-line>Milan</addr-line>, <country>Italy</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Philosophy and Cultural Heritage, Ca&#x00027; Foscari University</institution>, <addr-line>Venice</addr-line>, <country>Italy</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Filippo Brighina, University of Palermo, Italy</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Alessandro Tonacci, National Research Council (CNR), Italy</p>
<p>Nazan Turhan, &#x00130;zmir University of Economics, T&#x000FC;rkiye</p>
<p>Tianyi Yang, Nanjing University of Aeronautics and Astronautics, China</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Marco Bilucaglia <email>marco.bilucaglia&#x00040;studenti.iulm.it</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>09</day>
<month>10</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>19</volume>
<elocation-id>1661214</elocation-id>
<history>
<date date-type="received">
<day>07</day>
<month>07</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>17</day>
<month>09</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2025 Bilucaglia, Bellati, Fici, Russo and Zito.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Bilucaglia, Bellati, Fici, Russo and Zito</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>Flavor, a multimodal perception based on taste, smell, and tactile cues, plays a significant role in consumer preferences and purchase intentions toward coffee. In this exploratory study, we assessed the potential of electroencephalography (EEG) and machine learning (ML) techniques to predict coffee sensory attributes. We extracted spectral and temporal features from the EEG of a professional panel while tasting coffee samples and basic water solutions. We trained multiple Least-Squares Boosted Trees (LSBoost) and optimized their hyperparameters through a 100-step Bayesian approach based on a Leave-One-Subject-Out (LOSO) scheme. The models achieved, overall, high predictive accuracy (MAE &#x0003C; 0.75 on a 0 &#x02212; 10 scale) and medium-to-large robustness (Cohen&#x00027;s <italic>d</italic> &#x0003E; 0.6) with respect to mean and lasso benchmark regressors. Feature importance analysis revealed that spectral powers and Hjorth&#x00027;s parameters within parietal, central, and frontal regions were the most predictive. Our findings endorse the use of EEG-based ML models as an alternative to traditional flavor evaluation methods, such as Descriptive Sensory Analysis (DSA).</p></abstract>
<kwd-group>
<kwd>coffee flavor prediction</kwd>
<kwd>electroencephalography (EEG)</kwd>
<kwd>machine learning (ML)</kwd>
<kwd>ensemble learning</kwd>
<kwd>boosted-tree regression</kwd>
<kwd>Descriptive Sensory Analysis (DSA)</kwd>
</kwd-group>
<counts>
<fig-count count="3"/>
<table-count count="3"/>
<equation-count count="7"/>
<ref-count count="74"/>
<page-count count="10"/>
<word-count count="7018"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Cognitive Neuroscience</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1 Introduction</title>
<p>Coffee stands as the major component of the global hot drink market, with a worldwide production exceeding 176 million bags and consumption reaching 7 billion kilograms (<xref ref-type="bibr" rid="B37">Kim Y. et al., 2025</xref>). Among over 60 coffee plant species, only 10 are extensively cultivated, with <italic>Coffea arabica</italic> (arabica), <italic>Coffea canephora</italic> (robusta), and <italic>Coffea liberica</italic> being the most prevalent (<xref ref-type="bibr" rid="B24">Feria-Morales, 2002</xref>). Arabica and robusta make up 99% of global production (<xref ref-type="bibr" rid="B34">Jayakumar et al., 2017</xref>) and commercial coffee typically results from blending their beans in varying proportions (<xref ref-type="bibr" rid="B53">Seninde and Chambers, 2020</xref>).</p>
<p>Consumer preference and purchasing intentions for coffee are mainly driven by subjective factors, such as taste, aroma, and body (<xref ref-type="bibr" rid="B42">Li et al., 2019</xref>). These elements belong to the broader concept of flavor, a multimodal experience in which gustatory, olfactory, trigeminal, and somatosensory inputs are individually processed before being integrated (<xref ref-type="bibr" rid="B55">Small, 2012</xref>). Gustatory signals ascend via the nucleus of the solitary tract (NST) and ventroposteromedial nucleus (VPM) to the primary taste cortex in the rostral insula and adjoining frontal operculum, where the identity and intensity of basic tastes, as well as oral texture and temperature, are encoded. Retronasal olfactory inputs, initially processed in the piriform cortex, converge with gustatory signals in the orbitofrontal cortex (OFC), which also integrates oral somatosensory and trigeminal inputs such as viscosity, temperature, irritation, and astringency (<xref ref-type="bibr" rid="B51">Rolls, 2005</xref>). Within the OFC, convergent inputs give rise to multimodal flavor representations, in which sensory modalities are integrated and assigned hedonic value. At the same time, projections to the amygdala and anterior cingulate cortex (ACC) further embed these representations within affective and motivational systems (<xref ref-type="bibr" rid="B55">Small, 2012</xref>).</p>
<p>Flavor assessment often relies on Descriptive Sensory Analysis (DSA), wherein expert panels assign numerical scores to standardized sensory attributes (<xref ref-type="bibr" rid="B71">Yang and Lee, 2019</xref>). Various coffee attributes have been suggested (<xref ref-type="bibr" rid="B57">Spencer et al., 2016</xref>). However, only bitter, sour, sweet, and astringent (i.e., mouth-drying sensation) have established reference solutions (<xref ref-type="bibr" rid="B5">Batali et al., 2022</xref>).</p>
<p>Based on self-reports, DSA can be confounded by physiological and psychological biases. Physiological phenomena include sensory adaptation and multimodal enhancement/suppression. Psychological phenomena include expectation, stimulus/proximity/logical errors, habituation, halo effect, presentation order, mutual suggestion, and central/extreme rating tendency (<xref ref-type="bibr" rid="B58">Stone et al., 2021</xref>; <xref ref-type="bibr" rid="B14">Civille et al., 2024</xref>). To mitigate these risks, international standards (e.g., ISO 13299, ISO 11132, and ISO 8586) and the scientific literature recommend extraneous cues blinding, randomized or Williams-balanced presentations, adequate rests/rinses alternation, and ongoing panel performance monitoring (<xref ref-type="bibr" rid="B54">Sipos et al., 2021</xref>). Additionally, direct techniques based on bioelectrical measures have recently been advocated (<xref ref-type="bibr" rid="B62">Torrico et al., 2023</xref>; <xref ref-type="bibr" rid="B50">Rodrigues et al., 2024</xref>).</p>
<p>Previous studies have explored the use of electroencephalography (EEG) for flavor assessment. Global field power and scalp topographies (<xref ref-type="bibr" rid="B17">Crouzet et al., 2015</xref>), as well as phase in the delta band (<xref ref-type="bibr" rid="B67">Wallroth et al., 2018</xref>) and spectral powers in alpha and theta bands (<xref ref-type="bibr" rid="B72">Yang et al., 2023</xref>) have emerged as candidate neurometrics. Similar results, involving alpha, beta, and theta powers, have been observed in coffee tasting tasks (<xref ref-type="bibr" rid="B31">Hsu and Chen, 2021</xref>; <xref ref-type="bibr" rid="B61">Tonacci et al., 2024</xref>). However, their correlational&#x02014;rather than causal&#x02014;nature poses a risk of reverse inference problems (<xref ref-type="bibr" rid="B49">Poldrack, 2006</xref>). Pattern-decoding methods based on Machine Learning (ML) models have been suggested to mitigate this issue (<xref ref-type="bibr" rid="B46">Nathan and Del Pinal, 2017</xref>). Furthermore, being free of rigid theoretical assumptions, ML methods could also be helpful in revealing latent structures in the data, providing new theoretical insights and hypotheses (<xref ref-type="bibr" rid="B65">Verzelli et al., 2024</xref>).</p>
<p>Research on flavor prediction with ML and EEG data is still limited (see the following Section 2 for details). Most of the studies employed basic water solutions as eliciting stimuli, and the few examining coffee focused on other target variables than taste. Moreover, nearly all existing models were classifiers to discriminate among basic tastes (e.g., sour, sweet, bitter, salty, umami, and neutral) instead of predicting the intensity level of sensory attributes. Therefore, such models are ill-suited to replace or even complement traditional DSA.</p>
<p>To address these limitations, we performed an exploratory study recording the EEG data from expert coffee tasters while they tasted both reference solutions and coffee samples. We trained multiple tree-based ensemble regressors to predict the intensity level of bitter, sweet, acid, and astringent, achieving high performances and robustness against benchmark models. We interpreted the fitted models, identifying spectral and temporal features within parietal, central, and frontal regions as the most informative.</p>
</sec>
<sec id="s2">
<title>2 Related work</title>
<p>As previously mentioned, most of the past studies on taste prediction using EEG data and ML methods trained classifiers to discriminate among basic water solutions. <xref ref-type="bibr" rid="B18">De et al. (2023)</xref> fed temporal (maximum/minimum values, mean, kurtosis and skewness) and spectral [Power Spectral Densities (PSDs) in theta, delta, alpha and beta bands] features into a Long Short-Term Memory Recurrent Neural Network (LSTM-RNN) to discriminate sour, sweet, bitter, salty, umami, and neutral solutions from 46 subjects. They obtained an accuracy of 97.16%. <xref ref-type="bibr" rid="B69">Xia et al. (2024)</xref> employed a Convolutional Neural Network (CNN) with spatiotemporally augmented raw EEG data to identify sour, sweet, bitter, and salty solutions from 20 subjects. They reached 99.5% of accuracy. <xref ref-type="bibr" rid="B41">Li et al. (2025)</xref> trained a Support Vector Machine (SVM) with spectral features (wavelet decompositions in &#x003B1; and &#x003B8; bands) to classify sour, sweet, bitter, salty, and umami solutions from 22 subjects. They reported a maximum accuracy of 76.13%. <xref ref-type="bibr" rid="B66">Vo et al. (2023)</xref> trained a feed-forward Neural Network (NN) using spectral features (powers in delta, theta, alpha, beta, and gamma bands) to discriminate between salty and sour solutions from 15 people. The accuracy was 84.36%.</p>
<p>Only one study moved from discrimination to intensity level prediction. <xref ref-type="bibr" rid="B74">Zhao et al. (2022)</xref> contrasted linear, tree, and ensemble regressors, trained with temporal and information-related features (energy, absolute mean value, and wavelet entropy), to predict the intensity level of sour, sweet, bitter, salty, and umami from 10 subjects. The best model, Extreme Gradient Boosting (XGBoost), achieved a goodness-of-fit (measured through the <italic>R</italic><sup>2</sup> coefficient), ranging from &#x02212;0.22 to 0.18.</p>
<p>Two studies focused on other-than-basic water solutions. <xref ref-type="bibr" rid="B27">Gonz&#x000E1;lez-Espa&#x000F1;a et al. (2023)</xref> aimed to discriminate wine vs. water and wine vs. wine tasting tasks of 10 participants through an SVM with temporal and spatial features (global field powers and channel averages). They reported accuracies greater than the chance level of 70% for both predictions. <xref ref-type="bibr" rid="B45">Naser and Aydemir (2024)</xref> trained a k-Nearest Neighbors (kNN) and a Random Forest (RF) with temporal and spectral features (mean value of the Hilbert-transformed EEG signal and level-2 wavelet coefficients) to discriminate four food substances (oils of Orange, Mint, Thyme, and Clove) from 10 subjects. The highest accuracy, obtained with the kNN, was 87.5%.</p>
<p>Coffee was selected as an eliciting stimulus in two studies. However, as previously mentioned, the target belonged to other aspects than taste. <xref ref-type="bibr" rid="B43">Maram et al. (2023)</xref> trained a CNN with raw EEG data to classify the preference of 3 coffee brands from 12 participants, obtaining an accuracy of 83.43%. <xref ref-type="bibr" rid="B70">Xu et al. (2021)</xref> compared several Bayesian Regression (BR) models, trained with spectral features (powers in theta, alpha, beta, and gamma bands), to predict the emotional responses to tasting tasks from 32 subjects. The best model achieved a goodness-of-fit [measured through the Watanabe-Akaike Information Criterion (WAIC)] of 963.55.</p>
</sec>
<sec sec-type="materials and methods" id="s3">
<title>3 Materials and methods</title>
<sec>
<title>3.1 Study population</title>
<p>A total of 15 subjects (9 females) in the age range 24&#x02013;59 years (<italic>M</italic> &#x0003D; 40.13, <italic>SD</italic> &#x0003D; 13.80) took part in the experiment. They were recruited as professional coffee tasters with proficiency in DSA and grouped as trained (T, less than 3 years of experience) or experts (E, more than 3 years of experience). Despite the sample size being below the average when compared to the surveyed past studies (i.e. 19.67 &#x000B1; 12.31, range: 10 &#x02212; 46), it was still in line with DSA studies that typically consist of 5&#x02013;15 experts (<xref ref-type="bibr" rid="B26">Gacula and Rutenbeck, 2006</xref>).</p>
<p>The participants resulted group- and gender-balanced in terms of mean age [<italic>t</italic>(13) &#x0003D; 0.818, <italic>p</italic> &#x0003D; 0.428 and <italic>t</italic>(13) &#x0003D; 1.034, <italic>p</italic> &#x0003D; 0.320, respectively]. However, the groups were unbalanced in terms of gender proportion [E: 2 females, T: 7 females, &#x003C7;<sup>2</sup>(1) &#x0003D; 5.402, <italic>p</italic> &#x0003D; 0.020].</p>
<p>A sensitivity analysis performed with G&#x0002A;Power (<xref ref-type="bibr" rid="B22">Faul et al., 2007</xref>) considering a within-between design with 2 groups, 8 measures, and standard parameters (&#x003B1; &#x0003D; 0.05, 1&#x02212;&#x003B2; &#x0003D; 0.95, &#x003F5; &#x0003D; 1, &#x003C1; &#x0003D; 0.5) confirmed a minimum detectable effect size of <italic>f</italic> &#x0003D; 0.302, interpreted as medium-to-large (<xref ref-type="bibr" rid="B15">Cohen, 1992</xref>).</p>
<p>The study was approved by the Ethical Committee of Universit&#x000E0; IULM (approval number: 0067814). All the procedures adhered to the guidelines of the Helsinki Declaration, and informed consent was secured from each participant.</p>
</sec>
<sec>
<title>3.2 Instrumentation</title>
<p>The EEG was acquired using the NVX-52 device (Medical Computer System Ltd.) from 38 Ag/AgCl scalp electrodes, 2 Ag/AgCl ear clips (A1 and A2), and 1 adhesive Ag/AgCl patch placed on the left mastoids (M1). The electrode positioning, detailed in <xref ref-type="bibr" rid="B9">Bilucaglia et al. (2024)</xref>, followed the 10-10 system (<xref ref-type="bibr" rid="B47">Nuwer, 2018</xref>), and the montage was monopolar, reference-free, and grounded to M1. Neorec software (Medical Computer System Ltd) was used to record the data at a sample frequency of 2<italic>kHz</italic> and a resolution of 24<italic>bits</italic>.</p>
<p>The iMotions software (iMotions A/S) was used to deliver the experiment instructions and collect the sensory evaluations.</p>
<p>Data synchronization was ensured by a transistor-to-transistor (TTL) pulse, sent by iMotions at the beginning of the experiment and fed into the NVX-52 by means of the ESB synchronization box (<xref ref-type="bibr" rid="B8">Bilucaglia et al., 2020</xref>).</p>
<p>All computations were carried out on a workstation equipped with an AMD Ryzen&#x02122; Threadripper PRO 5975WX CPU (32 cores, 64 threads, 3.6 GHz base clock) and 256 GB of DDR4-3,200 MHz ECC. No GPU acceleration was used. Code was executed in MATLAB R2024b (The Mathworks, Inc.) with Statistics and Machine Learning Toolbox 12.3.</p>
</sec>
<sec>
<title>3.3 Experimental protocol</title>
<p>The experiment consisted of a starting 60<italic>s</italic> eye-closed baseline (BSL) and two experimental phases, namely benchmark (Be) and coffee (Co).</p>
<p>The Be phase involved 4 tasting trials with solutions of sucrose (20<italic>g</italic>/<italic>l</italic>), caffeine (0.6<italic>g</italic>/<italic>l</italic>), citric acid (0.6<italic>g</italic>/<italic>l</italic>), and potassium aluminum sulfate (1<italic>g</italic>/<italic>l</italic>), to elicit sweet, bitter, acid, and astringent flavors, respectively (<xref ref-type="bibr" rid="B2">Anbarasan et al., 2022</xref>). Micro-filtered mineral water was used as diluent. According to <xref ref-type="bibr" rid="B52">Rousmans et al. (2000)</xref>, the exact concentration of the solutions was determined from a previous pilot test.</p>
<p>According to <xref ref-type="bibr" rid="B1">Abubakar et al. (2020)</xref>, the Co phase involved four tasting trials with coffees at various arabica/robusta ratios (100:0, 80:20, 85:15, and 70:30).</p>
<p>The phase order was fixed (i.e., first Be and then Co), and the tasting trials were randomized within each phase.</p>
<p>The administration of liquids was masked. The Be solutions were served at room temperature, while the Co at approximately 60&#x000B0;C. According to <xref ref-type="bibr" rid="B21">Di Flumeri et al. (2017)</xref>, participants were instructed to rinse the palate with a glass of water before any tasting trial (WR task) and to keep the liquid (solution or coffee) in the mouth for 10<italic>s</italic> (TL task) before swallowing. At the end of each tasting trial, subjective ratings for bitter, astringent, sweet, and acid attributes were collected on a 0 &#x02212; 10 scale.</p>
<p>The following <xref ref-type="fig" rid="F1">Figure 1</xref> summarizes the experimental protocol.</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Schematic representation of the experimental protocol.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1661214-g0001.tif">
<alt-text>Schematic representation of the experimental protocol. Starting with eye-closed baseline, followed by water rinsing, liquid tasting, liquid keeping, and swallowing. Attributes are rated. Repeated for both benchmark and coffee trials, each including four random target substances.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<title>3.4 Data processing</title>
<p>Data processing was performed using the EEGLab toolbox (<xref ref-type="bibr" rid="B19">Delorme and Makeig, 2004</xref>). The EEG was resampled at 512<italic>Hz</italic> and filtered in the 0.1 &#x02212; 40<italic>Hz</italic> band (IV order zero-phase Butterworth filter). Power line interference (50 and 100<italic>Hz</italic>) was reduced through the CleanLine, a multi-taper-based regression technique (<xref ref-type="bibr" rid="B10">Bokil et al., 2010</xref>), while non-stationary artifacts were corrected using the Artifact Subspace Reconstruction (ASR) method (<xref ref-type="bibr" rid="B13">Chang et al., 2020</xref>) with standard cutoff parameter (&#x003BA; &#x0003D; 20). ASR represents the gold standard for handling high-amplitude artifacts, such as those related to locomotor tasks in real-world and Mobile Brain Imaging (MoBI) contexts (<xref ref-type="bibr" rid="B36">Kim H. et al., 2025</xref>). Independent component analysis (ICA) decomposition was carried out using the second-order blind identification (SOBI) algorithm (<xref ref-type="bibr" rid="B64">Urig&#x000FC;en and Garcia-Zapirain, 2015</xref>) on a resampled (100<italic>Hz</italic>) and heavily filtered (1 &#x02212; 30<italic>Hz</italic>, IV order zero-phase Butterworth filter) copy of the data. According to <xref ref-type="bibr" rid="B9">Bilucaglia et al. (2024)</xref>, the resulting weight matrix was multiplied by the original data to obtain the independent components (ICs). The ICLabel classifier (<xref ref-type="bibr" rid="B48">Pion-Tonachini et al., 2019</xref>) was used to detect artifactual ICs as those with not-brain probability Pr{<monospace>!brain</monospace>}&#x02265;0.9. On average, 3.8 &#x000B1; 1.373 (min &#x0003D; 2, max &#x0003D; 7) artifactual ICs over 38 were identified and removed. Finally, a re-reference to the approximately zero ideal potential was performed through the Reference Electrode Standardization Technique (REST) algorithm (<xref ref-type="bibr" rid="B73">Yao, 2001</xref>).</p>
<p>The cleaned EEG was offline aligned to the starting TTL pulse and epoched according to the experimental phases (i.e., EYC and TL tasks of Be and Co). The following <xref ref-type="fig" rid="F2">Figure 2</xref> shows a representative segment of raw and pre-processed EEG data.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Representative 5<italic>s</italic>&#x02212;long segments showing the <bold>(a)</bold> raw and <bold>(b)</bold> pre-processed EEG signal. The data refer to the TL task of As solution, with onset marked by the pink vertical line.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1661214-g0002.tif">
<alt-text>Raw and processed EEG signal. Panel (a) displays high amplitude artifacts and power line noise. Panel (b) shows the same signal after the pre-processing step. The vertical scale is thirty microvolts, while the horizontal scale is 1 second.</alt-text>
</graphic>
</fig>
<p>For each subject, the Individual Alpha Frequency (IAF) was computed as the center of gravity of the Power Spectral Density (PSD) in the extended (7.5 &#x02212; 12.5<italic>Hz</italic>) alpha band (<xref ref-type="bibr" rid="B38">Klimesch, 1999</xref>). As PSD, the average occipital Welch&#x00027;s PSD (1<italic>s</italic>&#x02212;long Hamming window at 50% of overlapping) estimated in the BSL epoch was considered (<xref ref-type="bibr" rid="B7">Bilucaglia et al., 2019</xref>). The IAF served to define the following subject-specific &#x003B4;, &#x003B8;, &#x003B1;, &#x003B2;, and &#x003B3; EEG bands (<xref ref-type="bibr" rid="B11">Borghini et al., 2019</xref>):</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M1"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003B4;</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mtext class="textrm" mathvariant="normal">IAF</mml:mtext><mml:mo>-</mml:mo><mml:mn>6</mml:mn></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>&#x003B8;</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtext class="textrm" mathvariant="normal">IAF</mml:mtext><mml:mo>-</mml:mo><mml:mn>6</mml:mn><mml:mo>,</mml:mo><mml:mtext class="textrm" mathvariant="normal">IAF</mml:mtext><mml:mo>-</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>&#x003B1;</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtext class="textrm" mathvariant="normal">IAF</mml:mtext><mml:mo>-</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext class="textrm" mathvariant="normal">IAF</mml:mtext><mml:mo>&#x0002B;</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>&#x003B2;</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtext class="textrm" mathvariant="normal">IAF</mml:mtext><mml:mo>&#x0002B;</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext class="textrm" mathvariant="normal">IAF</mml:mtext><mml:mo>&#x0002B;</mml:mo><mml:mn>16</mml:mn></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>&#x003B3;</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtext class="textrm" mathvariant="normal">IAF</mml:mtext><mml:mo>&#x0002B;</mml:mo><mml:mn>16</mml:mn><mml:mo>,</mml:mo><mml:mtext class="textrm" mathvariant="normal">IAF</mml:mtext><mml:mo>&#x0002B;</mml:mo><mml:mn>25</mml:mn></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
</sec>
<sec>
<title>3.5 Feature extraction</title>
<p>Features were extracted exclusively from WR and TL epochs, thereby excluding non-task-related activity that could also have been potentially contaminated by residual muscle artifacts. For each channel <italic>C</italic> and band <italic>B</italic> &#x0003D; {&#x003B4;, &#x003B8;, &#x003B1;, &#x003B2;, &#x003B3;}, normalized spectral powers <italic>p</italic><sub><italic>C, B</italic></sub> were computed as (<xref ref-type="bibr" rid="B6">Bilucaglia et al., 2022</xref>):</p>
<disp-formula id="E2"><label>(2)</label><mml:math id="M2"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi><mml:mo>,</mml:mo><mml:mi>B</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mstyle displaystyle="true"><mml:msub><mml:mrow><mml:mo>&#x0222B;</mml:mo></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msub></mml:mstyle><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>d</mml:mi><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mstyle displaystyle="true"><mml:msubsup><mml:mrow><mml:mo>&#x0222B;</mml:mo></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x0221E;</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mi>&#x0221E;</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>f</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>d</mml:mi><mml:mi>f</mml:mi></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>x</italic><sub><italic>C</italic></sub>(<italic>f</italic>) is the Welch&#x00027;s PSD (1<italic>s</italic>&#x02212;long Hamming window with 50% of overlapping).</p>
<p>Additionally, the following activity (<italic>A</italic><sub><italic>C</italic></sub>), mobility (<italic>M</italic><sub><italic>C</italic></sub>), and complexity (<italic>C</italic><sub><italic>C</italic></sub>) temporal parameters were computed as (<xref ref-type="bibr" rid="B30">Hjorth, 1970</xref>):</p>
<disp-formula id="E3"><label>(3)</label><mml:math id="M3"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x003C3;</mml:mi><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>M</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>d</mml:mi><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:mfrac></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>A</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:msqrt><mml:mo>,</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>C</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>M</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>d</mml:mi><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:mfrac></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>M</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where &#x003C3;{&#x000B7;} is the temporal variance operator.</p>
<p>Expertise level (group) and age were also considered, since their impact on flavor evaluation has been previously reported (<xref ref-type="bibr" rid="B16">Croijmans and Majid, 2016</xref>; <xref ref-type="bibr" rid="B44">Mojet, 2003</xref>).</p>
<p>The feature vectors (306-long) were finally obtained by concatenating spectral (38 &#x000D7; 5 &#x0003D; 190) and temporal (38 &#x000D7; 3 &#x0003D; 114) vectors, as well as age and group (categorical: T, E) scalars. For each tasting trial <italic>T</italic> and each phase <italic>P</italic>, the TL vectors <italic><bold>x</bold></italic><sub><italic>T, P</italic></sub> were normalized with respect to the WR vectors <italic><bold>y</bold></italic><sub><italic>T, P</italic></sub> as:</p>
<disp-formula id="E4"><label>(4)</label><mml:math id="M4"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>,</mml:mo><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>,</mml:mo><mml:mi>P</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>y</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>,</mml:mo><mml:mi>P</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x02298;</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>y</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>,</mml:mo><mml:mi>P</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where &#x02298; represents the Hadamard (i.e., element-wise) division operator.</p>
<p>Three datasets corresponding to the Be and Co phases (60 &#x000D7; 306 each) as well as the BeCo (120 &#x000D7; 306) consisting of the normalized TL vectors <inline-formula><mml:math id="M5"><mml:msubsup><mml:mrow><mml:mstyle mathvariant="bold-italic"><mml:mi>x</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>,</mml:mo><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> were finally built. The target variables <italic>y</italic>, consisting of the attribute ratings, were transformed as log(1&#x0002B;<italic>y</italic>), following general recommendations for ratio scales (<xref ref-type="bibr" rid="B35">Keene, 1995</xref>).</p>
</sec>
<sec>
<title>3.6 Model training and evaluation</title>
<p>The selected model was LSBoost, a least-squares variant of Boosted Trees (<xref ref-type="bibr" rid="B25">Friedman, 2001</xref>). It was chosen for the enhanced predictive performance, as a non-linear ensemble method, and because it incorporates feature selection within the weak learners (Decision Trees, DTs). Regressors belonging to the boosted trees family have been previously shown to outperform in EEG prediction tasks (<xref ref-type="bibr" rid="B32">Hussain et al., 2019</xref>; <xref ref-type="bibr" rid="B29">He et al., 2022</xref>; <xref ref-type="bibr" rid="B33">Isabona et al., 2022</xref>).</p>
<p>LSBoost&#x00027;s tunable hyperparameters included the number of learners (<italic>n</italic>) and the learning rate (&#x003C1;), while DTs&#x00027; ones included the leaf size (<italic>l</italic><sub><italic>s</italic></sub>) and the maximum number of splits (<italic>n</italic><sub><italic>s</italic></sub>). Since the dataset pre-processing is known to impact the performance of the EEG-based prediction models (<xref ref-type="bibr" rid="B3">Apicella et al., 2023</xref>; <xref ref-type="bibr" rid="B63">Tryon et al., 2025</xref>), different standardization techniques (<italic>S</italic>) were also considered. They included the subject-wise z-score and min-max normalisations, as well as a non-linear transformation based on the median value (<xref ref-type="bibr" rid="B4">Arevalillo-Herr&#x000E1;ez et al., 2019</xref>) and the lack of standardization (none).</p>
<p>The best hyperparameters <inline-formula><mml:math id="M6"><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> were obtained through a Bayesian optimization (<xref ref-type="bibr" rid="B56">Snoek et al., 2012</xref>), considering the cross-validated Mean Absolute Error (MAE) as objective function <inline-formula><mml:math id="M7"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>L</mml:mi></mml:mstyle></mml:mrow></mml:math></inline-formula>. This solved the following Combined Algorithm Selection and Hyperparameter (CASH) problem (<xref ref-type="bibr" rid="B39">Kotthoff et al., 2017</xref>):</p>
<disp-formula id="E5"><label>(5)</label><mml:math id="M8"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003C1;</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msup><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x0002A;</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">argmin</mml:mo></mml:mrow><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>&#x003C1;</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>S</mml:mi></mml:mrow></mml:munder></mml:mstyle><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>L</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>&#x003C1;</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>S</mml:mi></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>n</italic>&#x02208;[1, 500], &#x003C1;&#x02208;[0.01, 1], <italic>l</italic><sub><italic>s</italic></sub>, <italic>n</italic><sub><italic>s</italic></sub>&#x02208;[1, 100] and <italic>S</italic>&#x02208;{z-score, min-max, median, none}.</p>
<p>The maximum number of evaluations was set to 100 and the seed was fixed at <monospace>rng(1)</monospace>, to ensure replicability.</p>
<p>The cross-validation followed a Leave-One-Subject-Out (LOSO) (<xref ref-type="bibr" rid="B23">Fazli et al., 2009</xref>) scheme to address the subject-wise data dependence.</p>
<p>As baselines for a robust model evaluation, a regressor constantly predicting the training set&#x00027;s mean target (Me) and a lasso regressor (LR) (<xref ref-type="bibr" rid="B60">Tibshirani, 1996</xref>) were additionally fitted following the same LOSO approach. The lasso&#x00027;s penalisation term &#x003BB; was set at (<xref ref-type="bibr" rid="B12">B&#x000FC;hlmann and Van De Geer, 2011</xref>, p. 14):</p>
<disp-formula id="E6"><label>(6)</label><mml:math id="M9"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x003BB;</mml:mo><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mn>2</mml:mn><mml:mo class="qopname">log</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>/</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msqrt><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <italic>n</italic> &#x0003D; 56 and <italic>p</italic> &#x0003D; 306 are the dimensions of the training set.</p>
<p>The cross-validated MAEs of LSBoost vs. Me and LSBoost vs. LR were compared by means of the following Cohen&#x00027;s d-scores (<xref ref-type="bibr" rid="B28">Goulet-Pelletier and Cousineau, 2018</xref>):</p>
<disp-formula id="E7"><label>(7)</label><mml:math id="M10"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mi>e</mml:mi><mml:mo>,</mml:mo><mml:mi>L</mml:mi><mml:mi>R</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mi>e</mml:mi><mml:mo>,</mml:mo><mml:mi>L</mml:mi><mml:mi>R</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>S</mml:mi><mml:mi>B</mml:mi><mml:mi>o</mml:mi><mml:mi>o</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mo>&#x00304;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mi>e</mml:mi><mml:mo>,</mml:mo><mml:mi>L</mml:mi><mml:mi>R</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>where <inline-formula><mml:math id="M11"><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mo>&#x00304;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mi>e</mml:mi><mml:mo>,</mml:mo><mml:mi>L</mml:mi><mml:mi>R</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mi>e</mml:mi><mml:mo>,</mml:mo><mml:mi>L</mml:mi><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x0002B;</mml:mo><mml:msubsup><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>S</mml:mi><mml:mi>b</mml:mi><mml:mi>o</mml:mi><mml:mi>o</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>/</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msqrt></mml:math></inline-formula>, <italic>m</italic><sub>(&#x000B7;)</sub> and <italic>s</italic><sub>(&#x000B7;)</sub> are the mean and standard deviation of the cross-validated MAEs, respectively. Cutoffs for small, medium, and large differences are placed at 0.2, 0.5, and 0.8, respectively (<xref ref-type="bibr" rid="B15">Cohen, 1992</xref>).</p>
<p>The significance at &#x003B1; &#x0003D; 0.05 level of each <italic>d</italic> coefficient was assessed from its 95% Confidence Intervals, estimated from a non-central <italic>t</italic> distribution (<xref ref-type="bibr" rid="B28">Goulet-Pelletier and Cousineau, 2018</xref>).</p>
<p>To assess the predictive power of the features, models showing significant <italic>d</italic><sub><italic>Me, LR</italic></sub> coefficients were trained on the complete datasets using optimal hyperparameters. Then, LSBoost&#x00027;s feature importance scores were extracted, normalized to the total importance score, and then summed across the channels. According to <xref ref-type="bibr" rid="B7">Bilucaglia et al. (2019)</xref>, a topographic map showing the feature importance distribution was then obtained by averaging the scores across datasets, targets, and models.</p>
</sec>
</sec>
<sec sec-type="results" id="s4">
<title>4 Results</title>
<p>Four models reported significant improvements from the benchmarks, with an overall MAE of 0.537 &#x000B1; 0.073 (anti-log: 0.714 &#x000B1; 0.124) and <italic>d</italic> scores of <italic>d</italic><sub><italic>Me</italic></sub> &#x0003D; 0.858 &#x000B1; 0.341, <italic>d</italic><sub><italic>LR</italic></sub> &#x0003D; 0.897 &#x000B1; 0.326. The targets were Bi (trained on Be), Sw (on Be), Ac (on Co), and As (on BeCo).</p>
<p>The Ac prediction obtained the best MAE (Co: 0.459 &#x000B1; 0.178), while Sw the worst one (Be: 0.600 &#x000B1; 0.282). The highest robustness against benchmark regressors was achieved by Bi trained on Be (<italic>d</italic><sub><italic>Me</italic></sub> &#x0003D; 1.344, <italic>d</italic><sub><italic>LR</italic></sub> &#x0003D; 1.372), while the lowest one was observed for Sw trained on Be (<italic>d</italic><sub><italic>Me</italic></sub> &#x0003D; 0.651, <italic>d</italic><sub><italic>LR</italic></sub> &#x0003D; 0.660).</p>
<p>The following <xref ref-type="table" rid="T1">Table 1</xref> reports the best hyperparameters and the training time (in seconds) of the significant models, split for dataset and target. The following <xref ref-type="table" rid="T2">Table 2</xref> summarizes the performances (cross-validated MAEs and Cohen&#x00027;s <italic>d</italic> coefficients) of the significant models, split for datasets and targets.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Significant models with best hyper-parameters and training time (in seconds) split for dataset and target.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold><inline-formula><mml:math id="M12"><mml:mstyle mathvariant="bold"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>D</mml:mi></mml:mstyle></mml:mrow></mml:mstyle></mml:math></inline-formula></bold></th>
<th valign="top" align="center"><bold><inline-formula><mml:math id="M13"><mml:mstyle mathvariant="bold"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>T</mml:mi></mml:mstyle></mml:mrow></mml:mstyle></mml:math></inline-formula></bold></th>
<th valign="top" align="center"><italic><bold>n</bold></italic></th>
<th valign="top" align="center"><bold>&#x003C1;</bold></th>
<th valign="top" align="center"><bold><italic>l</italic><sub><italic>s</italic></sub></bold></th>
<th valign="top" align="center"><bold><italic>n</italic><sub><italic>s</italic></sub></bold></th>
<th valign="top" align="center"><bold><italic>S</italic></bold></th>
<th valign="top" align="center"><bold>t</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Be</td>
<td valign="top" align="center">Bi</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">0.078</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">27</td>
<td valign="top" align="center">Z-score</td>
<td valign="top" align="center">38.115</td>
</tr>
<tr>
<td valign="top" align="left">Be</td>
<td valign="top" align="center">Sw</td>
<td valign="top" align="center">16</td>
<td valign="top" align="center">0.657</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">Median</td>
<td valign="top" align="center">41.324</td>
</tr>
<tr>
<td valign="top" align="left">Co</td>
<td valign="top" align="center">Ac</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">0.531</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">79</td>
<td valign="top" align="center">Median</td>
<td valign="top" align="center">35.779</td>
</tr>
<tr>
<td valign="top" align="left">BeCo</td>
<td valign="top" align="center">As</td>
<td valign="top" align="center">38</td>
<td valign="top" align="center">0.075</td>
<td valign="top" align="center">13</td>
<td valign="top" align="center">96</td>
<td valign="top" align="center">Min-max</td>
<td valign="top" align="center">37.162</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p><inline-formula><mml:math id="M14"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>D</mml:mi></mml:mstyle></mml:mrow></mml:math></inline-formula>, dataset; <inline-formula><mml:math id="M15"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>T</mml:mi></mml:mstyle></mml:mrow></mml:math></inline-formula>, target; <italic>n</italic>, number of learners; &#x003C1;, learning rate; <italic>l</italic><sub><italic>s</italic></sub>, leaf size; <italic>n</italic><sub><italic>s</italic></sub>, number of splits; <italic>S</italic>, standardization method; <italic>t</italic>, training time [s].</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Performances of the significant models split for the dataset and target.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left" rowspan="2"><inline-formula><mml:math id="M16"><mml:mstyle mathvariant="bold"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>D</mml:mi></mml:mstyle></mml:mrow></mml:mstyle></mml:math></inline-formula></th>
<th valign="top" align="center" rowspan="2"><inline-formula><mml:math id="M17"><mml:mstyle mathvariant="bold"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>T</mml:mi></mml:mstyle></mml:mrow></mml:mstyle></mml:math></inline-formula></th>
<th valign="top" align="center" colspan="2"><bold>LSBoost</bold></th>
<th valign="top" align="center" colspan="2"><bold>Me</bold></th>
<th valign="top" align="center" colspan="2"><bold>LR</bold></th>
<th valign="top" align="center" rowspan="2"><bold><italic>d</italic><sub><italic>Me</italic></sub></bold></th>
<th valign="top" align="center" rowspan="2"><bold><italic>d</italic><sub><italic>LR</italic></sub></bold></th>
</tr>
<tr>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>SD</bold></th>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>SD</bold></th>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>SD</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Be</td>
<td valign="top" align="center">Bi</td>
<td valign="top" align="center">0.490</td>
<td valign="top" align="center">0.176</td>
<td valign="top" align="center">0.694</td>
<td valign="top" align="center">0.127</td>
<td valign="top" align="center">0.692</td>
<td valign="top" align="center">0.118</td>
<td valign="top" align="center">1.344</td>
<td valign="top" align="center">1.372</td>
</tr>
<tr>
<td valign="top" align="left">Be</td>
<td valign="top" align="center">Sw</td>
<td valign="top" align="center">0.600</td>
<td valign="top" align="center">0.282</td>
<td valign="top" align="center">0.743</td>
<td valign="top" align="center">0.158</td>
<td valign="top" align="center">0.774</td>
<td valign="top" align="center">0.247</td>
<td valign="top" align="center">0.651</td>
<td valign="top" align="center">0.660</td>
</tr>
<tr>
<td valign="top" align="left">Co</td>
<td valign="top" align="center">Ac</td>
<td valign="top" align="center">0.459</td>
<td valign="top" align="center">0.178</td>
<td valign="top" align="center">0.604</td>
<td valign="top" align="center">0.165</td>
<td valign="top" align="center">1.008</td>
<td valign="top" align="center">1.125</td>
<td valign="top" align="center">0.844</td>
<td valign="top" align="center">0.841</td>
</tr>
<tr>
<td valign="top" align="left">BeCo</td>
<td valign="top" align="center">As</td>
<td valign="top" align="center">0.598</td>
<td valign="top" align="center">0.132</td>
<td valign="top" align="center">0.681</td>
<td valign="top" align="center">0.146</td>
<td valign="top" align="center">1.058</td>
<td valign="top" align="center">1.153</td>
<td valign="top" align="center">0.595</td>
<td valign="top" align="center">0.717</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p><inline-formula><mml:math id="M18"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>D</mml:mi></mml:mstyle></mml:mrow></mml:math></inline-formula>, dataset; <inline-formula><mml:math id="M19"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>T</mml:mi></mml:mstyle></mml:mrow></mml:math></inline-formula>, target; LSBoost, Least-Square Boosted Tree; Me, mean regressor; LR, lasso regressor; M, mean; SD, standard deviation; <italic>d</italic>, Cohen&#x00027;s d.</p>
</table-wrap-foot>
</table-wrap>
<p>Both spectral and temporal features contributed to the predictions, but their importance scores varied substantially across datasets and targets. The highest median score was observed for <italic>p</italic><sub>&#x003B1;</sub> (Med &#x0003D; 0.139, IQR &#x0003D; 0.190), whereas the lowest was for <italic>M</italic> (Med &#x0003D; 0.034, IQR &#x0003D; 0.037). Age and Group appeared as predictors in two models each: Age in Sw with Be and Ac with Co, while Group in Ac with Co and As with BeCo. Group achieved the highest importance, not only compared to Age but also across all features (Med &#x0003D; 0.252, IQR &#x0003D; 0.136). <xref ref-type="table" rid="T3">Table 3</xref> summarizes the channel-averaged feature importance scores, split by dataset and target.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Feature importance scores, split for dataset and target.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold><inline-formula><mml:math id="M20"><mml:mstyle mathvariant="bold"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>D</mml:mi></mml:mstyle></mml:mrow></mml:mstyle></mml:math></inline-formula></bold></th>
<th valign="top" align="center"><bold><inline-formula><mml:math id="M21"><mml:mstyle mathvariant="bold"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>T</mml:mi></mml:mstyle></mml:mrow></mml:mstyle></mml:math></inline-formula></bold></th>
<th valign="top" align="center"><bold>Age</bold></th>
<th valign="top" align="center"><bold>Group</bold></th>
<th valign="top" align="center"><bold><italic>p</italic><sub>&#x003B4;</sub></bold></th>
<th valign="top" align="center"><bold><italic>p</italic><sub>&#x003B8;</sub></bold></th>
<th valign="top" align="center"><bold><italic>p</italic><sub>&#x003B1;</sub></bold></th>
<th valign="top" align="center"><bold><italic>p</italic><sub>&#x003B2;</sub></bold></th>
<th valign="top" align="center"><bold><italic>p</italic><sub>&#x003B3;</sub></bold></th>
<th valign="top" align="center"><bold>A</bold></th>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>C</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Be</td>
<td valign="top" align="center">Bi</td>
<td/>
<td/>
<td valign="top" align="center">0.076</td>
<td valign="top" align="center">0.115</td>
<td valign="top" align="center">0.454</td>
<td valign="top" align="center">0.055</td>
<td valign="top" align="center">0.009</td>
<td valign="top" align="center">0.009</td>
<td valign="top" align="center">0.005</td>
<td valign="top" align="center">0.276</td>
</tr>
<tr>
<td valign="top" align="left">Be</td>
<td valign="top" align="center">Sw</td>
<td valign="top" align="center">0.158</td>
<td/>
<td valign="top" align="center">0.003</td>
<td valign="top" align="center">0.135</td>
<td valign="top" align="center">0.080</td>
<td valign="top" align="center">0.123</td>
<td valign="top" align="center">0.037</td>
<td valign="top" align="center">0.355</td>
<td valign="top" align="center">0.037</td>
<td valign="top" align="center">0.072</td>
</tr>
<tr>
<td valign="top" align="left">Co</td>
<td valign="top" align="center">Ac</td>
<td valign="top" align="center">0.088</td>
<td valign="top" align="center">0.388</td>
<td/>
<td valign="top" align="center">0.035</td>
<td valign="top" align="center">0.048</td>
<td valign="top" align="center">0.084</td>
<td valign="top" align="center">0.116</td>
<td valign="top" align="center">0.039</td>
<td valign="top" align="center">0.030</td>
<td valign="top" align="center">0.173</td>
</tr>
<tr>
<td valign="top" align="left">BeCo</td>
<td valign="top" align="center">As</td>
<td/>
<td valign="top" align="center">0.116</td>
<td valign="top" align="center">0.089</td>
<td valign="top" align="center">0.121</td>
<td valign="top" align="center">0.198</td>
<td valign="top" align="center">0.140</td>
<td valign="top" align="center">0.073</td>
<td valign="top" align="center">0.069</td>
<td valign="top" align="center">0.131</td>
<td valign="top" align="center">0.063</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p><inline-formula><mml:math id="M22"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>D</mml:mi></mml:mstyle></mml:mrow></mml:math></inline-formula>, dataset; <inline-formula><mml:math id="M23"><mml:mrow><mml:mstyle mathvariant="script"><mml:mi>T</mml:mi></mml:mstyle></mml:mrow></mml:math></inline-formula>, target; Age, participant&#x00027;s age; Group, expertise level; <italic>p</italic><sub><italic>B</italic></sub>, normalized spectral power in band <italic>B</italic> &#x0003D; {&#x003B4;, &#x003B8;, &#x003B1;, &#x003B2;, &#x003B3;}; <italic>A</italic>, activity; <italic>M</italic>, mobility; <italic>C</italic>, complexity.</p>
</table-wrap-foot>
</table-wrap>
<p>The topographic plot in <xref ref-type="fig" rid="F3">Figure 3</xref> qualitatively identified central, occipital, parietal, and frontal regions as most important for the overall prediction.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Topographic distribution of the feature importance scores.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1661214-g0003.tif">
<alt-text>Topographic map showing feature importance across EEG channels, with colors ranging from blue to red indicating varying intensities. A color bar on the right shows values from negative 0.06 to positive 0.06, representing feature importance in arbitrary units.</alt-text>
</graphic>
</fig>
</sec>
<sec sec-type="discussion" id="s5">
<title>5 Discussion</title>
<p>In this study, we trained Least-Squares Boosted Trees (LSBoost) with spectral and temporal EEG features to predict sensory attributes&#x02014;bitterness (Bi), sweetness (Sw), acidity (Ac), and astringency (As)&#x02014;of Coffee (Co) and basic solutions (Be). The best configuration of hyperparameters and data normalization was obtained through a Bayesian optimisation approach, following a Leave-One-Subject-Out (LOSO) scheme. The LSBoost&#x00027;s performances were compared with mean (Me) and lasso (LR) regressors through Cohen&#x00027;s <italic>d</italic> coefficients, and the feature importance for type and channel was assessed from the trees&#x00027; coefficients.</p>
<p>The significant models achieved high performances, with an average anti-log MAE of about 7% of the scale range and a medium-to-large (<italic>d</italic><sub><italic>Me, LR</italic></sub>&#x0003E;0.5) (<xref ref-type="bibr" rid="B15">Cohen, 1992</xref>) robustness against the benchmarks. The lowest MAE of Ac is in line with previous studies that identified sour as the best predictable flavor, with the highest <italic>R</italic><sup>2</sup> coefficient in <xref ref-type="bibr" rid="B74">Zhao et al. (2022)</xref> and the second (after salty) highest accuracy in <xref ref-type="bibr" rid="B41">Li et al. (2025)</xref>. Compared to other dimensions, Sw has already shown poor performances (<xref ref-type="bibr" rid="B18">De et al., 2023</xref>) and low feature discriminability (<xref ref-type="bibr" rid="B69">Xia et al., 2024</xref>), supporting the obtained highest MAE and lowest <italic>d</italic> coefficients. Finally, the robustness of Bi against benchmarks may reflect the well-known evolutionary adaptation in vertebrates toward heightened bitter taste sensitivity for early toxin detection and avoidance (<xref ref-type="bibr" rid="B68">Wooding et al., 2021</xref>).</p>
<p>The feature importance of <italic>p</italic><sub>&#x003B1;</sub>, <italic>p</italic><sub>&#x003B2;</sub>, and <italic>p</italic><sub>&#x003B3;</sub> is in line with past studies that effectively trained ML models using spectral powers in &#x003B1;, &#x003B2;, and &#x003B3; bands (<xref ref-type="bibr" rid="B18">De et al., 2023</xref>; <xref ref-type="bibr" rid="B66">Vo et al., 2023</xref>). The role of <italic>p</italic><sub>&#x003B4;</sub> and <italic>p</italic><sub>&#x003B8;</sub> as key predictors is supported by previous research studies that identified differences in &#x003B4; and &#x003B8; bands during flavor evaluation (<xref ref-type="bibr" rid="B67">Wallroth et al., 2018</xref>; <xref ref-type="bibr" rid="B72">Yang et al., 2023</xref>). Overall, the involvement of EEG features from specific central, parietal, and frontal regions has already been observed in predictive (<xref ref-type="bibr" rid="B41">Li et al., 2025</xref>) and experimental (<xref ref-type="bibr" rid="B40">Lejap et al., 2024</xref>) studies. The contribution of temporal parameters, represented by <italic>C, M</italic>, and <italic>A</italic>, matches the good performance of past deep-learning models (e.g., CNNs and RNNs) trained with the raw EEG signal (<xref ref-type="bibr" rid="B18">De et al., 2023</xref>; <xref ref-type="bibr" rid="B69">Xia et al., 2024</xref>). Finally, the significance of Group and Age could be related to the previously reported influence of expertise (<xref ref-type="bibr" rid="B16">Croijmans and Majid, 2016</xref>) and aging (<xref ref-type="bibr" rid="B44">Mojet, 2003</xref>) on sensory evaluations. However, the reasons why they had an impact in only two models require further investigation.</p>
<p>This study acknowledges some limitations. First, despite being in line with DSA studies that typically involve 5&#x02013;15 experts (<xref ref-type="bibr" rid="B26">Gacula and Rutenbeck, 2006</xref>), the sample size must still be considered limited. Increasing it in both magnitude and heterogeneity (e.g., adding non-expert tasters and accounting for their coffee-consumption frequency) would potentially improve not only the performance but also the generalisability of the models. An increase in dataset size would also yield less noisy results, which is particularly relevant in chemo-sensory studies. In fact, despite the use of advanced denoising techniques and the selection of short epochs with minimal muscular artifacts, the data quality in the present study should still be regarded as suboptimal. Second, the experiment has not accounted for confounders given by the fixed Be-Co order, the temperature difference between the Be and Co samples, as well as potential visual cues (e.g., the colors of the liquids), potentially biasing the sensory analyses (<xref ref-type="bibr" rid="B20">Delwiche, 2023</xref>). Future confirmatory studies should be, thus, based on a fully-randomized and truly-blind design. Third, our models were trained and validated in a single session per subject. Although this is standard practice in multivariate-pattern-analysis with brain-imaging data (<xref ref-type="bibr" rid="B59">Taxali et al., 2021</xref>), it prevented us from quantifying the test-retest reliability of the models. Future works should acquire longitudinal recordings&#x02014;at least a second session separated by days or weeks&#x02014;to determine the stability of features and models over time.</p>
<p>Nevertheless, our exploratory study endorses the use of regression techniques based on EEG data in flavor assessment, as an alternative to self-report sensory evaluations.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Ethical Committee of Universit&#x000E0; IULM. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>MBi: Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing, Formal analysis, Methodology. MBe: Investigation, Conceptualization, Writing &#x02013; review &#x00026; editing. AF: Writing &#x02013; review &#x00026; editing, Data curation, Conceptualization. VR: Funding acquisition, Writing &#x02013; review &#x00026; editing, Supervision. MZ: Writing &#x02013; review &#x00026; editing, Supervision, Project administration.</p>
</sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. The authors declare that this study received funding from Lavazza Group. The funder was not involved in the study design, collection, analysis, interpretation of data, the writing of this article or the decision to submit it for publication.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author(s) declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec sec-type="ai-statement" id="s10">
<title>Generative AI statement</title>
<p>The author(s) declare that Gen AI was used in the creation of this manuscript. Generative AI (ChatGPT by OpenAI) was used solely for (i) grammar and typo corrections and (ii) generating elements of <xref ref-type="fig" rid="F1">Figure 1</xref> (schematic representations of the experimental protocol).</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abubakar</surname> <given-names>Y.</given-names></name> <name><surname>Gemasih</surname> <given-names>T.</given-names></name> <name><surname>Muzaifa</surname> <given-names>M.</given-names></name> <name><surname>Hasni</surname> <given-names>D.</given-names></name> <name><surname>Sulaiman</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>Effect of blend percentage and roasting degree on sensory quality of arabica-robusta coffee blend</article-title>. <source>IOP Conf. Ser. Earth Environ. Sci</source>. <volume>425</volume>:<fpage>12081</fpage>. <pub-id pub-id-type="doi">10.1088/1755-1315/425/1/012081</pub-id></citation>
</ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Anbarasan</surname> <given-names>R.</given-names></name> <name><surname>Gomez Carmona</surname> <given-names>D.</given-names></name> <name><surname>Mahendran</surname> <given-names>R.</given-names></name></person-group> (<year>2022</year>). <article-title>Human taste-perception: brain computer interface (BCI) and its application as an engineering tool for taste-driven sensory studies</article-title>. <source>Food Eng. Rev</source>. <volume>14</volume>, <fpage>408</fpage>&#x02013;<lpage>434</lpage>. <pub-id pub-id-type="doi">10.1007/s12393-022-09308-0</pub-id></citation>
</ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Apicella</surname> <given-names>A.</given-names></name> <name><surname>Isgr</surname> <given-names>F.</given-names></name> <name><surname>Pollastro</surname> <given-names>A.</given-names></name> <name><surname>Prevete</surname> <given-names>R.</given-names></name></person-group> (<year>2023</year>). <article-title>On the effects of data normalization for domain adaptation on EEG data</article-title>. <source>Eng. Appl. Artif. Intell</source>. <volume>123</volume>:<fpage>106205</fpage>. <pub-id pub-id-type="doi">10.1016/j.engappai.2023.106205</pub-id></citation>
</ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Arevalillo-Herr&#x000E1;ez</surname> <given-names>M.</given-names></name> <name><surname>Cobos</surname> <given-names>M.</given-names></name> <name><surname>Roger</surname> <given-names>S.</given-names></name> <name><surname>Garc&#x000ED;a-Pineda</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Combining inter-subject modeling with a subject-based data transformation to improve affect recognition from EEG signals</article-title>. <source>Sensors</source> <volume>19</volume>:<fpage>2999</fpage>. <pub-id pub-id-type="doi">10.3390/s19132999</pub-id><pub-id pub-id-type="pmid">31288378</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Batali</surname> <given-names>M. E.</given-names></name> <name><surname>Lim</surname> <given-names>L. X.</given-names></name> <name><surname>Liang</surname> <given-names>J.</given-names></name> <name><surname>Yeager</surname> <given-names>S. E.</given-names></name> <name><surname>Thompson</surname> <given-names>A. N.</given-names></name> <name><surname>Han</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Sensory analysis of full immersion coffee: cold brew is more floral, and less bitter, sour, and rubbery than hot brew</article-title>. <source>Foods</source> <volume>11</volume>:<fpage>2440</fpage>. <pub-id pub-id-type="doi">10.3390/foods11162440</pub-id><pub-id pub-id-type="pmid">36010440</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Bilucaglia</surname> <given-names>M.</given-names></name> <name><surname>Laureanti</surname> <given-names>R.</given-names></name> <name><surname>Circi</surname> <given-names>R.</given-names></name> <name><surname>Zito</surname> <given-names>M.</given-names></name> <name><surname>Bellati</surname> <given-names>M.</given-names></name> <name><surname>Fici</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>&#x0201C;Spectral differences in resting-state EEG associated to individual emotional styles,&#x0201D;</article-title> in <source>2022 44th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</source> (<publisher-loc>Glasgow</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>4052</fpage>&#x02013;<lpage>4055</lpage>. <pub-id pub-id-type="doi">10.1109/EMBC48229.2022.9871191</pub-id><pub-id pub-id-type="pmid">36086662</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Bilucaglia</surname> <given-names>M.</given-names></name> <name><surname>Laureanti</surname> <given-names>R.</given-names></name> <name><surname>Zito</surname> <given-names>M.</given-names></name> <name><surname>Circi</surname> <given-names>R.</given-names></name> <name><surname>Fici</surname> <given-names>A.</given-names></name> <name><surname>Rivetti</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>&#x0201C;Looking through blue glasses: bioelectrical measures to assess the awakening after a calm situation,&#x0201D;</article-title> in <source>2019 41st Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</source> (<publisher-loc>Berlin</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>526</fpage>&#x02013;<lpage>529</lpage>. <pub-id pub-id-type="doi">10.1109/EMBC.2019.8856486</pub-id><pub-id pub-id-type="pmid">31945953</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bilucaglia</surname> <given-names>M.</given-names></name> <name><surname>Masi</surname> <given-names>R.</given-names></name> <name><surname>Stanislao</surname> <given-names>G. D.</given-names></name> <name><surname>Laureanti</surname> <given-names>R.</given-names></name> <name><surname>Fici</surname> <given-names>A.</given-names></name> <name><surname>Circi</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Esb: a low-cost EEG synchronization box</article-title>. <source>HardwareX</source> <volume>8</volume>:<fpage>e00125</fpage>. <pub-id pub-id-type="doi">10.1016/j.ohx.2020.e00125</pub-id><pub-id pub-id-type="pmid">35498268</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bilucaglia</surname> <given-names>M.</given-names></name> <name><surname>Zito</surname> <given-names>M.</given-names></name> <name><surname>Fici</surname> <given-names>A.</given-names></name> <name><surname>Casiraghi</surname> <given-names>C.</given-names></name> <name><surname>Rivetti</surname> <given-names>F.</given-names></name> <name><surname>Bellati</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>I dare: iulm dataset of affective responses</article-title>. <source>Front. Hum. Neurosci</source>. <volume>18</volume>:<fpage>1347327</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2024.1347327</pub-id><pub-id pub-id-type="pmid">38571521</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bokil</surname> <given-names>H.</given-names></name> <name><surname>Andrews</surname> <given-names>P.</given-names></name> <name><surname>Kulkarni</surname> <given-names>J. E.</given-names></name> <name><surname>Mehta</surname> <given-names>S.</given-names></name> <name><surname>Mitra</surname> <given-names>P. P.</given-names></name></person-group> (<year>2010</year>). <article-title>Chronux: a platform for analyzing neural signals</article-title>. <source>J. Neurosci. Methods</source> <volume>192</volume>, <fpage>146</fpage>&#x02013;<lpage>151</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2010.06.020</pub-id><pub-id pub-id-type="pmid">20637804</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Borghini</surname> <given-names>G.</given-names></name> <name><surname>Aric&#x000F3;</surname> <given-names>P.</given-names></name> <name><surname>Flumeri</surname> <given-names>G. D.</given-names></name> <name><surname>Sciaraffa</surname> <given-names>N.</given-names></name> <name><surname>Babiloni</surname> <given-names>F.</given-names></name></person-group> (<year>2019</year>). <article-title>Correlation and similarity between cerebral and non-cerebral electrical activity for user&#x00027;s states assessment</article-title>. <source>Sensors</source> <volume>19</volume>:<fpage>704</fpage>. <pub-id pub-id-type="doi">10.3390/s19030704</pub-id><pub-id pub-id-type="pmid">30744081</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>B&#x000FC;hlmann</surname> <given-names>P.</given-names></name> <name><surname>Van De Geer</surname> <given-names>S.</given-names></name></person-group> (<year>2011</year>). <source>Statistics for High-Dimensional Data: Methods, Theory and Applications</source>. Springer Science and Business Media: New York. <pub-id pub-id-type="doi">10.1007/978-3-642-20192-9</pub-id></citation>
</ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chang</surname> <given-names>C.-Y.</given-names></name> <name><surname>Hsu</surname> <given-names>S.-H.</given-names></name> <name><surname>Pion-Tonachini</surname> <given-names>L.</given-names></name> <name><surname>Jung</surname> <given-names>T.-P.</given-names></name></person-group> (<year>2020</year>). <article-title>Evaluation of artifact subspace reconstruction for automatic artifact components removal in multi-channel EEG recordings</article-title>. <source>IEEE Trans. Biomed. Eng</source>. <volume>67</volume>, <fpage>1114</fpage>&#x02013;<lpage>1121</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2019.2930186</pub-id><pub-id pub-id-type="pmid">31329105</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Civille</surname> <given-names>G. V.</given-names></name> <name><surname>Carr</surname> <given-names>B. T.</given-names></name> <name><surname>Osdoba</surname> <given-names>K. E.</given-names></name></person-group> (<year>2024</year>). <source>Sensory Evaluation Techniques</source>. <publisher-loc>Boca Raton, FL</publisher-loc>: <publisher-name>CRC Press</publisher-name>. <pub-id pub-id-type="doi">10.1201/9781003352082</pub-id></citation>
</ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cohen</surname> <given-names>J.</given-names></name></person-group> (<year>1992</year>). <article-title>A power primer</article-title>. <source>Psychol. Bull</source>. <volume>112</volume>, <fpage>155</fpage>&#x02013;<lpage>159</lpage>. <pub-id pub-id-type="doi">10.1037//0033-2909.112.1.155</pub-id><pub-id pub-id-type="pmid">19565683</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Croijmans</surname> <given-names>I.</given-names></name> <name><surname>Majid</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>Not all flavor expertise is equal: the language of wine and coffee experts</article-title>. <source>PLoS ONE</source> <volume>11</volume>:<fpage>e0155845</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0155845</pub-id><pub-id pub-id-type="pmid">27322035</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Crouzet</surname> <given-names>S. M.</given-names></name> <name><surname>Busch</surname> <given-names>N. A.</given-names></name> <name><surname>Ohla</surname> <given-names>K.</given-names></name></person-group> (<year>2015</year>). <article-title>Taste quality decoding parallels taste sensations</article-title>. <source>Curr. Biol</source>. <volume>25</volume>, <fpage>890</fpage>&#x02013;<lpage>896</lpage>. <pub-id pub-id-type="doi">10.1016/j.cub.2015.01.057</pub-id><pub-id pub-id-type="pmid">25772445</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>De</surname> <given-names>S.</given-names></name> <name><surname>Mukherjee</surname> <given-names>P.</given-names></name> <name><surname>Konar</surname> <given-names>D.</given-names></name> <name><surname>Roy</surname> <given-names>A. H.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;EEG-based taste perception classification using pca enhanced attention-tlstm neural network,&#x0201D;</article-title> in <source>2023 4th International Conference on Communication, Computing and Industry 6.0 (C216)</source> (<publisher-loc>Bangalore</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1109/C2I659362.2023.10430968</pub-id></citation>
</ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Delorme</surname> <given-names>A.</given-names></name> <name><surname>Makeig</surname> <given-names>S.</given-names></name></person-group> (<year>2004</year>). <article-title>EEGLAB: an open source toolbox for analysis of single-trial EEG dynamics including independent component analysis</article-title>. <source>J. Neurosci. Methods</source> <volume>134</volume>, <fpage>9</fpage>&#x02013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2003.10.009</pub-id><pub-id pub-id-type="pmid">15102499</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Delwiche</surname> <given-names>J.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Psychological considerations in sensory analysis,&#x0201D;</article-title> in <source>The Sensory Evaluation of Dairy Products</source>, eds. S. Clark, M. Drake, and K. Kaylegian (Springer Cham), <fpage>9</fpage>&#x02013;<lpage>17</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-031-30019-6_2</pub-id></citation>
</ref>
<ref id="B21">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Di Flumeri</surname> <given-names>G.</given-names></name> <name><surname>Arico</surname> <given-names>P.</given-names></name> <name><surname>Borghini</surname> <given-names>G.</given-names></name> <name><surname>Sciaraffa</surname> <given-names>N.</given-names></name> <name><surname>Maglione</surname> <given-names>A. G.</given-names></name> <name><surname>Rossi</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>&#x0201C;EEG-based approach-withdrawal index for the pleasantness evaluation during taste experience in realistic settings,&#x0201D;</article-title> in <source>2017 39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</source> (<publisher-loc>Jeju</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>3228</fpage>&#x02013;<lpage>3231</lpage>. <pub-id pub-id-type="doi">10.1109/EMBC.2017.8037544</pub-id><pub-id pub-id-type="pmid">29060585</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Faul</surname> <given-names>F.</given-names></name> <name><surname>Erdfelder</surname> <given-names>E.</given-names></name> <name><surname>Lang</surname> <given-names>A. G.</given-names></name> <name><surname>Buchner</surname> <given-names>A.</given-names></name></person-group> (<year>2007</year>). <article-title>G&#x0002A;power 3: a flexible statistical power analysis program for the social, behavioral, and biomedical sciences</article-title>. <source>Behav. Res. Methods</source> <volume>39</volume>, <fpage>175</fpage>&#x02013;<lpage>191</lpage>. <pub-id pub-id-type="doi">10.3758/BF03193146</pub-id><pub-id pub-id-type="pmid">17695343</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fazli</surname> <given-names>S.</given-names></name> <name><surname>Popescu</surname> <given-names>F.</given-names></name> <name><surname>Dan&#x000F3;czy</surname> <given-names>M.</given-names></name> <name><surname>Blankertz</surname> <given-names>B.</given-names></name> <name><surname>M&#x000FC;ller</surname> <given-names>K.-R.</given-names></name> <name><surname>Grozea</surname> <given-names>C.</given-names></name></person-group> (<year>2009</year>). <article-title>Subject-independent mental state classification in single trials</article-title>. <source>Neural Netw</source>. <volume>22</volume>, <fpage>1305</fpage>&#x02013;<lpage>1312</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2009.06.003</pub-id><pub-id pub-id-type="pmid">19560898</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Feria-Morales</surname> <given-names>A. M.</given-names></name></person-group> (<year>2002</year>). <article-title>Examining the case of green coffee to illustrate the limitations of grading systems/expert tasters in sensory evaluation for quality control</article-title>. <source>Food Qual. Prefer</source>. <volume>13</volume>, <fpage>355</fpage>&#x02013;<lpage>367</lpage>. <pub-id pub-id-type="doi">10.1016/S0950-3293(02)00028-9</pub-id></citation>
</ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Friedman</surname> <given-names>J. H.</given-names></name></person-group> (<year>2001</year>). <article-title>Greedy function approximation: a gradient boosting machine</article-title>. <source>Ann. Stat</source>. <volume>29</volume>, <fpage>1189</fpage>&#x02013;<lpage>1232</lpage>. <pub-id pub-id-type="doi">10.1214/aos/1013203451</pub-id><pub-id pub-id-type="pmid">38281721</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gacula</surname> <given-names>M.</given-names></name> <name><surname>Rutenbeck</surname> <given-names>S.</given-names></name></person-group> (<year>2006</year>). <article-title>Sample size in consumer test and descriptive analysis</article-title>. <source>J. Sens. Stud</source>. <volume>21</volume>, <fpage>129</fpage>&#x02013;<lpage>145</lpage>. <pub-id pub-id-type="doi">10.1111/j.1745-459X.2006.00055.x</pub-id></citation>
</ref>
<ref id="B27">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Gonz&#x000E1;lez-Espa&#x000F1;a</surname> <given-names>J. J.</given-names></name> <name><surname>Back</surname> <given-names>K.-J.</given-names></name> <name><surname>Reynolds</surname> <given-names>D.</given-names></name> <name><surname>Contreras-Vidal</surname> <given-names>J. L.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Decoding taste from EEG: gustatory evoked potentials during wine tasting,&#x0201D;</article-title> in <source>2023 IEEE International Conference on Systems, Man, and Cybernetics (SMC)</source> (<publisher-loc>Honolulu, HI</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>4253</fpage>&#x02013;<lpage>4258</lpage>. <pub-id pub-id-type="doi">10.1109/SMC53992.2023.10394408</pub-id></citation>
</ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Goulet-Pelletier</surname> <given-names>J.-C.</given-names></name> <name><surname>Cousineau</surname> <given-names>D.</given-names></name></person-group> (<year>2018</year>). <article-title>A review of effect sizes and their confidence intervals, part I: the cohen&#x00027;sd family</article-title>. <source>Quant. Methods Psychol</source>. <volume>14</volume>, <fpage>242</fpage>&#x02013;<lpage>265</lpage>. <pub-id pub-id-type="doi">10.20982/tqmp.14.4.p242</pub-id></citation>
</ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>He</surname> <given-names>J.</given-names></name> <name><surname>Yang</surname> <given-names>L.</given-names></name> <name><surname>Liu</surname> <given-names>D.</given-names></name> <name><surname>Song</surname> <given-names>Z.</given-names></name></person-group> (<year>2022</year>). <article-title>Automatic recognition of high-density epileptic EEG using support vector machine and gradient-boosting decision tree</article-title>. <source>Brain Sci</source>. <volume>12</volume>:<fpage>1197</fpage>. <pub-id pub-id-type="doi">10.3390/brainsci12091197</pub-id><pub-id pub-id-type="pmid">36138933</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hjorth</surname> <given-names>B.</given-names></name></person-group> (<year>1970</year>). <article-title>EEG analysis based on time domain properties</article-title>. <source>Electroencephalogr. Clin. Neurophysiol</source>. <volume>29</volume>, <fpage>306</fpage>&#x02013;<lpage>310</lpage>. <pub-id pub-id-type="doi">10.1016/0013-4694(70)90143-4</pub-id><pub-id pub-id-type="pmid">4195653</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hsu</surname> <given-names>L.</given-names></name> <name><surname>Chen</surname> <given-names>Y.-J.</given-names></name></person-group> (<year>2021</year>). <article-title>Does coffee taste better with latte art? A neuroscientific perspective</article-title>. <source>Br. Food J</source>. <volume>123</volume>, <fpage>1931</fpage>&#x02013;<lpage>1946</lpage>. <pub-id pub-id-type="doi">10.1108/BFJ-07-2020-0612</pub-id></citation>
</ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hussain</surname> <given-names>L.</given-names></name> <name><surname>Saeed</surname> <given-names>S.</given-names></name> <name><surname>Idris</surname> <given-names>A.</given-names></name> <name><surname>Awan</surname> <given-names>I. A.</given-names></name> <name><surname>Shah</surname> <given-names>S. A.</given-names></name> <name><surname>Majid</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Regression analysis for detecting epileptic seizure with different feature extracting strategies</article-title>. <source>Biomed. Eng./Biomed. Tech</source>. <volume>64</volume>, <fpage>619</fpage>&#x02013;<lpage>642</lpage>. <pub-id pub-id-type="doi">10.1515/bmt-2018-0012</pub-id><pub-id pub-id-type="pmid">31145684</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Isabona</surname> <given-names>J.</given-names></name> <name><surname>Imoize</surname> <given-names>A. L.</given-names></name> <name><surname>Kim</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Machine learning-based boosted regression ensemble combined with hyperparameter tuning for optimal adaptive learning</article-title>. <source>Sensors</source> <volume>22</volume>:<fpage>3776</fpage>. <pub-id pub-id-type="doi">10.3390/s22103776</pub-id><pub-id pub-id-type="pmid">35632184</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jayakumar</surname> <given-names>M.</given-names></name> <name><surname>Rajavel</surname> <given-names>M.</given-names></name> <name><surname>Surendran</surname> <given-names>U.</given-names></name> <name><surname>Gopinath</surname> <given-names>G.</given-names></name> <name><surname>Ramamoorthy</surname> <given-names>K.</given-names></name></person-group> (<year>2017</year>). <article-title>Impact of climate variability on coffee yield in India&#x02014;with a micro-level case study using long-term coffee yield data of humid tropical kerala</article-title>. <source>Clim. Change</source> <volume>145</volume>, <fpage>335</fpage>&#x02013;<lpage>349</lpage>. <pub-id pub-id-type="doi">10.1007/s10584-017-2101-2</pub-id></citation>
</ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Keene</surname> <given-names>O. N.</given-names></name></person-group> (<year>1995</year>). <article-title>The log transformation is special</article-title>. <source>Stat. Med</source>. <volume>14</volume>, <fpage>811</fpage>&#x02013;<lpage>819</lpage>. <pub-id pub-id-type="doi">10.1002/sim.4780140810</pub-id><pub-id pub-id-type="pmid">7644861</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>H.</given-names></name> <name><surname>Chang</surname> <given-names>C.-Y.</given-names></name> <name><surname>Kothe</surname> <given-names>C.</given-names></name> <name><surname>Iversen</surname> <given-names>J. R.</given-names></name> <name><surname>Miyakoshi</surname> <given-names>M.</given-names></name></person-group> (<year>2025</year>). <article-title>Juggler&#x00027;s ASR: unpacking the principles of artifact subspace reconstruction for revision toward extreme mobi</article-title>. <source>J. Neurosci. Methods</source> <volume>420</volume>:<fpage>110465</fpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2025.110465</pub-id><pub-id pub-id-type="pmid">40324599</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>Y.</given-names></name> <name><surname>An</surname> <given-names>J.</given-names></name> <name><surname>Lee</surname> <given-names>J.</given-names></name></person-group> (<year>2025</year>). <article-title>The espresso protocol as a tool for sensory quality evaluation</article-title>. <source>Food Res. Int</source>. <volume>202</volume>:<fpage>115670</fpage>. <pub-id pub-id-type="doi">10.1016/j.foodres.2025.115670</pub-id><pub-id pub-id-type="pmid">39967142</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Klimesch</surname> <given-names>W.</given-names></name></person-group> (<year>1999</year>). <article-title>EEG alpha and theta oscillations reflect cognitive and memory performance: a review and analysis</article-title>. <source>Brain Res. Rev</source>. <volume>29</volume>, <fpage>169</fpage>&#x02013;<lpage>195</lpage>. <pub-id pub-id-type="doi">10.1016/S0165-0173(98)00056-3</pub-id><pub-id pub-id-type="pmid">10209231</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kotthoff</surname> <given-names>L.</given-names></name> <name><surname>Thornton</surname> <given-names>C.</given-names></name> <name><surname>Hoos</surname> <given-names>H. H.</given-names></name> <name><surname>Hutter</surname> <given-names>F.</given-names></name> <name><surname>Leyton-Brown</surname> <given-names>K.</given-names></name></person-group> (<year>2017</year>). <article-title>Auto-weka 2.0: automatic model selection and hyperparameter optimization in weka</article-title>. <source>J. Mach. Learn. Res</source>. <volume>18</volume>, <fpage>1</fpage>&#x02013;<lpage>5</lpage>.</citation>
</ref>
<ref id="B40">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lejap</surname> <given-names>M. Y. L.</given-names></name> <name><surname>Wibawa</surname> <given-names>A. D.</given-names></name> <name><surname>Mukti</surname> <given-names>P. H.</given-names></name></person-group> (<year>2024</year>). <article-title>&#x0201C;EEG brain heatmap visualization for sweet and sour tasting,&#x0201D;</article-title> in <source>2024 International Seminar on Intelligent Technology and Its Applications (ISITIA)</source> (<publisher-loc>Mataram</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>367</fpage>&#x02013;<lpage>372</lpage>. <pub-id pub-id-type="doi">10.1109/ISITIA63062.2024.10668352</pub-id></citation>
</ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Feng</surname> <given-names>X.</given-names></name> <name><surname>Liu</surname> <given-names>Z.</given-names></name> <name><surname>Wang</surname> <given-names>W.</given-names></name> <name><surname>Tian</surname> <given-names>L.</given-names></name> <name><surname>Xu</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>The influence of different flavor peptides on brain perception via scalp electroencephalogram and development of a taste model</article-title>. <source>Food Chem</source>. <volume>465</volume>:<fpage>141953</fpage>. <pub-id pub-id-type="doi">10.1016/j.foodchem.2024.141953</pub-id><pub-id pub-id-type="pmid">39561591</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>J.</given-names></name> <name><surname>Streletskaya</surname> <given-names>N. A.</given-names></name> <name><surname>G&#x000F3;mez</surname> <given-names>M. I.</given-names></name></person-group> (<year>2019</year>). <article-title>Does taste sensitivity matter? The effect of coffee sensory tasting information and taste sensitivity on consumer preferences</article-title>. <source>Food Qual. Prefer</source>. <volume>71</volume>, <fpage>447</fpage>&#x02013;<lpage>451</lpage>. <pub-id pub-id-type="doi">10.1016/j.foodqual.2018.08.006</pub-id></citation>
</ref>
<ref id="B43">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Maram</surname> <given-names>M.</given-names></name> <name><surname>Khalil</surname> <given-names>M. A.</given-names></name> <name><surname>George</surname> <given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Analysis of consumer coffee brand preferences using brain-computer interface and deep learning,&#x0201D;</article-title> in <source>2023 IEEE 7th International Conference on Information Technology, Information Systems and Electrical Engineering (ICITISEE)</source> (<publisher-loc>Purwokerto</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>227</fpage>&#x02013;<lpage>232</lpage>. <pub-id pub-id-type="doi">10.1109/ICITISEE58992.2023.10404368</pub-id></citation>
</ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mojet</surname> <given-names>J.</given-names></name></person-group> (<year>2003</year>). <article-title>Taste perception with age: generic or specific losses in supra-threshold intensities of five taste qualities?</article-title> <source>Chem. Senses</source> <volume>28</volume>, <fpage>397</fpage>&#x02013;<lpage>413</lpage>. <pub-id pub-id-type="doi">10.1093/chemse/28.5.397</pub-id><pub-id pub-id-type="pmid">12826536</pub-id></citation></ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Naser</surname> <given-names>A.</given-names></name> <name><surname>Aydemir</surname> <given-names>&#x000D6;.</given-names></name></person-group> (<year>2024</year>). <article-title>Enhancing EEG signal classification with a novel random subset channel selection approach: applications in taste, odor, and motor imagery analysis</article-title>. <source>IEEE Access</source> <volume>12</volume>, <fpage>145608</fpage>&#x02013;<lpage>145618</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2024.3473810</pub-id></citation>
</ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nathan</surname> <given-names>M. J.</given-names></name> <name><surname>Del Pinal</surname> <given-names>G.</given-names></name></person-group> (<year>2017</year>). <article-title>The future of cognitive neuroscience? Reverse inference in focus</article-title>. <source>Philos. Compass</source> <volume>12</volume>:<fpage>e12427</fpage>. <pub-id pub-id-type="doi">10.1111/phc3.12427</pub-id></citation>
</ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nuwer</surname> <given-names>M. R.</given-names></name></person-group> (<year>2018</year>). <article-title>10-10 electrode system for EEG recording</article-title>. <source>Clin. Neurophysiol</source>. <volume>129</volume>:<fpage>1103</fpage>. <pub-id pub-id-type="doi">10.1016/j.clinph.2018.01.065</pub-id><pub-id pub-id-type="pmid">29496396</pub-id></citation></ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pion-Tonachini</surname> <given-names>L.</given-names></name> <name><surname>Kreutz-Delgado</surname> <given-names>K.</given-names></name> <name><surname>Makeig</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). <article-title>Iclabel: an automated electroencephalographic independent component classifier, dataset, and website</article-title>. <source>Neuroimage</source> <volume>198</volume>, <fpage>181</fpage>&#x02013;<lpage>197</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.05.026</pub-id><pub-id pub-id-type="pmid">31103785</pub-id></citation></ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Poldrack</surname> <given-names>R. A.</given-names></name></person-group> (<year>2006</year>). <article-title>Can cognitive processes be inferred from neuroimaging data?</article-title> <source>Trends Cogn. Sci</source>. <volume>10</volume>, <fpage>59</fpage>&#x02013;<lpage>63</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2005.12.004</pub-id><pub-id pub-id-type="pmid">16406760</pub-id></citation></ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rodrigues</surname> <given-names>S. S. Q.</given-names></name> <name><surname>Dias</surname> <given-names>L. G.</given-names></name> <name><surname>Teixeira</surname> <given-names>A.</given-names></name></person-group> (<year>2024</year>). <article-title>Emerging methods for the evaluation of sensory quality of food: technology at service</article-title>. <source>Curr. Food Sci. Technol. Rep</source>. <volume>2</volume>, <fpage>77</fpage>&#x02013;<lpage>90</lpage>. <pub-id pub-id-type="doi">10.1007/s43555-024-00019-7</pub-id></citation>
</ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rolls</surname> <given-names>E.</given-names></name></person-group> (<year>2005</year>). <article-title>Taste, olfactory, and food texture processing in the brain, and the control of food intake</article-title>. <source>Physiol. Behav</source>. <volume>85</volume>, <fpage>45</fpage>&#x02013;<lpage>56</lpage>. <pub-id pub-id-type="doi">10.1016/j.physbeh.2005.04.012</pub-id><pub-id pub-id-type="pmid">15924905</pub-id></citation></ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rousmans</surname> <given-names>S.</given-names></name> <name><surname>Robin</surname> <given-names>O.</given-names></name> <name><surname>Dittmar</surname> <given-names>A.</given-names></name> <name><surname>Vernet-Maury</surname> <given-names>E.</given-names></name></person-group> (<year>2000</year>). <article-title>Autonomic nervous system responses associated with primary tastes</article-title>. <source>Chem. Senses</source> <volume>25</volume>, <fpage>709</fpage>&#x02013;<lpage>718</lpage>. <pub-id pub-id-type="doi">10.1093/chemse/25.6.709</pub-id><pub-id pub-id-type="pmid">11114149</pub-id></citation></ref>
<ref id="B53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seninde</surname> <given-names>D. R.</given-names></name> <name><surname>Chambers</surname> <given-names>E.</given-names></name></person-group> (<year>2020</year>). <article-title>Coffee flavor: a review</article-title>. <source>Beverages</source> <volume>6</volume>:<fpage>44</fpage>. <pub-id pub-id-type="doi">10.3390/beverages6030044</pub-id></citation>
</ref>
<ref id="B54">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sipos</surname> <given-names>L.</given-names></name> <name><surname>Nyitrai</surname> <given-names>A.</given-names></name> <name><surname>Hitka</surname> <given-names>G.</given-names></name> <name><surname>Friedrich</surname> <given-names>L. F.</given-names></name> <name><surname>K&#x000F3;kai</surname> <given-names>Z.</given-names></name></person-group> (<year>2021</year>). <article-title>Sensory panel performance evaluation&#x02014;comprehensive review of practical approaches</article-title>. <source>Appl. Sci</source>. <volume>11</volume>:<fpage>11977</fpage>. <pub-id pub-id-type="doi">10.3390/app112411977</pub-id></citation>
</ref>
<ref id="B55">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Small</surname> <given-names>D. M.</given-names></name></person-group> (<year>2012</year>). <article-title>Flavor is in the brain</article-title>. <source>Physiol. Behav</source>. <volume>107</volume>, <fpage>540</fpage>&#x02013;<lpage>552</lpage>. <pub-id pub-id-type="doi">10.1016/j.physbeh.2012.04.011</pub-id><pub-id pub-id-type="pmid">22542991</pub-id></citation></ref>
<ref id="B56">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Snoek</surname> <given-names>J.</given-names></name> <name><surname>Larochelle</surname> <given-names>H.</given-names></name> <name><surname>Adams</surname> <given-names>R. P.</given-names></name></person-group> (<year>2012</year>). <article-title>&#x0201C;Practical bayesian optimization of machine learning algorithms,&#x0201D;</article-title> in <source>Advances in Neural Information Processing Systems 25 (NIPS 2012)</source>, <fpage>1</fpage>&#x02013;<lpage>9</lpage>.</citation>
</ref>
<ref id="B57">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Spencer</surname> <given-names>M.</given-names></name> <name><surname>Sage</surname> <given-names>E.</given-names></name> <name><surname>Velez</surname> <given-names>M.</given-names></name> <name><surname>Guinard</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>Using single free sorting and multivariate exploratory methods to design a new coffee taster&#x00027;s flavor wheel</article-title>. <source>J. Food Sci</source>. <volume>81</volume>, <fpage>S2997</fpage>&#x02013;<lpage>S3005</lpage>. <pub-id pub-id-type="doi">10.1111/1750-3841.13555</pub-id><pub-id pub-id-type="pmid">27861864</pub-id></citation></ref>
<ref id="B58">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Stone</surname> <given-names>H.</given-names></name> <name><surname>Bleibaum</surname> <given-names>R. N.</given-names></name> <name><surname>Thomas</surname> <given-names>H. A.</given-names></name></person-group> (<year>2021</year>). <source>Sensory Evaluation Practices</source>. <publisher-loc>San Diego, CA</publisher-loc>: <publisher-name>Academic Press</publisher-name>.</citation>
</ref>
<ref id="B59">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Taxali</surname> <given-names>A.</given-names></name> <name><surname>Angstadt</surname> <given-names>M.</given-names></name> <name><surname>Rutherford</surname> <given-names>S.</given-names></name> <name><surname>Sripada</surname> <given-names>C.</given-names></name></person-group> (<year>2021</year>). <article-title>Boost in test-retest reliability in resting state fmri with predictive modeling</article-title>. <source>Cereb. Cortex</source> <volume>31</volume>, <fpage>2822</fpage>&#x02013;<lpage>2833</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhaa390</pub-id><pub-id pub-id-type="pmid">33447841</pub-id></citation></ref>
<ref id="B60">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tibshirani</surname> <given-names>R.</given-names></name></person-group> (<year>1996</year>). <article-title>Regression shrinkage and selection via the lasso</article-title>. <source>J. R. Stat. Soc. Ser. B Stat. Methodol</source>. <volume>58</volume>, <fpage>267</fpage>&#x02013;<lpage>288</lpage>. <pub-id pub-id-type="doi">10.1111/j.2517-6161.1996.tb02080.x</pub-id></citation>
</ref>
<ref id="B61">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tonacci</surname> <given-names>A.</given-names></name> <name><surname>Taglieri</surname> <given-names>I.</given-names></name> <name><surname>Sanmartin</surname> <given-names>C.</given-names></name> <name><surname>Billeci</surname> <given-names>L.</given-names></name> <name><surname>Crifaci</surname> <given-names>G.</given-names></name> <name><surname>Ferroni</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Taste the emotions: pilot for a novel, sensors-based approach to emotional analysis during coffee tasting</article-title>. <source>J. Sci. Food Agric</source>. <volume>105</volume>, <fpage>1420</fpage>&#x02013;<lpage>1429</lpage>. <pub-id pub-id-type="doi">10.1002/jsfa.13172</pub-id><pub-id pub-id-type="pmid">38009337</pub-id></citation></ref>
<ref id="B62">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Torrico</surname> <given-names>D. D.</given-names></name> <name><surname>Mehta</surname> <given-names>A.</given-names></name> <name><surname>Borssato</surname> <given-names>A. B.</given-names></name></person-group> (<year>2023</year>). <article-title>New methods to assess sensory responses: a brief review of innovative techniques in sensory evaluation</article-title>. <source>Curr. Opin. Food Sci</source>. <volume>49</volume>:<fpage>100978</fpage>. <pub-id pub-id-type="doi">10.1016/j.cofs.2022.100978</pub-id></citation>
</ref>
<ref id="B63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tryon</surname> <given-names>J.</given-names></name> <name><surname>Guillermo Colli Alfaro</surname> <given-names>J.</given-names></name> <name><surname>Trejos</surname> <given-names>A. L.</given-names></name></person-group> (<year>2025</year>). <article-title>Effects of image normalization on CNN-based EEG&#x02013;EMG fusion</article-title>. <source>IEEE Sens. J</source>. <volume>25</volume>, <fpage>20894</fpage>&#x02013;<lpage>20906</lpage>. <pub-id pub-id-type="doi">10.1109/JSEN.2025.3559438</pub-id></citation>
</ref>
<ref id="B64">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Urig&#x000FC;en</surname> <given-names>J. A.</given-names></name> <name><surname>Garcia-Zapirain</surname> <given-names>B.</given-names></name></person-group> (<year>2015</year>). <article-title>EEG artifact removal&#x02014;state-of-the-art and guidelines</article-title>. <source>J. Neural Eng</source>. <volume>12</volume>:<fpage>31001</fpage>. <pub-id pub-id-type="doi">10.1088/1741-2560/12/3/031001</pub-id><pub-id pub-id-type="pmid">25834104</pub-id></citation></ref>
<ref id="B65">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Verzelli</surname> <given-names>P.</given-names></name> <name><surname>Tchumatchenko</surname> <given-names>T.</given-names></name> <name><surname>Kotaleski</surname> <given-names>J. H.</given-names></name></person-group> (<year>2024</year>). <article-title>Editorial overview: computational neuroscience as a bridge between artificial intelligence, modeling and data</article-title>. <source>Curr. Opin. Neurobiol</source>. <volume>84</volume>:<fpage>102835</fpage>. <pub-id pub-id-type="doi">10.1016/j.conb.2023.102835</pub-id><pub-id pub-id-type="pmid">38183889</pub-id></citation></ref>
<ref id="B66">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Vo</surname> <given-names>H.-T.-T.</given-names></name> <name><surname>Nguyen</surname> <given-names>T.-N.-Q.</given-names></name> <name><surname>Cuong</surname> <given-names>D. D.</given-names></name> <name><surname>Van Huynh</surname> <given-names>T.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Classification taste-EEG signals using base neural network,&#x0201D;</article-title> in <source>2023 RIVF International Conference on Computing and Communication Technologies (RIVF)</source> (<publisher-loc>Hanoi</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>107</fpage>&#x02013;<lpage>111</lpage>. <pub-id pub-id-type="doi">10.1109/RIVF60135.2023.10471803</pub-id></citation>
</ref>
<ref id="B67">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wallroth</surname> <given-names>R.</given-names></name> <name><surname>H&#x000F6;chenberger</surname> <given-names>R.</given-names></name> <name><surname>Ohla</surname> <given-names>K.</given-names></name></person-group> (<year>2018</year>). <article-title>Delta activity encodes taste information in the human brain</article-title>. <source>Neuroimage</source> <volume>181</volume>, <fpage>471</fpage>&#x02013;<lpage>479</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2018.07.034</pub-id><pub-id pub-id-type="pmid">30016677</pub-id></citation></ref>
<ref id="B68">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wooding</surname> <given-names>S. P.</given-names></name> <name><surname>Ramirez</surname> <given-names>V. A.</given-names></name> <name><surname>Behrens</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Bitter taste receptors</article-title>. <source>Evol. Med. Public Health</source> <volume>9</volume>, <fpage>431</fpage>&#x02013;<lpage>447</lpage>. <pub-id pub-id-type="doi">10.1093/emph/eoab031</pub-id><pub-id pub-id-type="pmid">35154779</pub-id></citation></ref>
<ref id="B69">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xia</surname> <given-names>X.</given-names></name> <name><surname>Yang</surname> <given-names>Y.</given-names></name> <name><surname>Shi</surname> <given-names>Y.</given-names></name> <name><surname>Zheng</surname> <given-names>W.</given-names></name> <name><surname>Men</surname> <given-names>H.</given-names></name></person-group> (<year>2024</year>). <article-title>Decoding human taste perception by reconstructing and mining temporal-spatial features of taste-related EEGs</article-title>. <source>Appl. Intell</source>. <volume>54</volume>, <fpage>3902</fpage>&#x02013;<lpage>3917</lpage>. <pub-id pub-id-type="doi">10.1007/s10489-024-05374-5</pub-id></citation>
</ref>
<ref id="B70">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>K.</given-names></name> <name><surname>Katahira</surname> <given-names>K.</given-names></name> <name><surname>Yamazaki</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>F.</given-names></name> <name><surname>Nishida</surname> <given-names>N.</given-names></name> <name><surname>Tamai</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>&#x0201C;Estimating beverage preference based on subjective emotional reactions and EEG activity,&#x0201D;</article-title> in <source>2021 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)</source> (<publisher-loc>Tokyo</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>366</fpage>&#x02013;<lpage>372</lpage>.</citation>
</ref>
<ref id="B71">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>J.</given-names></name> <name><surname>Lee</surname> <given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>Application of sensory descriptive analysis and consumer studies to investigate traditional and authentic foods: a review</article-title>. <source>Foods</source> <volume>8</volume>:<fpage>54</fpage>. <pub-id pub-id-type="doi">10.3390/foods8020054</pub-id><pub-id pub-id-type="pmid">30717367</pub-id></citation></ref>
<ref id="B72">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>T.</given-names></name> <name><surname>Zhang</surname> <given-names>P.</given-names></name> <name><surname>Xing</surname> <given-names>L.</given-names></name> <name><surname>Hu</surname> <given-names>J.</given-names></name> <name><surname>Feng</surname> <given-names>R.</given-names></name> <name><surname>Zhong</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Insights into brain perceptions of the different taste qualities and hedonic valence of food via scalp electroencephalogram</article-title>. <source>Food Res. Int</source>. <volume>173</volume>:<fpage>113311</fpage>. <pub-id pub-id-type="doi">10.1016/j.foodres.2023.113311</pub-id><pub-id pub-id-type="pmid">37803622</pub-id></citation></ref>
<ref id="B73">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yao</surname> <given-names>D.</given-names></name></person-group> (<year>2001</year>). <article-title>A method to standardize a reference of scalp EEG recordings to a point at infinity</article-title>. <source>Physiol. Meas</source>. <volume>22</volume>, <fpage>693</fpage>&#x02013;<lpage>711</lpage>. <pub-id pub-id-type="doi">10.1088/0967-3334/22/4/305</pub-id><pub-id pub-id-type="pmid">11761077</pub-id></citation></ref>
<ref id="B74">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>S.</given-names></name> <name><surname>Gao</surname> <given-names>H.</given-names></name> <name><surname>Wang</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>G.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;Basic taste intensity recognition based on semg and EEG signals,&#x0201D;</article-title> in <source>2022 China Automation Congress (CAC)</source> (<publisher-loc>Xiamen</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>5437</fpage>&#x02013;<lpage>5441</lpage>. <pub-id pub-id-type="doi">10.1109/CAC57257.2022.10055465</pub-id></citation>
</ref>
</ref-list>
</back>
</article>