<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychol.</journal-id>
<journal-title>Frontiers in Psychology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychol.</abbrev-journal-title>
<issn pub-type="epub">1664-1078</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyg.2025.1498143</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Psychology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>HDRPS+: a new affective pictorial scale applicable to organizational contexts</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Liu</surname> <given-names>Ping</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Wang</surname> <given-names>Ya&#x2019;Nan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2846268/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Liu</surname> <given-names>Yanlin</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Hu</surname> <given-names>Jiangning</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname> <given-names>Yunyi</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhao</surname> <given-names>Ke</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Mao</surname> <given-names>Jian&#x2019;guo</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Business School, Sichuan University</institution>, <addr-line>Chengdu</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>School of Economics and Management, Tibet University</institution>, <addr-line>Lhasa</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>School of the Art Institute of Chicago</institution>, <addr-line>Chicago, IL</addr-line>, <country>United States</country></aff>
<aff id="aff4"><sup>4</sup><institution>College of Electronics and Information Engineering, Sichuan University</institution>, <addr-line>Chengdu</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0001"><p>Edited by: Alessia Celeghin, University of Turin, Italy</p></fn>
<fn fn-type="edited-by" id="fn0002"><p>Reviewed by: Juha M. Lahnakoski, LVR Klinik D&#x00FC;sseldorf, Germany</p><p>Elena Carlotta Olivetti, Polytechnic University of Turin, Italy</p></fn>
<corresp id="c001">&#x002A;Correspondence: Ya&#x2019;Nan Wang, <email>wangyanan@utibet.edu.cn</email>; Yanlin Liu, <email>yanlin_liu@stu.scu.edu.cn</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>08</day>
<month>08</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>16</volume>
<elocation-id>1498143</elocation-id>
<history>
<date date-type="received">
<day>18</day>
<month>09</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>15</day>
<month>07</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2025 Liu, Wang, Liu, Hu, Li, Zhao and Mao.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Liu, Wang, Liu, Hu, Li, Zhao and Mao</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Repeatedly capturing individuals&#x2019; emotions is challenging in organizational settings, especially for low-literacy groups, and existing pictorial scales cover arousal only narrowly. We therefore developed the Highly Dynamic and Reusable Picture-based Scale Plus (HDRPS+), an optimized successor to HDRPS that measures valence and arousal simultaneously.</p>
</sec>
<sec>
<title>Methods</title>
<p>Three sub-studies were conducted. (1) Picture pool construction: 20 thematic images were created to span the affective space. (2) Picture screening: crowdsourced ratings anchored each image&#x2019;s valence&#x2013;arousal coordinates. (3) Validation: a 7-day diary study with 442 participants (aged 13&#x2013;69, M = 27.06) tested reliability and validity.</p>
</sec>
<sec>
<title>Results</title>
<p>HDRPS+ achieved good user retention, with 80.3&#x202F;% of participants providing data on at least five days. It also showed acceptable stability (consistency = 0.69 valence, 0.65 arousal) without materially influencing the affect it is intended to measure. Correlations with the Self-Assessment Manikin (SAM) confirmed concurrent validity (r = 0.63 for valence; 0.52 for arousal), while all coefficients with PANAS were &#x003C; 0.45, supporting discriminant validity. Participants judged the scale accurate or very accurate in 78&#x202F;% of cases, and indirect checks (vs.&#x202F;SAM) indicated reduced social-desirability bias.</p>
</sec>
<sec>
<title>Discussion</title>
<p>HDRPS+ is low-cost, quick, and well-tolerated, enabling continuous affect tracking in diverse organizational settings. Future work should keep refining emotional granularity, broaden application formats, and test cross-cultural use. HDRPS+ images with normative scores are available at <ext-link xlink:href="https://osf.io/d4wcn" ext-link-type="uri">https://osf.io/d4wcn</ext-link>.</p>
</sec>
</abstract>
<kwd-group>
<kwd>HDRPS+</kwd>
<kwd>affective measurement</kwd>
<kwd>organizational contexts</kwd>
<kwd>pictorial scale</kwd>
<kwd>valence</kwd>
<kwd>arousal</kwd>
</kwd-group>
<counts>
<fig-count count="4"/>
<table-count count="8"/>
<equation-count count="11"/>
<ref-count count="49"/>
<page-count count="15"/>
<word-count count="11489"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Emotion Science</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Affect (or emotion) is a critical factor influencing prosocial behavior (<xref ref-type="bibr" rid="ref17">Eisenberg, 2020</xref>), innovation (<xref ref-type="bibr" rid="ref12">Davis, 2009</xref>; <xref ref-type="bibr" rid="ref33">Park et al., 2022</xref>), safety attention (<xref ref-type="bibr" rid="ref43">Wang and Liao, 2021</xref>), and performance (<xref ref-type="bibr" rid="ref19">Inness et al., 2010</xref>; <xref ref-type="bibr" rid="ref24">Lee et al., 2017</xref>) of organizational members. Accurately identifying and coping with affects of organizational members is essential for improving organizational competitiveness and adaptability.</p>
<p>Currently, there are relatively few affect measurement methods applicable to organizational contexts. Affects are dynamic and highly variable (<xref ref-type="bibr" rid="ref43">Wang and Liao, 2021</xref>; <xref ref-type="bibr" rid="ref45">Weiss and Cropanzano, 1996</xref>), yet organizational behavior research often relies on lengthy questionnaires administered at the start and end of studies to minimize disruption to subjects&#x2019; daily life (<xref ref-type="bibr" rid="ref34">Pollak et al., 2011</xref>). Such methods, while informative, may not capture the dynamic nature of affects due to reliance on recall, raising concerns about the influence of autobiographical memory, mood congruence (<xref ref-type="bibr" rid="ref20">Isomursu et al., 2007</xref>; <xref ref-type="bibr" rid="ref46">Wilhelm and Schoebi, 2007</xref>), and recency effects (<xref ref-type="bibr" rid="ref34">Pollak et al., 2011</xref>; <xref ref-type="bibr" rid="ref47">Wissmath et al., 2010</xref>) on the results.</p>
<p>Recognizing these constraints, the remainder of the Introduction first surveys contemporary approaches to ambulatory affect assessment, from wearable physiological sensors to pictorial self-report instruments, and then evaluates their limitations when rapid, low-literacy tracking of both valence and arousal is required in workplace settings. This analysis highlights an enduring need for a lightweight tool that can repeatedly capture both dimensions among diverse employees&#x2014;a need addressed by the HDRPS+ introduced in this study.</p>
<sec id="sec2">
<label>1.1</label>
<title>Ambulatory assessment of affect</title>
<p>In organizational settings, employees&#x2019; affective states are highly dynamic. Affective Events Theory asserts that discrete daily work incidents trigger short-lived emotional reactions that subsequently color attitudes and behavior (<xref ref-type="bibr" rid="ref45">Weiss and Cropanzano, 1996</xref>). Diary and experience-sampling research confirms this volatility: a substantial share of the total variance in momentary positive and negative affect resides within rather than between persons (<xref ref-type="bibr" rid="ref18">Ilies et al., 2015</xref>). Such intra-individual fluctuations matter: ambulatory assessment studies show that transient shifts in affect forecast on-the-spot safety compliance, vigilance errors and interaction quality (<xref ref-type="bibr" rid="ref21">Klumb et al., 2009</xref>). Therefore, organizations require assessment tools that can capture affect <italic>in situ</italic> and at cadences fast enough to support just-in-time interventions, for example, prompting micro-breaks when arousal drops or alerting supervisors when tension spikes.</p>
<p>Ambulatory assessment therefore seeks to record psychological states in situ through computer-assisted self-reports, behavioral logs or physiological sensors while people perform their normal duties (<xref ref-type="bibr" rid="ref1">Bachmann et al., 2015</xref>). Likewise, ambulatory assessment is highly suitable for affective research, as affects are complex neurobiological and psychological phenomena characterized by high variability (<xref ref-type="bibr" rid="ref45">Weiss and Cropanzano, 1996</xref>).</p>
<p>Three broad methodological streams can be distinguished: Physiological indicator measures, External behavioral observations, and Self-reports. (1) Physiological measurements monitor heart-rate variability, electromyography, skin conductance, and other autonomic or central nervous system signals. This approach yields comprehensive data but remains intrusive and can disrupt normal activity (<xref ref-type="bibr" rid="ref8">Cowie and Douglas-Cowie, 1996</xref>). Even though recent research has introduced the feasibility of lightweight devices such as smartwatches (<xref ref-type="bibr" rid="ref41">Toshnazarov et al., 2024</xref>), detailed physiological data continue to pose non-trivial privacy compliance risks, and the costs of both hardware and software still remain substantial (<xref ref-type="bibr" rid="ref4">Bolpagni et al., 2024</xref>; <xref ref-type="bibr" rid="ref15">Doherty et al., 2025</xref>). (2) External behavioral observations focus on visible cues like facial expressions, postures, and voice tone. Utilizing high-definition cameras and streaming video for real-time analysis, this approach provides a relatively objective measure of emotional fluctuations. However, this non-intrusive method could infringe on personal privacy and confidentiality, making it less viable in organizational settings (<xref ref-type="bibr" rid="ref3">Betella and Verschure, 2016</xref>). (3) In the self-reporting method, emotion is typically assessed through textual scales. Unfortunately, although such methods collect extensive emotional data, they require that the scales be simple and engaging, and may pose significant challenges for individuals with limited reading skills (<xref ref-type="bibr" rid="ref10">Cranford et al., 2006</xref>; <xref ref-type="bibr" rid="ref16">Ebner-Priemer and Sawitzki, 2007</xref>; <xref ref-type="bibr" rid="ref46">Wilhelm and Schoebi, 2007</xref>).</p>
<p>Taken together, existing ambulatory techniques struggle to balance ecological validity, intrusiveness and scalability in organizational contexts. Physiological measures and external behavioral observations are costly and primarily limited to experimental scenarios. Textual self-report methods, though cheaper and more versatile, require well-designed scales and a certain level of participant literacy. Considering these limitations, pictorial scales may prove to be a solution.</p>
</sec>
<sec id="sec3">
<label>1.2</label>
<title>Pictorial scales</title>
<p>Pictorial scales are a measurement method that utilizes visual elements to convey the meaning of items (<xref ref-type="bibr" rid="ref37">Sauer et al., 2021</xref>). These scales offer several advantages, including simplicity (<xref ref-type="bibr" rid="ref47">Wissmath et al., 2010</xref>), rapidity (<xref ref-type="bibr" rid="ref39">Schreiber and Jenny, 2020</xref>), low dropout rates (<xref ref-type="bibr" rid="ref2">Baumgartner et al., 2019</xref>), and low cognitive demands (<xref ref-type="bibr" rid="ref13">Desmet et al., 2016</xref>; <xref ref-type="bibr" rid="ref32">Obaid et al., 2015</xref>). These scales are particularly effective for individuals with lower educational attainment or limited reading skills. Consequently, pictorial scales can be better suited to organizational settings that include a diverse workforce, especially employees with limited literacy (<xref ref-type="bibr" rid="ref37">Sauer et al., 2021</xref>).</p>
<p>Depending on different forms of presentation, pictorial scales can be divided into three categories: grid-based, humanoid-based, and realistic-picture-based, as shown in <xref ref-type="table" rid="tab1">Table 1</xref>. As for grid-based scales, some typically use a two-dimensional affective space (valence &#x00D7; arousal), such as Affect Grid (<xref ref-type="bibr" rid="ref36">Russell et al., 1989</xref>) and Feeltrace (<xref ref-type="bibr" rid="ref9">Cowie et al., 2000</xref>). However, grid-based tools also have notable drawbacks. Because valence and arousal are reported with a single click inside a matrix, respondents must trade one dimension against the other, which lowers the effective resolution for each construct. Locating a precise cell further imposes a non-trivial cognitive load that slows response time and disadvantages participants with limited numeracy or literacy (<xref ref-type="bibr" rid="ref8">Cowie and Douglas-Cowie, 1996</xref>; <xref ref-type="bibr" rid="ref36">Russell et al., 1989</xref>).</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>An overview of several typical pictorial scales.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Scale</th>
<th align="left" valign="top">Source</th>
<th align="left" valign="top">Display format</th>
<th align="center" valign="top">Preview</th>
<th align="left" valign="top">Measured dimensions</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Affect grid</td>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref36">Russell et al. (1989)</xref>
</td>
<td align="left" valign="middle">Grid</td>
<td align="center" valign="middle">
<inline-graphic xlink:href="fpsyg-16-1498143-i001.tif">
<alt-text content-type="machine-generated">A grid with vertical axis labeled from "Extremely unpleasant feeling" to "Extremely pleasant feeling" and horizontal axis labeled from "Extremely sleepy" to "Extremely aroused," both numbered one to nine.</alt-text>
</inline-graphic>
</td>
<td align="left" valign="middle">Pleasure and arousal</td>
</tr>
<tr>
<td align="left" valign="middle">Feeltrace</td>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref9">Cowie et al. (2000)</xref>
</td>
<td align="left" valign="middle">Grid</td>
<td align="center" valign="middle">
<inline-graphic xlink:href="fpsyg-16-1498143-i002.tif">
<alt-text content-type="machine-generated">Emotion wheel chart depicting various emotions in a circular layout with axes labeled as very active, very passive, very positive, and very negative. Colors range from red for anger and sadness to green for joy and anticipation. Emotions are distributed around the circle, indicating their position on the active-passive and positive-negative spectrum.</alt-text>
</inline-graphic>
</td>
<td align="left" valign="middle">Activation and positivity</td>
</tr>
<tr>
<td align="left" valign="middle">SAM</td>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref5">Bradley and Lang (1994)</xref>
</td>
<td align="left" valign="middle">Humanoid</td>
<td align="center" valign="middle">
<inline-graphic xlink:href="fpsyg-16-1498143-i003.tif">
<alt-text content-type="machine-generated">A series of sixteen diagrams depicting progressively larger and more complex humanoid figures. The sequence shows varied facial expressions and symbols inside the figures, including question marks, spirals, arrows, stars, and fragmented sections. The final images in the series display an oversized figure split into pieces, suggesting themes of growth, complexity, and potential fragmentation.</alt-text>
</inline-graphic>
</td>
<td align="left" valign="middle">Pleasure, arousal, and dominance</td>
</tr>
<tr>
<td align="left" valign="middle">Affective slider</td>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref3">Betella and Verschure (2016)</xref>
</td>
<td align="left" valign="middle">Humanoid</td>
<td align="center" valign="middle">
<inline-graphic xlink:href="fpsyg-16-1498143-i004.tif">
<alt-text content-type="machine-generated">Two gradient scales with sliders in the middle, each transitioning from dark to light and back to dark. The top scale shows a sad face on the left and a neutral face on the right. The bottom scale shows a crying face on the left and a laughing face on the right.</alt-text>
</inline-graphic>
</td>
<td align="left" valign="middle">Pleasure and arousal</td>
</tr>
<tr>
<td align="left" valign="middle">PAM</td>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref34">Pollak et al. (2011)</xref>
</td>
<td align="left" valign="middle">Realistic pictures</td>
<td align="center" valign="middle">
<inline-graphic xlink:href="fpsyg-16-1498143-i005.tif">
<alt-text content-type="machine-generated">A grid of sixteen images includes a cat in the grass, a lightning strike, people making gestures, a silhouette, a cow, a girl smiling, a couple posing, a sad face mask, a person contemplating, a landscape, a bearded man with a hat, cloudy skies, a yawning baby, a sleeping dog, and two people sitting by a lake at sunset.</alt-text>
</inline-graphic>
</td>
<td align="left" valign="middle">Valence and arousal</td>
</tr>
<tr>
<td align="left" valign="middle">HDRPS</td>
<td align="left" valign="middle">
<xref ref-type="bibr" rid="ref27">Liu et al. (2023)</xref>
</td>
<td align="left" valign="middle">Realistic pictures</td>
<td align="center" valign="middle">
<inline-graphic xlink:href="fpsyg-16-1498143-i006.tif">
<alt-text content-type="machine-generated">Top row shows a skydiver above clouds, two people working in a field, a woman covering her ears in a crowd, and a person with a backache. Middle row features a butterfly on a flower, a frog inside a shell, a puppy resting its head, and a tiger snarling. Bottom row includes a tree-lined path with yellow leaves, red and green fruits on a branch, a close-up of a flower with yellow and red hues, and a wilted rose on a dark background.</alt-text>
</inline-graphic>
</td>
<td align="left" valign="middle">Valence</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Humanoid pictorial scales utilize emoticons or humanoid cartoons as anchors. For instance, the Self-Assessment Manikin (SAM) uses five progressively changing humanoid cartoons to measure valence, arousal, and dominance. Humanoid or manikin scales likewise present limitations. Interpretation of the stylized figures is filtered through culturally embedded gender and age stereotypes, leading to systematic bias across demographic groups. Moreover, the extreme postures used to depict very high arousal become visually ambiguous, reducing discriminability at the upper end of the arousal continuum (<xref ref-type="bibr" rid="ref3">Betella and Verschure, 2016</xref>; <xref ref-type="bibr" rid="ref5">Bradley and Lang, 1994</xref>).</p>
<p>The realistic-picture-based scales are based on psychological projection techniques and use a multitude of real pictures as anchors, measuring emotions by having participants select the image that best matches their mood. For example, the Photographic Affect Meter (PAM) scale presents 16 images of different categories and themes in a single session, and determines participant&#x2019;s valence and arousal based on affective labels corresponding to the selected image (<xref ref-type="bibr" rid="ref34">Pollak et al., 2011</xref>). Similarly, the Highly Dynamic and Reusable Picture-based Scale (HDRPS) displays a quartet of thematically matched photographs (e.g., four animal images) that span an unpleasant-to-pleasant continuum. Participants click the single image that best matches their current affect, and its ordinal rank is recorded as the valence score for that trial (<xref ref-type="bibr" rid="ref27">Liu et al., 2023</xref>).</p>
<p>Compared with instruments like grid and manikin, realistic photographs offer three distinct advantages for high-frequency organizational use. First, they deliver rich, multimodal cues that can be decoded in under a few seconds, matching the pace of diary sampling (<xref ref-type="bibr" rid="ref23">Lang et al., 2005</xref>). Second, the concrete imagery eliminates the abstraction burden of coordinate mapping or icon interpretation, thereby reducing error variance and respondent fatigue (<xref ref-type="bibr" rid="ref11">Dan-Glauser and Scherer, 2011</xref>). Third, photo sets can portray diverse contexts and actor identities, which helps minimize gender- or culture-specific stereotype bias and yields smaller social-desirability effects than stylized faces (<xref ref-type="bibr" rid="ref3">Betella and Verschure, 2016</xref>). These properties make realistic-picture scales particularly suitable for continuous affect monitoring in the workplace.</p>
<p>The main problem addressed in this study is the continuous measurement of individual affects in organizational contexts. On the one hand, in labor-intensive sectors such as construction, manufacturing, warehousing, and hospitality where many frontline employees have limited reading proficiency, measurement tools must remain simple and easy to understand. On the other hand, considering the need for repeated measurements, these tools should also be engaging to enhance participant motivation. Considering these factors, measurement tools that use real images as anchors are particularly suitable for this context.</p>
<p>However, there are relatively few realistic-picture-based scales available, and those that exist exhibit obvious limitations. For example, the PAM presents 16 images simultaneously to measure emotional states, raising unresolved questions about whether this multi-image display method might alter the subjects&#x2019; emotional states. Second, the image materials in PAM have not undergone rigorous rating, making the use of self-reported emotions as emotional labels for the images less precise. Third, PAM also mixes images of different types and themes, which does not account for the influence of personal preferences or visual appeal on the measurement results (<xref ref-type="bibr" rid="ref37">Sauer et al., 2021</xref>). Finally, with only 100 images in its database and presenting 16 images at once, there is a high possibility of repetition, which can decrease response motivation.</p>
</sec>
<sec id="sec4">
<label>1.3</label>
<title>Contributions and limitations of HDRPS</title>
<p>Considering the limitations of existing realistic-picture-based scales, our team previously developed HDRPS. This scale initially collected 22,054 raw images, which were subjected to a three-step evaluation process: image usability testing, emotional type assessment, and emotional scoring experiments. Through more rigorous evaluations, images were carefully selected and categorized, resulting in the creation of a structured pictorial scale with 3,386 images, each tagged with detailed attribute labels.</p>
<p>Thus, HDRPS refines the selection and display of images to enhance measurement accuracy and reduce bias. By presenting fewer images at once and employing a rigorous vetting process for each image, it ensures more consistent and reliable emotional assessments. Additionally, HDRPS expands the image database, thereby reducing repetition and sustaining participant engagement throughout the study. This results in a more robust and effective tool for capturing nuanced emotional responses in research settings.</p>
<p>However, HDRPS still faces two significant limitations that necessitate further optimization. Firstly, in terms of measurable dimensions, while HDRPS images are labeled with both valence and arousal, the scarcity of high arousal images has led to a design focus primarily on valence. This limits the scale&#x2019;s ability to accurately capture the precise emotions of the subjects. Secondly, regarding the presentation of images, although HDRPS reduces the influence of personal preferences and visual appeal by displaying four images of the same category (e.g., all animal images) at once, it still fails to eliminate the impact of content differences (e.g., images of a cat, dog, bird, and fish) on the measurement outcomes. These issues highlight the need for further refinements in HDRPS to enhance its accuracy and applicability.</p>
</sec>
<sec id="sec5">
<label>1.4</label>
<title>About this study</title>
<p>In this article, we present the development and validation of HDRPS+, an affect measurement instrument designed for real organizational contexts. In response to the practical needs for dynamic and continuous measurement, we conducted 3 sub-studies to develop and validate HDRPS+, which can continuously measure changes in valence and arousal of organizational members. As an important optimization of HDRPS, developed by our team previously (<xref ref-type="bibr" rid="ref27">Liu et al., 2023</xref>), HDRPS+ incorporates the arousal dimension into the measurement process, which can more accurately characterize individuals&#x2019; emotional states. Compared to traditional emotion research instruments, HDRPS+ is shown to have broader applicability, lower economic costs, simpler testing procedures, higher response rates, and lower susceptibility to social desirability effects. As a result, it can provide valuable support for continuous affect measurement in organizations.</p>
<p>Specifically, the three sub-studies were built upon the foundational designs of PAM and HDRPS to refine and test HDRPS+ thoroughly. First, we identified image acquisition ideas based on the results of semi-structured interviews, and constructed a picture pool through picture acquisition and supplemental acquisition (Study 1). Then, we screened and constructed picture attributes by evaluations on available, affective category, and affective score (Study 2). Finally, after comparing different picture presentation methods, we determined the optimal measurement and conducted a large-scale trial to verify the reliability and validity of the newly developed tool (Study 3).</p>
<p>Compared with prior measurement tools, the special contributions of this study are fourfold. First, the development and validation process of HDRPS+ is more rigorous, providing a beneficial reference for the development of pictorial scales. Second, the design of HDRPS+ draws on the principles of simple-drawing scales such as SAM, emphasizing the correlation of appearance or content among each set of images, thus effectively controlling the influence of personal preferences and image attractiveness on the measurement results. Third, HDRPS+ was developed based on the valence-arousal emotion model, using 3 same-themed images to measure the subjects&#x2019; valence and arousal, which can draw individual affect portraits more quickly and accurately. Fourth, the database comprises 20 thematic image sets, including animals, plants, and scenes, offering richer material and a more balanced category distribution that can accommodate a wide range of subsequent research designs.</p>
</sec>
</sec>
<sec id="sec6">
<label>2</label>
<title>Study 1: item generation</title>
<sec id="sec7">
<label>2.1</label>
<title>Step 1: determining the approach for picture acquisition</title>
<p>As realistic pictures are diverse and complex, clear clues for picture collection must be established when using them as measurement anchors. To this end, we recruited 57 students (29 males and 28 females, with an average age of 21.81) from Sichuan University who had participated in HDRPS picture ratings, and conducted semi-structured interviews, each lasting approximately 10&#x202F;min, aimed at addressing the following questions:</p>
<list list-type="simple">
<list-item><p>(1) Comparison of the difficulty level of evaluating different categories of pictures (valence and arousal) in HDRPS.</p></list-item>
<list-item><p>(2) What are the factors that influence the judgment of picture affects (valence and arousal)? Please provide examples.</p></list-item>
</list>
<p>These interviews yielded 8&#x202F;h and 43&#x202F;min of audio recordings, which were transcribed into 150,600 words. Using ATLAS.ti 9.0 software, the data underwent three stages of coding, resulting in 496 primary codes, 26 secondary codes, and 3 core propositions (overall factors, emotional dimensions, and picture categories). The key findings are as follows: (1) Personal preferences would affect the evaluation of pictures in animals, plants and scenes. To avoid this, HDRPS+ should use group pictures with the same main subject matter. For example, in the case of images featuring people, all images within a set should feature the same individual, while for animal images, all images within a set should have similar appearances. (2) The factors influencing the evaluation of valence and arousal differ. Therefore, image collection should be dimension-specific to accurately convey emotional meanings. (3) The collection strategy for specific images should be adapted according to the circumstances. Due to the poor emotional discernibility of object-related images, we have temporarily restricted the image categories to people, animals, plants, and scenes. Additionally, considering that expressions and postures can affect the affective judgment of animal-related images, we have only selected mammalian animals with clear and visible facial expressions during picture acquisition.</p>
</sec>
<sec id="sec8">
<label>2.2</label>
<title>Step 2: picture acquisition</title>
<p>We referenced the methods utilized by HDRPS and NAPS (Nencki Affective Picture System, <xref ref-type="bibr" rid="ref9001">Marchewka et al., 2014</xref>) for picture acquisition. With the exception of some self-taken photographs, the majority of the pictures were obtained from publicly available networks. A total of 60 sets of pictures (415 pictures in all) were collected, including 4 sets of people, 17 sets of animals, 19 sets of plants, and 20 sets of scenes. Since it was difficult to collect pictures of the same person in different affective states from open networks, we attempted to use facial expression databases (e.g., the Japanese Female Facial Expression Database, <xref ref-type="bibr" rid="ref31">Lyons, 2021</xref>) or screenshots of film and TV works as picture materials. However, due to the following four considerations, we ultimately decided to remove images of people (26 in total). Firstly, the purpose of developing these facial expression databases was to provide standard materials for inducing specific emotions. Therefore, using such images as emotional anchors is highly likely to induce affective changes in the subjects. Secondly, the performers in film and TV works are often celebrities, and the subjects&#x2019; preferences for these performers could influence the evaluation results. Thirdly, the emotional suggestiveness of human figures can be strong, and subjects are more susceptible to being influenced by social desirability during testing, leading to measurement bias. Fourthly, there are potential legal risks related to issues such as portrait rights.</p>
</sec>
<sec id="sec9">
<label>2.3</label>
<title>Step 3: determine the evaluation baseline pictures</title>
<p>To ensure consistent evaluation baselines for each HDRPS+ image set (i.e., having representative &#x201C;neutral/medium&#x201D; images for each dimension), we individually evaluated the valence (positive, neutral, negative) and arousal (high, medium, low) dimensions of all 389 images. This approach enables the identification of the valence baseline image (with a neutral valence) and arousal baseline image (with a neutral arousal) for each set of images.</p>
<p>At the operational level, we utilized OpenCV and Visual Studio 2015 to adjust 389 original images to 512&#x002A;512 pixel JPG images. In addition, to facilitate comparison of images in the same set, images of the same evaluation dimension within the set were presented simultaneously using slides. During the process, 5 master&#x2019;s and doctoral students with more than 2&#x202F;years of emotion research experience successively evaluated the valence and arousal dimension of each set of pictures and were given a 5-min break between the two modules.</p>
<p>The agreement percentage (AP) refers to the percentage of evaluators who perceive a particular image to belong to a specific emotional category out of all evaluators (<xref ref-type="bibr" rid="ref42">Wang and Chu, 2013</xref>; as shown in <xref ref-type="disp-formula" rid="EQ1">equation 1</xref>). In this study, an 80% agreement percentage was used as the threshold for judgement. Eventually, a total of 23 sets of pictures, comprising 10 sets of animals, 5 sets of plants, and 8 sets of scenes, were identified to possess both the valence and arousal baselines.</p>
<disp-formula id="EQ1"><label>(1)</label><mml:math id="M1"><mml:msub><mml:mi mathvariant="italic">AP</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:msub><mml:mi mathvariant="italic">NE</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mi>N</mml:mi></mml:mfrac><mml:mo>&#x00D7;</mml:mo><mml:mn>100</mml:mn><mml:mo>%</mml:mo></mml:math></disp-formula>
<p>Note: <inline-formula><mml:math id="M2"><mml:mi mathvariant="italic">AP</mml:mi></mml:math></inline-formula> refers to the agreement percentage; <inline-formula><mml:math id="M3"><mml:mi>i</mml:mi></mml:math></inline-formula> represents the emotional category of the image (including positive valence, neutral valence, negative valence, high arousal, medium arousal, and low arousal); <inline-formula><mml:math id="M4"><mml:msub><mml:mi mathvariant="italic">NE</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> denotes the number of evaluators who consider a certain picture to belong to the emotional category <inline-formula><mml:math id="M5"><mml:mi>i</mml:mi></mml:math></inline-formula>; <inline-formula><mml:math id="M6"><mml:mi>N</mml:mi></mml:math></inline-formula> represents the total number of evaluators.</p>
</sec>
<sec id="sec10">
<label>2.4</label>
<title>Step 4: supplemental acquisition</title>
<p>As not every set among the 23 sets in Step 3 contains all categories under the arousal dimension (e.g., Scenes-1 lacks highly arousing images), and the number of pictures varied greatly between groups, we conducted supplementary picture acquisition. Except for two sets of images that were difficult to collect, the remaining 21 sets of images were moderately supplemented (10 images per set).</p>
<p>Following the supplemental acquisition, we obtained the initial pool for HDRPS+, which contains 9 sets of animals, 5 sets of plants, and 7 sets of scenes.</p>
</sec>
</sec>
<sec id="sec11">
<label>3</label>
<title>Study 2: picture screening and attribute construction</title>
<sec id="sec12">
<label>3.1</label>
<title>Step 1: usability evaluation</title>
<p>In this paper, we set up an available evaluation session of picture materials inspired by the development of textual scales. Twenty MBA students from Sichuan University participated in this experimental session, including 6 males and 14 females who confirmed that they were currently mentally and cognitively normal, without psychiatric diagnoses or use of psychotropic medications. As this phase was a small-scale professional evaluation rather than an effort to establish a population statistical sample, perfect gender balance was not strictly controlled.</p>
<p>To address potential dimensional contamination issues of evaluating valence and arousal simultaneously (<xref ref-type="bibr" rid="ref22">Kurdi et al., 2017</xref>), we divided the experiment into two modules: valence evaluation and arousal evaluation. Prior to the experiment, the experimenter provided informed consent forms, instructions (including an introduction to the evaluation indexes and task descriptions), and scoring sheets. During the evaluation, participants judged the &#x201C;availability&#x201D; of each of the 21 picture sets&#x2014;that is, whether the set met our predefined criteria of relevance, clarity, and specificity for both valence and arousal (see <xref ref-type="table" rid="tab2">Table 2</xref>)&#x2014;and then recorded a Yes/No response on the scoring sheet. Considering individual differences in evaluation speed, we did not impose restrictions on the playback speed of the images, but only required each subject to respond as quickly and accurately as possible based on their intuition.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Available evaluation index.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Dimension</th>
<th align="left" valign="top">Index</th>
<th align="left" valign="top">Explanation</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle" rowspan="3">Valence</td>
<td align="left" valign="middle">Relevance</td>
<td align="left" valign="middle">Can this set of images reflect the definition and description of valence?</td>
</tr>
<tr>
<td align="left" valign="middle">Clarity</td>
<td align="left" valign="middle">Is the emotional meaning of this set of images clear and unambiguous?</td>
</tr>
<tr>
<td align="left" valign="middle">Specificity</td>
<td align="left" valign="middle">Can this set of images constitute a complete and distinguishable set of valence levels, rather than being overly general or vague?</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="3">Arousal</td>
<td align="left" valign="middle">Relevance</td>
<td align="left" valign="middle">Can this set of images reflect the definition and description of arousal?</td>
</tr>
<tr>
<td align="left" valign="middle">Clarity</td>
<td align="left" valign="middle">Is the emotional meaning of this set of images clear and unambiguous?</td>
</tr>
<tr>
<td align="left" valign="middle">Specificity</td>
<td align="left" valign="middle">Can this set of images constitute a complete and distinguishable set of arousal levels, rather than being overly general or vague?</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The agreement percentage of 6 available evaluation indexes for each set of pictures was calculated according to <xref ref-type="disp-formula" rid="EQ1">Equation 1</xref>. Following the criteria proposed by <xref ref-type="bibr" rid="ref25">Li (2014)</xref>, sets of images were screened with a threshold of 60% agreement ratio. The results showed that a total of 15 sets of pictures had an agreement ratio higher than 60% for all 6 available indexes, including 8 sets of animals, 5 sets of plants, and 2 sets of scenes.</p>
</sec>
<sec id="sec13">
<label>3.2</label>
<title>Step 2: affective category labelling</title>
<sec id="sec14">
<label>3.2.1</label>
<title>Participants</title>
<p>Affective category evaluation was performed to determine the affective types (affective labels) of each picture. To ensure the scientific validity of picture labels, PhD students with relevant research backgrounds were recruited from Business School of Sichuan University for image evaluation. Participants were required to have studied psychology or other related courses, and have research experience in emotional science.</p>
<p>Subsequently, we conducted a qualification test to confirm the exact candidates of the participants. The qualification test consisted of three parts: Ishihara Color Blindness Test, Self-Rating Anxiety Scale (SAS) test, and Self-Rating Depression Scale (SDS) test. A SAS score lower than 50 and an SDS score lower than 53 indicated normal mental status (<xref ref-type="bibr" rid="ref48">Zung, 1965</xref>, <xref ref-type="bibr" rid="ref49">1971</xref>), and passing all three tests was considered as passing the qualification test (<xref ref-type="bibr" rid="ref26">Li et al., 2020</xref>). Eventually, a total of 12 applicants (6 males and 6 females, aged 25&#x2013;33&#x202F;years) passed the background check and qualification test, and received a compensation of 100 CNY after completing a 30-min experiment.</p>
</sec>
<sec id="sec15">
<label>3.2.2</label>
<title>Procedure</title>
<p>The entire experiment consisted of four phases: baseline affective self-reporting, pre-test practice, picture evaluation, and post-test affective self-reporting (as shown in <xref ref-type="fig" rid="fig1">Figure 1</xref>). The post-test assessment was included to verify that the rating task itself did not systematically alter participants&#x2019; mood; confirming stable pre- and post-scores allows us to attribute picture evaluations to the images rather than task-induced state changes.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Process of affective category evaluation.</p>
</caption>
<graphic xlink:href="fpsyg-16-1498143-g001.tif">
<alt-text content-type="machine-generated">Flowchart illustrating a testing procedure starting with "Baseline affective self-reporting", followed by "Pre-test practice: 2 trials". The main section is "Picture evaluation", with a "Valence module" of 135 trials, a 3-minute break, and an "Arousal module" of 135 trials. It concludes with "Post-test affective self-reporting." Images of animals are shown in the evaluation modules.</alt-text>
</graphic>
</fig>
<p>We developed a web-based image-evaluation system to implement the above experiments and record all responses. Both baseline and post-test affective self-reports employed a 9-level SAM scale to distinguish individual differences in affects (<xref ref-type="bibr" rid="ref13">Desmet et al., 2016</xref>), and the Positive and Negative Affect Schedule (PANAS) scale (<xref ref-type="bibr" rid="ref44">Watson et al., 1988</xref>) was added to the post-test affective self-reporting session. During the picture-evaluation phase, 15 picture sets were presented in random order. Participants first completed the valence module and then the arousal module. To give each set a clear point of reference, the baseline image identified in Study 1 Step 3 was displayed on the left side of the interface, while the target image appeared on the right side of the evaluation interface. Thus, participants used a pairwise comparison method to determine the valence (options included more positive, consistent, more negative, and unable to judge) and arousal (options included higher, consistent, lower, and unable to judge) types of the images to be evaluated (<xref ref-type="bibr" rid="ref32">Obaid et al., 2015</xref>).</p>
</sec>
<sec id="sec16">
<label>3.2.3</label>
<title>Results</title>
<p>Baseline self-reports indicated a mean valence of 5.92 and a mean arousal of 4.17. After the picture-evaluation task, mean valence was 6.00 and mean arousal 4.67; the mean PANAS score was 13. Paired-samples t-tests showed no significant change in valence (<italic>p</italic>&#x202F;=&#x202F;0.754) or arousal (<italic>p</italic>&#x202F;=&#x202F;0.438) from pre- to post-test, suggesting that participants&#x2019; emotional state remained stable during the task and therefore exerted little influence on their picture ratings. After removing redundant data due to network problems, we used the Cronbach&#x2019;s alpha to calculate the consistency of the valence and arousal ratings provided by 12 subjects. It showed that Cronbach&#x2019;s alpha of valence was 0.937 (Cronbach&#x2019;s minimum after removal of items was 0.928), and Cronbach&#x2019;s alpha of arousal was 0.929 (Cronbach&#x2019;s minimum after removal of items was 0.917), which exceeded the standard set by <xref ref-type="bibr" rid="ref7">Cook et al. (2018)</xref>, and passed the reliability test.</p>
<p>We also calculated the valence and arousal agreement percentage of 135 evaluated pictures, respectively, based on <xref ref-type="disp-formula" rid="EQ1">Equation 1</xref>, and pictures with agreement percentage above 60% were retained (<xref ref-type="bibr" rid="ref25">Li, 2014</xref>) and labeled with their corresponding emotional categories. Finally, 127 pictures were retained in the valence dimension (including 45 positive images, 27 neutral images, and 55 negative images), and 134 pictures were retained in the arousal dimension (including 48 low arousal images, 28 medium arousal images, and 58 high arousal images).</p>
</sec>
</sec>
<sec id="sec17">
<label>3.3</label>
<title>Step 3: affective scoring</title>
<sec id="sec18">
<label>3.3.1</label>
<title>Method</title>
<p>In Step 2, the affective categories of each picture have been determined. In this section, we will further ascertain the affective score of each picture with the help of a web-based picture evaluation system to determine the accurate anchoring of pictures. To ensure the generalizability of the scoring results, this experiment did not restrict the subjects&#x2019; professional backgrounds, but only tested their health status (including Ishihara Color Blindness Test, SAS test, SDS test). Seventy-nine undergraduate and graduate students from Sichuan University applied to participate in this experiment, and finally, a total of 58 subjects (23 males and 35 females; mean age&#x202F;=&#x202F;22.68) formally participated in this experiment and received a reward of 30 CNY after completing the experiment. The specific operation process of this experiment is basically similar to that of Step 2, except that the 9-level scale of the baseline and post-test affective self-reporting was adjusted to a 5-level scale according to <xref ref-type="bibr" rid="ref13">Desmet et al. (2016)</xref>.</p>
</sec>
<sec id="sec19">
<label>3.3.2</label>
<title>Results</title>
<p>At baseline, mean valence and arousal were 3.61 and 2.88, respectively. Post-task means were 2.88 (valence) and 3.46 (arousal); the PANAS mean was 13.22. Paired-samples t-tests again revealed no significant change in valence (<italic>p</italic>&#x202F;=&#x202F;0.135) or arousal (<italic>p</italic>&#x202F;=&#x202F;0.669), indicating that participants&#x2019; affective state remained largely constant throughout the evaluation and had a relatively small impact on their judgments. We used Cronbach&#x2019;s alpha to calculate the consistency of the 58 subjects&#x2019; picture rating results separately; the data revealed that Cronbach&#x2019;s alpha of valence was 0.991 (Cronbach&#x2019;s minimum after removal of items was 0.991) and Cronbach&#x2019;s alpha of arousal was 0.992 (Cronbach&#x2019;s minimum after removal of items was 0.992), indicating high consistency (<xref ref-type="bibr" rid="ref25">Li, 2014</xref>).</p>
<p>Based on <xref ref-type="disp-formula" rid="EQ2">Equations 2</xref>, <xref ref-type="disp-formula" rid="EQ3">3</xref>, the agreement percentage of valence and arousal for each image were calculated, and images were selected based on a standard of 60% (<xref ref-type="bibr" rid="ref25">Li, 2014</xref>). The results show that there were 118 remaining images in the valence dimension and 131 in the arousal dimension. <xref ref-type="disp-formula" rid="EQ4">Equations 4</xref>, <xref ref-type="disp-formula" rid="EQ5">5</xref> were then used to calculate the valence and arousal scores of each image.</p>
<disp-formula id="EQ2"><label>(2)</label><mml:math id="M7"><mml:msub><mml:mi>AP</mml:mi><mml:mi mathvariant="normal">V</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mo>max</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>4</mml:mn></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>5</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>6</mml:mn></mml:msub><mml:mo stretchy="true">)</mml:mo><mml:mo>/</mml:mo><mml:msup><mml:mi mathvariant="normal">N</mml:mi><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>100</mml:mn><mml:mo>%</mml:mo></mml:math></disp-formula>
<disp-formula id="EQ3"><label>(3)</label><mml:math id="M8"><mml:msub><mml:mi>AP</mml:mi><mml:mi mathvariant="normal">A</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mo>max</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>4</mml:mn></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>5</mml:mn></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>6</mml:mn></mml:msub><mml:mo stretchy="true">)</mml:mo><mml:mo>/</mml:mo><mml:msup><mml:mi mathvariant="normal">M</mml:mi><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>100</mml:mn><mml:mo>%</mml:mo></mml:math></disp-formula>
<p>Note: AP<sub>V</sub> is the agreement percentage of valence; N<sub>1</sub>, N<sub>2</sub>, N<sub>3</sub>, N<sub>4</sub>, N<sub>5</sub> and N<sub>6</sub> are the number of people who agree that the image valence is (1) extremely negative, (2) negative, (3) the same, (4) positive, (5) extremely positive, and (6) undeterminable, respectively; N is the total number of participants in the valence evaluation; AP<sub>A</sub> is the agreement percentage of arousal; M<sub>1</sub>, M<sub>2</sub>, M<sub>3</sub>, M<sub>4</sub>, M<sub>5</sub> and M<sub>6</sub> are the number of people who agree that the image arousal is (1) extremely low, (2) low, (3) the same, (4) high, (5) extremely high, and (6) undeterminable, respectively; M is the total number of participants in the arousal evaluation.</p>
<disp-formula id="EQ4"><label>(4)</label><mml:math id="M9"><mml:msub><mml:mi mathvariant="normal">S</mml:mi><mml:mi mathvariant="normal">V</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>2</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>3</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>4</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>4</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>5</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>5</mml:mn><mml:mo stretchy="true">)</mml:mo><mml:mo>/</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:mi mathvariant="normal">N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi mathvariant="normal">N</mml:mi><mml:mn>6</mml:mn></mml:msub><mml:mo stretchy="true">)</mml:mo></mml:math></disp-formula>
<disp-formula id="EQ5"><label>(5)</label><mml:math id="M10"><mml:msub><mml:mi mathvariant="normal">S</mml:mi><mml:mi mathvariant="normal">A</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>2</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>3</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>3</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>4</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>4</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>5</mml:mn></mml:msub><mml:mo>&#x2217;</mml:mo></mml:msup><mml:mn>5</mml:mn><mml:mo stretchy="true">)</mml:mo><mml:mo>/</mml:mo><mml:mo stretchy="true">(</mml:mo><mml:mi mathvariant="normal">M</mml:mi><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi mathvariant="normal">M</mml:mi><mml:mn>6</mml:mn></mml:msub><mml:mo stretchy="true">)</mml:mo></mml:math></disp-formula>
<p>Note: S<sub>V</sub> is the valence score of a certain image; S<sub>A</sub> is the arousal score of a certain image.</p>
</sec>
</sec>
<sec id="sec20">
<label>3.4</label>
<title>Step 4: picture attribute construction</title>
<p>Based on <xref ref-type="disp-formula" rid="EQ6">Equations 6</xref>, <xref ref-type="disp-formula" rid="EQ7">7</xref>, we finalized the valence and arousal labels of each image, and images with consistent affective categories in Step 2 and Step 3 were retained. A total of 115 images remained for the valence dimension, and 116 images remained for the arousal dimension. In addition, we also analyzed the data by group (see <xref ref-type="supplementary-material" rid="SM1">Appendix 1</xref>), and found that all of the remaining 14 groups of pictures, except for Animal-8, were able to reflect the complete affective space.</p>
<disp-formula id="EQ6"><label>(6)</label><mml:math id="M11"><mml:mi mathvariant="italic">TV</mml:mi><mml:mo>=</mml:mo><mml:mo stretchy="true">{</mml:mo><mml:mtable columnalign="left" displaystyle="true"><mml:mtr><mml:mtd><mml:mtext mathvariant="italic">positive</mml:mtext><mml:mo>,</mml:mo><mml:mi mathvariant="italic">SV</mml:mi><mml:mo>&#x003E;</mml:mo><mml:mn>3.5</mml:mn></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext mathvariant="italic">neutral</mml:mtext><mml:mo>,</mml:mo><mml:mn>2.5</mml:mn><mml:mo>&#x2264;</mml:mo><mml:mi mathvariant="italic">SV</mml:mi><mml:mo>&#x2264;</mml:mo><mml:mn>3.5</mml:mn></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext mathvariant="italic">negative</mml:mtext><mml:mo>,</mml:mo><mml:mi mathvariant="italic">SV</mml:mi><mml:mo>&#x003C;</mml:mo><mml:mn>2.5</mml:mn></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="EQ7"><label>(7)</label><mml:math id="M12"><mml:mi mathvariant="italic">TA</mml:mi><mml:mo>=</mml:mo><mml:mo stretchy="true">{</mml:mo><mml:mtable columnalign="left" displaystyle="true"><mml:mtr><mml:mtd><mml:mtext mathvariant="italic">high</mml:mtext><mml:mo>,</mml:mo><mml:mi mathvariant="italic">SA</mml:mi><mml:mo>&#x003E;</mml:mo><mml:mn>3.5</mml:mn></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext mathvariant="italic">medium</mml:mtext><mml:mo>,</mml:mo><mml:mn>2.5</mml:mn><mml:mo>&#x2264;</mml:mo><mml:mi mathvariant="italic">SA</mml:mi><mml:mo>&#x2264;</mml:mo><mml:mn>3.5</mml:mn></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi mathvariant="italic">low</mml:mi><mml:mo>,</mml:mo><mml:mi mathvariant="italic">SA</mml:mi><mml:mo>&#x003C;</mml:mo><mml:mn>2.5</mml:mn></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Note: <italic>T<sub>V</sub></italic> is the valence category of a certain image; <italic>T<sub>A</sub></italic> is the arousal category of a certain image.</p>
<p>Further analysis showed that the 14 newly collected sets were unevenly distributed&#x2014;seven animal, five plant, and only two scene sets. To enrich the material and balance category counts, we supplemented the database with images from the original HDRPS database, whose pictures had already been judged using the same procedure but on a 9-point valence/arousal scale. To keep the granularity consistent across all materials, we recoded the 9-point HDRPS valence scores into five ordered categories (1&#x2013;2&#x202F;=&#x202F;very low, 3&#x2013;4&#x202F;=&#x202F;low, 5&#x202F;=&#x202F;neutral, 6&#x2013;7&#x202F;=&#x202F;high, 8&#x2013;9&#x202F;=&#x202F;very high; see <xref ref-type="table" rid="tab3">Table 3</xref>). After supplementation, the final database contained 20 thematic picture sets.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Supplementary images from HDRPS.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Group</th>
<th align="center" valign="top">Number</th>
<th align="center" valign="top">Dimension</th>
<th align="center" valign="top">Score</th>
<th align="center" valign="top">Number</th>
<th align="center" valign="top">Dimension</th>
<th align="center" valign="top">Score</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Animals 8</td>
<td align="center" valign="middle">Animals-n011</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">1.65</td>
<td align="center" valign="middle">/</td>
<td align="center" valign="middle">/</td>
<td align="center" valign="middle">/</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="3">Plants 6</td>
<td align="center" valign="middle">Plants-p129</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.85</td>
<td align="center" valign="middle">Plants-p107</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">3.60</td>
</tr>
<tr>
<td align="center" valign="middle">Plants-hn094</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.35</td>
<td align="center" valign="middle">Plants-ln016</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">2.56</td>
</tr>
<tr>
<td align="center" valign="middle">Plants-n001</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">2.28</td>
<td align="center" valign="middle">Plants-n008</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">1.61</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="3">Scenes 3</td>
<td align="center" valign="middle">Scenes-p006</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.74</td>
<td align="center" valign="middle">Scenes-p031</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">3.58</td>
</tr>
<tr>
<td align="center" valign="middle">Scenes-hn017</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.4</td>
<td align="center" valign="middle">Scenes-hn013</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">2.77</td>
</tr>
<tr>
<td align="center" valign="middle">Scenes-n011</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">2.21</td>
<td align="center" valign="middle">Scenes-n010</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">1.77</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="3">Scenes 4</td>
<td align="center" valign="middle">Scenes-p340</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.98</td>
<td align="center" valign="middle">Scenes-p293</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">3.71</td>
</tr>
<tr>
<td align="center" valign="middle">Scenes-hn089</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.15</td>
<td align="center" valign="middle">Scenes-hn116</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">2.87</td>
</tr>
<tr>
<td align="center" valign="middle">Scenes-n086</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">2.17</td>
<td align="center" valign="middle">Scenes-n063</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">1.79</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="3">Scenes 5</td>
<td align="center" valign="middle">Scenes-p235</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.76</td>
<td align="center" valign="middle">Scenes-p235</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">3.30</td>
</tr>
<tr>
<td align="center" valign="middle">Scenes-hn347</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.27</td>
<td align="center" valign="middle">Scenes-hn373</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">4.15</td>
</tr>
<tr>
<td align="center" valign="middle">Scenes-n234</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">2.08</td>
<td align="center" valign="middle">Scenes-ln113</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">1.86</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="3">Scenes 6</td>
<td align="center" valign="middle">Scenes-p236</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.66</td>
<td align="center" valign="middle">Scenes-p085</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">3.64</td>
</tr>
<tr>
<td align="center" valign="middle">Scenes-hn366</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">3.18</td>
<td align="center" valign="middle">Scenes-hn367</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">2.86</td>
</tr>
<tr>
<td align="center" valign="middle">Scenes-n228</td>
<td align="center" valign="middle">Valence</td>
<td align="center" valign="middle">1.99</td>
<td align="center" valign="middle">Scenes-n229</td>
<td align="center" valign="middle">Arousal</td>
<td align="center" valign="middle">1.69</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="sec21">
<label>4</label>
<title>Study 3: validation of HDRPS+</title>
<sec id="sec22">
<label>4.1</label>
<title>Step 1: presentation-mode determination</title>
<sec id="sec23">
<label>4.1.1</label>
<title>Method</title>
<sec id="sec24">
<label>4.1.1.1</label>
<title>Objective</title>
<p>In this session, we determined the optimal presentation format of the scale by comparing the accuracy rates of different presentation methods. Based on the magnitude of differences between the presented images, the presentation formats of the scale can be divided into three levels: primary, middle, and advanced (see <xref ref-type="supplementary-material" rid="SM1">Appendix 2</xref>). The primary-level format presented images of the same category and theme (e.g., all pictures are tigers). The middle-level format followed the testing method of HDRPS, presenting images of the same category but with multiple themes (e.g., presenting rabbits, dogs, and tigers at the same time). The advanced-level format followed the testing method of PAM, presenting images of multiple categories and themes (e.g., presenting lotus, birds, and mountains simultaneously).</p>
</sec>
<sec id="sec25">
<label>4.1.1.2</label>
<title>Participants</title>
<p>A total of 88 individuals applied to participate in this experiment, of whom 85 subjects (26 males, 59 females, average age 23.75) passed the Ishihara Color Blindness Test and were then randomly assigned to the experimental group (with 31 in the positive group and 29 in the negative group) and the control group (25 subjects), and each received a compensation of 10 CNY after the experiment.</p>
</sec>
<sec id="sec26">
<label>4.1.1.3</label>
<title>Procedure</title>
<p>The experimental group completed three phases per trial: (1) baseline affective self-reporting, (2) video affect evaluation, and (3) picture selection (see <xref ref-type="fig" rid="fig2">Figure 2</xref> for details). Each trial began with the baseline affective self-reporting, where subjects were required to report their affects using the 5-point SAM scale. In the video affect evaluation phase, subjects were instructed to watch a 15-s video clip and then evaluate its valence and arousal using the SAM scale. In the picture selection phase, subjects were required to make 9 rounds of picture selection based on the emotions reflected in the videos, in terms of valence and arousal dimensions, respectively. In the valence module, the subjects were required to select 1 image from 3 images that best represented the valence level of the video; and in the arousal module, subjects were required to select 1 image from 3 images that best represented the physiological or psychological arousal level of the video. Participants were given a 60-s rest time after the first trial, after which the aforementioned processes were repeated (with new materials).</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Procedure of experimental group.</p>
</caption>
<graphic xlink:href="fpsyg-16-1498143-g002.tif">
<alt-text content-type="machine-generated">Flowchart depicting two trials. Trial-1 includes: 1. Baseline affective self-reporting, 2. Affective evaluation on Video-1, 3. Picture selection for nine rounds. Trial-2 repeats these steps with Video-2. An arrow shows the progression from Start to End, indicating a Break between trials and an option to Continue.</alt-text>
</graphic>
</fig>
<p>In contrast to the experimental group, the procedure in the control group merely consisted of two phases: (1) the baseline affective self-reporting, and (2) picture selection, in which the subjects were instructed to select pictures in accordance with their emotions.</p>
</sec>
<sec id="sec27">
<label>4.1.1.4</label>
<title>Performance indices</title>
<p>(a) Internal consistency for reliability analysis: Cronbach&#x2019;s <italic>&#x03B1;</italic> for each nine-choice set; &#x03B1;&#x202F;&#x2265;&#x202F;0.70 was deemed acceptable delineated by <xref ref-type="bibr" rid="ref7">Cook et al. (2018)</xref>. (b) Time Consumption (TC) for measurement speed analysis: Seconds from items onset to mouse click. (c) Accuracy (AC) for measurement accuracy analysis: <xref ref-type="disp-formula" rid="EQ8">Equations 8</xref>, <xref ref-type="disp-formula" rid="EQ9">9</xref> were used to sequentially determine the picture selection results of 85 participants for 9 rounds, while <xref ref-type="disp-formula" rid="EQ10">Equations 10</xref>, <xref ref-type="disp-formula" rid="EQ11">11</xref> were then employed to calculate the accuracy of HDRPS+ test results.</p>
<disp-formula id="EQ8"><label>(8)</label><mml:math id="M13"><mml:mtext mathvariant="italic">RVij</mml:mtext><mml:mo>=</mml:mo><mml:mo stretchy="true">{</mml:mo><mml:mtable columnalign="left" displaystyle="true"><mml:mtr><mml:mtd><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi mathvariant="italic">Tv</mml:mi><mml:mo>=</mml:mo><mml:mi mathvariant="italic">Tvi</mml:mi></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mi mathvariant="italic">Tv</mml:mi><mml:mo>&#x2260;</mml:mo><mml:mi mathvariant="italic">Tvi</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="EQ9"><label>(9)</label><mml:math id="M14"><mml:mtext mathvariant="italic">RAij</mml:mtext><mml:mo>=</mml:mo><mml:mo stretchy="true">{</mml:mo><mml:mtable columnalign="left" displaystyle="true"><mml:mtr><mml:mtd><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi mathvariant="italic">TA</mml:mi><mml:mo>=</mml:mo><mml:mi mathvariant="italic">TAi</mml:mi></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mi mathvariant="italic">TA</mml:mi><mml:mo>&#x2260;</mml:mo><mml:mi mathvariant="italic">TAi</mml:mi></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Note: <italic>Rv<sub>ij</sub></italic> is the result of the <italic>i</italic>-th valence test of person <italic>j</italic>; <italic>Tv</italic> is the video valence category (experimental group) or self-rated valence category (control group); <italic>Tv<sub>i</sub></italic> is the valence category corresponding to the <italic>i</italic>-th selection of pictures; <italic>R<sub>Aij</sub></italic> is the result of the <italic>i</italic>-th arousal test of person <italic>j</italic>; <italic>T<sub>A</sub></italic> is the arousal category of video (experimental group) or self-rated arousal category (control group); <italic>T<sub>Ai</sub></italic> is the arousal category corresponding to the <italic>i</italic>-th selection of pictures; <italic>i</italic> is the order of picture selection ranging from 1 to 9, and <italic>j</italic> is the subject number.</p>
<disp-formula id="EQ10"><label>(10)</label><mml:math id="M15"><mml:mtext mathvariant="italic">ACVj</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mn>9</mml:mn></mml:munderover><mml:mtext mathvariant="italic">RVij</mml:mtext></mml:mrow><mml:mn>9</mml:mn></mml:mfrac><mml:mo>&#x2217;</mml:mo><mml:mn>100</mml:mn><mml:mo>%</mml:mo></mml:math></disp-formula>
<disp-formula id="EQ11"><label>(11)</label><mml:math id="M16"><mml:mtext mathvariant="italic">ACAj</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:munderover><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mn>9</mml:mn></mml:munderover><mml:mtext mathvariant="italic">RAij</mml:mtext></mml:mrow><mml:mn>9</mml:mn></mml:mfrac><mml:mo>&#x2217;</mml:mo><mml:mn>100</mml:mn><mml:mo>%</mml:mo></mml:math></disp-formula>
<p>Note: <italic>AC<sub>Vj</sub></italic>, <italic>AC<sub>Aj</sub></italic> are the accuracy rates of valence and arousal tests using HDRPS+ of the <italic>j</italic>-th individual, respectively.</p>
</sec>
</sec>
<sec id="sec28">
<label>4.1.2</label>
<title>Results</title>
<sec id="sec29">
<label>4.1.2.1</label>
<title>Reliability analysis</title>
<p>After deleting redundant and missing data, we obtained a total of 2,988 picture-selection results, 1,494 each for both valence and arousal dimensions. Cronbach&#x2019;s &#x03B1; was computed across nine selection rounds for both the experimental and control groups to assess internal consistency. As shown in <xref ref-type="table" rid="tab4">Table 4</xref>, the HDRPS+ demonstrates acceptable reliability as the consistency test results of picture selection were all higher than the threshold of 0.5 delineated by <xref ref-type="bibr" rid="ref7">Cook et al. (2018)</xref>.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Consistency test for picture selection.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Group</th>
<th align="center" valign="top" colspan="2">Cronbach&#x2019;s alpha of valence</th>
<th align="center" valign="top" colspan="2">Cronbach&#x2019;s alpha of arousal</th>
</tr>
<tr>
<th align="center" valign="top">Original</th>
<th align="center" valign="top">Min after removing items</th>
<th align="center" valign="top">Original</th>
<th align="center" valign="top">Min after removing items</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Experimental</td>
<td align="center" valign="middle">0.894</td>
<td align="center" valign="middle">0.875</td>
<td align="center" valign="middle">0.666</td>
<td align="center" valign="middle">0.620</td>
</tr>
<tr>
<td align="left" valign="middle">Control</td>
<td align="center" valign="middle">0.805</td>
<td align="center" valign="middle">0.752</td>
<td align="center" valign="middle">0.720</td>
<td align="center" valign="middle">0.660</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec30">
<label>4.1.2.2</label>
<title>Measurement speed analysis</title>
<p>The average time consumption (TC) was 8.022&#x202F;s per trial, indicating quick testing and rapid judgment responses. Then, a one-way ANOVA showed a significant effect of presentation method on time consumption (<italic>F</italic>&#x202F;=&#x202F;5.19, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001). Further comparison of means showed that TC<sub>primary</sub> &#x003C; TC<sub>advanced</sub> &#x003C; TC<sub>middle</sub>. Considering that image category might also influence response time, we conducted a separate analysis across categories, but no significant differences emerged, although animal images were numerically the slowest to rate.</p>
<p>Then we analyzed the effect of demographic data such as gender and education on measurement time using an independent-samples <italic>t</italic>-test. The results showed that gender significantly affected test speed (<italic>F</italic>&#x202F;=&#x202F;9.461, <italic>p</italic>&#x202F;=&#x202F;0.002), with females taking longer to select pictures.</p>
</sec>
<sec id="sec31">
<label>4.1.2.3</label>
<title>Measurement accuracy analysis</title>
<p>The results showed that the accuracy of the experimental group&#x2019;s valence test was 65.80%, while that of the arousal test was 58.81%. The control group exhibited a valence accuracy rate of 63.56% and an arousal accuracy rate of 62.44%. Comparison of the accuracy rates of different testing methods showed that AC<sub>primary</sub>&#x202F;&#x003E;&#x202F;AC<sub>middle</sub>&#x202F;&#x003E;&#x202F;AC<sub>advanced</sub>. Further analysis of the measurement results for different categories of images revealed that plant images demonstrated the highest accuracy in valence measurement (see <xref ref-type="table" rid="tab5">Table 5</xref> for details).</p>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>Accuracy of presentation methods.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Method</th>
<th align="center" valign="top" colspan="2">Experimental group</th>
<th align="center" valign="top" colspan="2">Control group</th>
</tr>
<tr>
<th align="center" valign="top">ACv</th>
<th align="center" valign="top">AC<sub>A</sub></th>
<th align="center" valign="top">ACv</th>
<th align="center" valign="top">AC<sub>A</sub></th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">General</td>
<td align="center" valign="middle">65.80%</td>
<td align="center" valign="middle">58.81%</td>
<td align="center" valign="middle">63.56%</td>
<td align="center" valign="middle">62.44%</td>
</tr>
<tr>
<td align="left" valign="middle">Advanced</td>
<td align="center" valign="middle">59.48%</td>
<td align="center" valign="middle">52.72%</td>
<td align="center" valign="middle">59.46%</td>
<td align="center" valign="middle">53.37%</td>
</tr>
<tr>
<td align="left" valign="middle">Middle</td>
<td align="center" valign="middle">64.94%</td>
<td align="center" valign="middle">52.87%</td>
<td align="center" valign="middle">63.16%</td>
<td align="center" valign="middle">58.55%</td>
</tr>
<tr>
<td align="left" valign="middle">Middle-animals</td>
<td align="center" valign="middle">64.66%</td>
<td align="center" valign="middle">48.28%</td>
<td align="center" valign="middle">70%</td>
<td align="center" valign="middle">56%</td>
</tr>
<tr>
<td align="left" valign="middle">Middle-plants</td>
<td align="center" valign="middle">68.97%</td>
<td align="center" valign="middle">50.86%</td>
<td align="center" valign="middle">70%</td>
<td align="center" valign="middle">54%</td>
</tr>
<tr>
<td align="left" valign="middle">Middle-scenes</td>
<td align="center" valign="middle">61.21%</td>
<td align="center" valign="middle">59.48%</td>
<td align="center" valign="middle">50%</td>
<td align="center" valign="middle">68%</td>
</tr>
<tr>
<td align="left" valign="middle">Primary</td>
<td align="center" valign="middle">72.99%</td>
<td align="center" valign="middle">70.89%</td>
<td align="center" valign="middle">68%</td>
<td align="center" valign="middle">75.33%</td>
</tr>
<tr>
<td align="left" valign="middle">Primary-animals</td>
<td align="center" valign="middle">65.52%</td>
<td align="center" valign="middle">69.83%</td>
<td align="center" valign="middle">58%</td>
<td align="center" valign="middle">76%</td>
</tr>
<tr>
<td align="left" valign="middle">Primary-plants</td>
<td align="center" valign="middle">76.72%</td>
<td align="center" valign="middle">74.78%</td>
<td align="center" valign="middle">80%</td>
<td align="center" valign="middle">74%</td>
</tr>
<tr>
<td align="left" valign="middle">Primary-scenes</td>
<td align="center" valign="middle">76.72%</td>
<td align="center" valign="middle">68.10%</td>
<td align="center" valign="middle">66%</td>
<td align="center" valign="middle">76%</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Similarly, we investigated the effect of gender, education, and number of trials on accuracy using independent-samples <italic>t</italic>-tests, yet the results showed that the accuracy of the HDRPS+ was not affected by these factors. We also explored the relationship between individual emotional states and measurement accuracy using correlation analysis. The data showed a significant positive correlation between valence and test accuracy (<italic>r</italic>&#x202F;=&#x202F;0.153&#x002A;&#x002A;), indicating that higher valences were associated with higher accuracy in HDRPS+ measurement results.</p>
<p>In summary, the primary mode can not only reduce the influence of personal preference and picture attractiveness on measurement results, but also has the advantages of rapid measurement and high accuracy, which is recommended as the optimal measurement method for the HDRPS+.</p>
</sec>
</sec>
</sec>
<sec id="sec32">
<label>4.2</label>
<title>Step 2: validation based on large-scale samples</title>
<sec id="sec33">
<label>4.2.1</label>
<title>Participants</title>
<p>Through Step 1, we have determined the presentation format of HDRPS+, and in this session, we would test the validity of HDRPS+ among student and working populations simultaneously. A snowball sampling technique was employed to openly recruit subjects within the community. After deleting 8 subjects due to data loss caused by network issues and 18 subjects with color blindness, a total of 442 subjects (148 males and 294 females, 13 to 55&#x202F;years old, mean age of 27.06&#x202F;years) participated in this experiment, including 245 students and 197 working individuals (details provided in <xref ref-type="table" rid="tab6">Table 6</xref>). All participants provided informed consent prior to the experiment, and a symbolic reward was given upon completion of the study.</p>
<table-wrap position="float" id="tab6">
<label>Table 6</label>
<caption>
<p>Demographic information of participants.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" colspan="2">Item</th>
<th align="center" valign="top">Student group</th>
<th align="center" valign="top">Working group</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle" rowspan="2">Gender</td>
<td align="center" valign="middle">Male</td>
<td align="center" valign="middle">76</td>
<td align="center" valign="middle">72</td>
</tr>
<tr>
<td align="center" valign="middle">Female</td>
<td align="center" valign="middle">169</td>
<td align="center" valign="middle">125</td>
</tr>
<tr>
<td align="left" valign="middle" colspan="2">Average age</td>
<td align="center" valign="middle">22.79</td>
<td align="center" valign="middle">32.37</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="4">Education</td>
<td align="center" valign="middle">High school or below</td>
<td align="center" valign="middle">5</td>
<td align="center" valign="middle">13</td>
</tr>
<tr>
<td align="center" valign="middle">Associate degree</td>
<td align="center" valign="middle">0</td>
<td align="center" valign="middle">18</td>
</tr>
<tr>
<td align="center" valign="middle">Bachelor&#x2019;s degree</td>
<td align="center" valign="middle">76</td>
<td align="center" valign="middle">85</td>
</tr>
<tr>
<td align="center" valign="middle">Master&#x2019;s degree or above</td>
<td align="center" valign="middle">164</td>
<td align="center" valign="middle">81</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="5">Years of experience</td>
<td align="center" valign="middle">Less than 3&#x202F;years</td>
<td align="center" valign="middle">N/A</td>
<td align="center" valign="middle">66</td>
</tr>
<tr>
<td align="center" valign="middle">3&#x2013;5&#x202F;years</td>
<td align="center" valign="middle">N/A</td>
<td align="center" valign="middle">19</td>
</tr>
<tr>
<td align="center" valign="middle">5&#x2013;10&#x202F;years</td>
<td align="center" valign="middle">N/A</td>
<td align="center" valign="middle">59</td>
</tr>
<tr>
<td align="center" valign="middle">10&#x2013;15&#x202F;years</td>
<td align="center" valign="middle">N/A</td>
<td align="center" valign="middle">16</td>
</tr>
<tr>
<td align="center" valign="middle">More than 15&#x202F;years</td>
<td align="center" valign="middle">N/A</td>
<td align="center" valign="middle">37</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec34">
<label>4.2.2</label>
<title>Method</title>
<p>This experiment was conducted for 7 consecutive days (from Monday to Sunday) to further validate the measurement advantage of the HDRPS+ and the effect of continuous measurement. To minimize potential interference with participants&#x2019; normal work and life, participants were required to use a WeChat Mini-Program for HDRPS+ testing after completing their daily work.</p>
<p>The daily test consisted of three parts. First, HDRPS+ Test was administered with one type of picture (e.g., animals or plants or scenes) each day and required subjects to select 1 picture from 3 pictures (as well as an option of &#x201C;none of the above&#x201D;) with the same theme that best represented their valence or arousal level on that day, respectively (see <xref ref-type="fig" rid="fig3">Figure 3</xref> for details). Meanwhile, since the pictures might induce emotional changes in the subjects (e.g., IAPS, NAPS, etc.), subjects were asked whether their affects were influenced by pictures after the HDRPS+ test was completed. The second is the SAM Scale Test, in which subjects were told to report their current valence and arousal levels using SAM scales. Third, after the daily test was completed, the mini-program provided feedback on the pictorial scale test results, and participants were asked to rate the accuracy of those responses using a 5-point scale. In addition, after the 7 daily tests were completed, subjects were required to complete the PANAS scale to assess their overall emotional states during the past week.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Example screenshots for <bold>(a)</bold> valence and <bold>(b)</bold> arousal tests.</p>
</caption>
<graphic xlink:href="fpsyg-16-1498143-g003.tif">
<alt-text content-type="machine-generated">Two panels labeled "a" and "b" show image selection prompts. Panel "a" asks to choose a picture representing mood or emotional state, featuring images of flowers and an option labeled "None of the above." Panel "b" asks to select an image representing physical or mental arousal, with similar flower images and a "None of the above" option. Both panels include a "Submit" button.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec35">
<label>4.2.3</label>
<title>Results</title>
<p>A total of 2,588 test sessions were completed by 442 participants. Daily logs showed the attrition inherent in continuous self-report: 80.32% of participants provided data on at least five workdays, and 67.19% completed every scheduled session (<xref ref-type="fig" rid="fig4">Figure 4</xref>). Laboratory research on pictorial questionnaires has shown higher response motivation than verbal formats (<xref ref-type="bibr" rid="ref2">Baumgartner et al., 2019</xref>); our retention figures suggest that the pictorial HDRPS+ can likewise sustain participant engagement during field studies. However, it should be noted that prior studies that used realistic-picture emotion scales, including the original HDRPS (<xref ref-type="bibr" rid="ref27">Liu et al., 2023</xref>), rarely report diary-retention figures, and because the present design combined our new pictorial tool with established scales, these completion rates are offered for descriptive context rather than as a basis for formal statistical comparison with traditional EMA or text-based surveys.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Completion rate of HDRPS+ over 7 days.</p>
</caption>
<graphic xlink:href="fpsyg-16-1498143-g004.tif">
<alt-text content-type="machine-generated">Line graph showing the percentage decrease over seven days. Starting at 100 percent on Day 1, it declines to 90.27 percent on Day 2, 86.43 percent on Day 3, 83.48 percent on Day 4, 80.32 percent on Day 5, 76.92 percent on Day 6, and 67.19 percent on Day 7.</alt-text>
</graphic>
</fig>
<sec id="sec36">
<label>4.2.3.1</label>
<title>Measurement mechanism test</title>
<p>To examine whether viewing the pictures altered participants&#x2019; affect, this study directly asked them to rate the influence of each set on a five-point scale (1 for &#x201C;completely affected,&#x201D; 2 for &#x201C;greatly affected,&#x201D; 3 for &#x201C;moderately affected,&#x201D; 4 for &#x201C;slightly affected,&#x201D; and 5 for &#x201C;not affected at all&#x201D;). Most respondents reported only slight or no change, and the ratings also clustered around the midpoint of the scale (M&#x202F;=&#x202F;3.36, Mdn&#x202F;=&#x202F;3, IQR&#x202F;=&#x202F;2&#x2013;4), suggesting that HDRPS+ can be administered without materially influencing the affect it is intended to measure.</p>
</sec>
<sec id="sec37">
<label>4.2.3.2</label>
<title>Reliability analysis</title>
<p>Reliability pertains to the ability of an instrument to consistently measure an attribute (<xref ref-type="bibr" rid="ref14">DeVon et al., 2007</xref>). Considering that affect is a relatively short-lived affective state, traditional reliability testing methods (e.g., Cronbach&#x2019;s alpha) are not applicable. We therefore reviewed the development process of picture-based scales such as PAM and Affective Slider, and found that none of the prior studies had conducted reliability analyses due to the variability of affect.</p>
<p>Considering that reliability analysis is necessary for instrument development, this study used the results of the SAM test to calculate the consistency of the HDRPS+ measurement results, and used this as a reliability test result. The resulting consistency coefficients were 0.689 for valence (animals&#x202F;=&#x202F;0.668, plants&#x202F;=&#x202F;0.718, scenes&#x202F;=&#x202F;0.666) and 0.645 for arousal (animals&#x202F;=&#x202F;0.663, plants&#x202F;=&#x202F;0.642, scenes&#x202F;=&#x202F;0.631). All values exceed the 0.50 benchmark proposed by <xref ref-type="bibr" rid="ref7">Cook et al. (2018)</xref>, indicating that HDRPS+ demonstrates acceptable measurement stability across thematic categories.</p>
</sec>
<sec id="sec38">
<label>4.2.3.3</label>
<title>Validity analysis</title>
<p>Validity refers to the degree to which a tool measures what it intends to measure, and it can be divided into content validity, criterion-related validity, and construct validity (<xref ref-type="bibr" rid="ref30">Lynn, 1986</xref>). As studies 1 and 2 have already ensured the content validity of the pictorial scale through rigorous experimental design, we mainly focus on assessing criterion-related validity and construct validity here.</p>
<p>Concurrent validity, as a criterion-related validity, refers to the degree of correlation between the results of a newly developed scale and an existing structurally similar scale, measured at the same point in time (<xref ref-type="bibr" rid="ref14">DeVon et al., 2007</xref>). Generally, a coefficient greater than 0.45 has been commonly recommended as an indicator of concurrent validity (<xref ref-type="bibr" rid="ref7">Cook et al., 2018</xref>). By calculating the correlation between the HDRPS+ and the SAM scale, we observed that the correlation for valence was 0.626&#x002A;&#x002A; and the correlation for arousal was 0.520&#x002A;&#x002A;, both of which exceeded the judgment criterion of concurrent validity; thus, we can conclude that the HDRPS+ passed the concurrent validity test.</p>
<p>Construct validity can be examined through convergent validity and discriminant validity. Convergent validity refers to the degree of similarity in measurement results when different measurement methods are used to measure the same characteristic, and 0.5 is generally used as an indicator of convergent validity (<xref ref-type="bibr" rid="ref7">Cook et al., 2018</xref>). Since the HDRPS+ and SAM scale measure the same dimensions, the correlation coefficient between them can be used as the indicator of convergent validity judgment as well. Based on the criteria for evaluating convergent validity, we found that the HDRPS+ also passed the convergent validity test.</p>
<p>Discriminant validity refers to the degree of association between two theoretically unrelated constructs, with smaller correlation coefficients indicating better discriminant validity. <xref ref-type="bibr" rid="ref7">Cook et al. (2018)</xref> suggested that a coefficient of less than 0.45 can be considered significant for discriminant validity. According to this, we calculated the correlation between the HDRPS+ and PANAS scales and found that all correlation coefficients were less than 0.45 (see <xref ref-type="table" rid="tab7">Table 7</xref> for details), indicating that the HDRPS+ is essentially different from the PANAS scale.</p>
<table-wrap position="float" id="tab7">
<label>Table 7</label>
<caption>
<p>Discriminant validity test.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Item</th>
<th align="center" valign="top">Arousal of the HDRPS+</th>
<th align="center" valign="top">PANAS</th>
<th align="center" valign="top">PA</th>
<th align="center" valign="top">NA</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Valence of the HDRPS+</td>
<td align="center" valign="middle">0.462&#x002A;&#x002A;</td>
<td align="center" valign="middle">0.314&#x002A;&#x002A;</td>
<td align="center" valign="middle">0.289&#x002A;&#x002A;</td>
<td align="center" valign="middle">&#x2212;0.157&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="middle">Arousal of the HDRPS+</td>
<td align="center" valign="middle">1</td>
<td align="center" valign="middle">0.290&#x002A;&#x002A;</td>
<td align="center" valign="middle">0.249&#x002A;&#x002A;</td>
<td align="center" valign="middle">&#x2212;0.163&#x002A;&#x002A;</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>&#x002A;&#x002A;<italic>p</italic> &#x003C; 0.01.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec39">
<label>4.2.3.4</label>
<title>Analysis of social desirability effects</title>
<p>To gain a more intuitive understanding of the validity of the measurement results, we asked participants to provide direct evaluations (i.e., direct method) of the HDRPS+ measurement results using a 5-point scale after completing the daily test. The participants assigned values of 5, 4, 3, 2, and 1 to indicate &#x201C;very accurate,&#x201D; &#x201C;accurate,&#x201D; &#x201C;average,&#x201D; &#x201C;not accurate,&#x201D; and &#x201C;not accurate at all,&#x201D; respectively. The accuracy rate of HDRPS+ was found to be 78.03% based on this scoring method. We also used the SAM scale as a benchmark to calculate the accuracy rate of HDRPS+ (i.e., indirect method) and found that the accuracy rates of valence and arousal were 68.9 and 64.5%, respectively, which were lower than the accuracy rates obtained using the direct method.</p>
<p>Considering that the HDRPS+ was developed based on the technique of psychological projection and its measurement method is more concealed compared to the SAM scale, we have reason to believe that self-reporting using the SAM scale may lead to concealment behavior, whereas using the HDRPS+ for affective measures may mitigate the social desirability bias to some extent.</p>
<p>To further validate this inference, this study first collated and analyzed the distribution of the test results on the SAM scale and the HDRPS+ (see <xref ref-type="table" rid="tab8">Table 8</xref> for details), and revealed that subjects reported more positive and fewer negative emotions when using the SAM scale for valence measures. In terms of arousal distribution, subjects reported more high or low arousal levels when using the SAM scale. However, it is commonly assumed that individuals tend to have moderate arousal levels in their daily studies and work, which is more consistent with results obtained from the HDRPS+ test.</p>
<table-wrap position="float" id="tab8">
<label>Table 8</label>
<caption>
<p>Distribution of the test results.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Scale</th>
<th align="center" valign="top" colspan="2">Valence</th>
<th align="center" valign="top" colspan="2">Arousal</th>
</tr>
<tr>
<th align="center" valign="top">Category</th>
<th align="center" valign="top">Number</th>
<th align="center" valign="top">Category</th>
<th align="center" valign="top">Number</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle" rowspan="3">SAM</td>
<td align="center" valign="middle">Positive</td>
<td align="center" valign="middle">1,535</td>
<td align="center" valign="middle">High</td>
<td align="center" valign="middle">986</td>
</tr>
<tr>
<td align="center" valign="middle">Neutral</td>
<td align="center" valign="middle">750</td>
<td align="center" valign="middle">Medium</td>
<td align="center" valign="middle">966</td>
</tr>
<tr>
<td align="center" valign="middle">Negative</td>
<td align="center" valign="middle">303</td>
<td align="center" valign="middle">Low</td>
<td align="center" valign="middle">636</td>
</tr>
<tr>
<td align="left" valign="middle" rowspan="2">HDRPS+</td>
<td align="center" valign="middle">Positive</td>
<td align="center" valign="middle">1,203</td>
<td align="center" valign="middle">High</td>
<td align="center" valign="middle">933</td>
</tr>
<tr>
<td align="center" valign="middle">Neutral</td>
<td align="center" valign="middle">985</td>
<td align="center" valign="middle">Medium</td>
<td align="center" valign="middle">1,124</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="sec40">
<label>5</label>
<title>Discussion</title>
<p>Affect measurement is one of the fundamental issues in the field of emotion science. Currently, in the field of organizational behavior, studies predominantly rely on discrete emotion theories, employing adjectives and recall-based measurement methods that fail to meet the demands for dynamic measurement. Moreover, existing dynamic methods face challenges such as high costs, contextual limitations, and constrained measurement results. This research is dedicated to the development and validation of a cutting-edge measurement tool, HDRPS+, which uses real images as anchors based on the theory of emotional dimensions, creating an intuitive, innovative, and fast tool applicable to organizational settings.</p>
<p>To address the limitations of HDRPS, the construction process was redesigned, involving experiments to evaluate benchmark images, validate effectiveness, assess emotional types, and evaluate emotional scores, ultimately creating a database containing 20 sets of theme-specific real images. Building on two-dimensional emotion theory, HDRPS+ was developed and tested with 85 participants to determine the optimal presentation method&#x2014;displaying three images of the same category and theme at once. Subsequently, 442 participants were recruited for a seven-day diary study, which demonstrated that HDRPS+ possesses high reliability and validity, reduces the impact of social desirability, and achieves an accuracy rate of 78.03%.</p>
<p>During the construction of the picture pool, we noticed that the contents of pictures with living subjects, such as animals and plants, are more likely to convey affective meanings, while pictures of non-living subjects, such as complex scenes, have difficulty expressing clear emotional meanings. Therefore, extra attention should be paid to the influence of vitality on emotional meanings when developing scales in the future. We also investigated the factors influencing the speed of the HDRPS+ test and found that female participants responded more slowly, possibly because women engage in a more elaborate cognitive-appraisal sequence when evaluating emotions (<xref ref-type="bibr" rid="ref29">Lively, 2008</xref>). Furthermore, we analyzed the relationship between subjects&#x2019; affective state and test speed, and found that subjects in high-valence levels had the fastest testing speeds, which may be related to the attention bias moderated by affect proposed by <xref ref-type="bibr" rid="ref35">Rozin and Royzman (2001)</xref> and <xref ref-type="bibr" rid="ref40">Smith et al. (2003)</xref>.</p>
<p>Upon analyzing the accuracy of HDRPS+, it was found that using HDRPS+ for valence testing could weaken the influence of masking behaviors to some extent, but arousal testing with this instrument reported more moderate arousal and fewer arousal fluctuations. Reviewing the previous studies, we suggest that HDRPS+ did not show the expected attenuating effect when measuring arousal, possibly due to problems in defining arousal (<xref ref-type="bibr" rid="ref38">Schimmack and Grob, 2000</xref>). The SAM scale defines arousal as a process of change from relaxed and sleepy to excited and energetic, which implicitly includes two sub-dimensions: mental arousal and physical arousal. For mental arousal, individuals may experience a process of change from relaxed to tense, and they may report lower arousal to express a more relaxed personal state. For physical arousal, individuals may experience a process of change from sleepy and tired to energetic, and they may report higher arousal to present a positive and energetic personal state. Variations in the understanding of arousal contributed to the reporting of divergent arousal levels, indicating a need for a more nuanced approach in measuring arousal with HDRPS+.</p>
<p>This research features key innovations both theoretically and practically. First, it developed and optimized a cutting-edge research tool, verifying the image-emotion reflection mechanism and creating an emotional measurement tool that is intuitive, quick, and responsive. This represents a significant advancement in emotional measurement methods, providing valuable tools and data for studies in organizational behavior and emotion science. Second, in terms of application, HDRPS+ uses real images categorized by similarity to measure individual emotions through projective techniques directly, reducing the need for textual analysis and expression. This accelerates response times, decreases social desirability bias, and is particularly suited to the modern era&#x2019;s reading and information processing demands, offering substantial practical value.</p>
<p>However, the study also presents certain limitations. Apart from the potential statistical bias that may result from our gender-imbalanced samples, several additional limitations deserve mention. First, although HDRPS+ separates valence and arousal with themed photographs, each assessment uses only three images per dimension, which may be too coarse to capture subtle affective nuances; future studies could compare HDRPS+ with slider-based tools such as the SAM grid or Affective Slider to achieve finer resolution. Second, the current picture bank contains only 20 theme-specific sets, so prolonged deployment (e.g., longer than 1 month) may introduce stimulus repetition and participant fatigue, highlighting the need to enlarge the image database. Third, although HDRPS+ appears to offer advantages like higher participant engagement and lower social-expectation bias, we could only assess these advantages preliminarily, as neither our study nor most prior work has collected comparable adherence data or explicit social-desirability measures. Future research should gather matched engagement data and include straightforward checks such as anonymous versus identified responding or physiological markers, to permit systematic comparisons. Finally, HDRPS+ was developed entirely within a Chinese context; despite a small validation with Chinese high-school students in the United States, its cross-cultural equivalence remains uncertain. Researchers applying HDRPS+ in other cultures should therefore conduct preliminary image evaluations and, if necessary, adapt the stimulus pool to the local context.</p>
</sec>
<sec sec-type="conclusions" id="sec41">
<label>6</label>
<title>Conclusion</title>
<p>This study developed and validated HDRPS+, a novel emotional measurement tool based on the design principles of PAM and HDRPS, utilizing real images as anchors and suitable for organizational contexts. By verifying the image-emotion reflection mechanism, this research not only enhanced the intuitiveness and motivational appeal of the emotional measurement tool but also achieved rapid response judgment, demonstrating its innovative capabilities in the field of emotion measurement. Specifically, the application of HDRPS+ reduces the reliance on traditional textual analysis by directly measuring individual emotions through projective techniques, effectively minimizing social desirability biases and accelerating participant response times. These features make HDRPS+ particularly suitable for the digital age, aligning with modern needs for quick reading and information processing, and offering significant practical application value and broad developmental prospects. The study also revealed limitations of HDRPS+, including issues with emotional granularity, constraints in the diversity of image materials, and challenges in cross-cultural applications. Future efforts should focus on further optimizing the tool&#x2019;s design and application to enhance its broad applicability and accuracy. The HDRPS+ tool, along with its criterion valence and arousal scores, can be requested at <ext-link xlink:href="https://osf.io/d4wcn" ext-link-type="uri">https://osf.io/d4wcn</ext-link> (<xref ref-type="bibr" rid="ref28">Liu et al., 2024</xref>).</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec42">
<title>Data availability statement</title>
<p>The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found at: <ext-link xlink:href="https://osf.io/d4wcn" ext-link-type="uri">https://osf.io/d4wcn</ext-link>.</p>
</sec>
<sec sec-type="ethics-statement" id="sec43">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Business School of Sichuan University. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="sec44">
<title>Author contributions</title>
<p>PL: Conceptualization, Project administration, Supervision, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. YW: Formal analysis, Investigation, Methodology, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. YLL: Data curation, Formal analysis, Methodology, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. JH: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. YYL: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. KZ: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. JM: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="funding-information" id="sec45">
<title>Funding</title>
<p>The author(s) declare that no financial support was received for the research and/or publication of this article.</p>
</sec>
<sec sec-type="COI-statement" id="sec46">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="sec47">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec48">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/fpsyg.2025.1498143/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/fpsyg.2025.1498143/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Bachmann</surname> <given-names>A.</given-names></name> <name><surname>Klebsattel</surname> <given-names>C.</given-names></name> <name><surname>Budde</surname> <given-names>M.</given-names></name> <name><surname>Riedel</surname> <given-names>T.</given-names></name> <name><surname>Beigl</surname> <given-names>M.</given-names></name> <name><surname>Reichert</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>How to use smartphones for less obtrusive ambulatory mood assessment and mood recognition</article-title>. <conf-name>Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2015 ACM International Symposium on Wearable Computers &#x2013; UbiComp &#x2018;15</conf-name>, <fpage>693</fpage>&#x2013;<lpage>702</lpage>. doi: <pub-id pub-id-type="doi">10.1145/2800835.2804394</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Baumgartner</surname> <given-names>J.</given-names></name> <name><surname>Frei</surname> <given-names>N.</given-names></name> <name><surname>Kleinke</surname> <given-names>M.</given-names></name> <name><surname>Sauer</surname> <given-names>J.</given-names></name> <name><surname>Sonderegger</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>Pictorial system usability scale (P-SUS): developing an instrument for measuring perceived usability</article-title>. <conf-name>Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems</conf-name>, <fpage>1</fpage>&#x2013;<lpage>11</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3290605.3300299</pub-id></citation></ref>
<ref id="ref3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Betella</surname> <given-names>A.</given-names></name> <name><surname>Verschure</surname> <given-names>P. F.</given-names></name></person-group> (<year>2016</year>). <article-title>The affective slider: a digital self-assessment scale for the measurement of human emotions</article-title>. <source>PLoS One</source> <volume>11</volume>:<fpage>e0148037</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0148037</pub-id>, PMID: <pub-id pub-id-type="pmid">26849361</pub-id></citation></ref>
<ref id="ref4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bolpagni</surname> <given-names>M.</given-names></name> <name><surname>Pardini</surname> <given-names>S.</given-names></name> <name><surname>Dianti</surname> <given-names>M.</given-names></name> <name><surname>Gabrielli</surname> <given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>Personalized stress detection using biosignals from wearables: a scoping review</article-title>. <source>Sensors</source> <volume>24</volume>:<fpage>3221</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s24103221</pub-id>, PMID: <pub-id pub-id-type="pmid">38794074</pub-id></citation></ref>
<ref id="ref5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bradley</surname> <given-names>M. M.</given-names></name> <name><surname>Lang</surname> <given-names>P. J.</given-names></name></person-group> (<year>1994</year>). <article-title>Measuring emotion: the self-assessment manikin and the semantic differential</article-title>. <source>J. Behav. Ther. Exp. Psychiatry</source> <volume>25</volume>, <fpage>49</fpage>&#x2013;<lpage>59</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0005-7916(94)90063-9</pub-id>, PMID: <pub-id pub-id-type="pmid">7962581</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cook</surname> <given-names>A.</given-names></name> <name><surname>Roberts</surname> <given-names>D.</given-names></name> <name><surname>Nelson</surname> <given-names>K.</given-names></name> <name><surname>Clark</surname> <given-names>B. R.</given-names></name> <name><surname>Parker</surname> <given-names>B. E.</given-names> <suffix>Jr.</suffix></name></person-group> (<year>2018</year>). <article-title>Development of a pictorial scale for assessing functional interference with chronic pain: the pictorial pain interference questionnaire</article-title>. <source>J. Pain Res.</source> <volume>11</volume>, <fpage>1343</fpage>&#x2013;<lpage>1354</lpage>. doi: <pub-id pub-id-type="doi">10.2147/JPR.S160801</pub-id>, PMID: <pub-id pub-id-type="pmid">30050318</pub-id></citation></ref>
<ref id="ref8"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Cowie</surname> <given-names>R.</given-names></name> <name><surname>Douglas-Cowie</surname> <given-names>E.</given-names></name></person-group> (<year>1996</year>). <article-title>Automatic statistical analysis of the signal and prosodic signs of emotion in speech</article-title>. <conf-name>Proceeding of Fourth International Conference on Spoken Language Processing. ICSLP&#x2019;96</conf-name>, <volume>3</volume>, <fpage>1989</fpage>&#x2013;<lpage>1992</lpage>. Available online at: <ext-link xlink:href="https://ieeexplore.ieee.org/abstract/document/608027/" ext-link-type="uri">https://ieeexplore.ieee.org/abstract/document/608027/</ext-link></citation></ref>
<ref id="ref9"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Cowie</surname> <given-names>R.</given-names></name> <name><surname>Douglas-Cowie</surname> <given-names>E.</given-names></name> <name><surname>Savvidou</surname> <given-names>S.</given-names></name> <name><surname>McMahon</surname> <given-names>E.</given-names></name> <name><surname>Sawey</surname> <given-names>M.</given-names></name> <name><surname>Schr&#x00F6;der</surname> <given-names>M.</given-names></name></person-group> (<year>2000</year>). <article-title>&#x201C;FEELTRACE&#x201D;: an instrument for recording perceived emotion in real time</article-title>. <conf-name>ISCA tutorial and research workshop (ITRW) on speech and emotion</conf-name>. Available online at: <ext-link xlink:href="https://www.isca-archive.org/speechemotion_2000/cowie00b_speechemotion.pdf" ext-link-type="uri">https://www.isca-archive.org/speechemotion_2000/cowie00b_speechemotion.pdf</ext-link></citation></ref>
<ref id="ref10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cranford</surname> <given-names>J. A.</given-names></name> <name><surname>Shrout</surname> <given-names>P. E.</given-names></name> <name><surname>Iida</surname> <given-names>M.</given-names></name> <name><surname>Rafaeli</surname> <given-names>E.</given-names></name> <name><surname>Yip</surname> <given-names>T.</given-names></name> <name><surname>Bolger</surname> <given-names>N.</given-names></name></person-group> (<year>2006</year>). <article-title>A procedure for evaluating sensitivity to within-person change: can mood measures in diary studies detect change reliably?</article-title> <source>Personal. Soc. Psychol. Bull.</source> <volume>32</volume>, <fpage>917</fpage>&#x2013;<lpage>929</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0146167206287721</pub-id>, PMID: <pub-id pub-id-type="pmid">16738025</pub-id></citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dan-Glauser</surname> <given-names>E. S.</given-names></name> <name><surname>Scherer</surname> <given-names>K. R.</given-names></name></person-group> (<year>2011</year>). <article-title>The Geneva affective picture database (GAPED): a new 730-picture database focusing on valence and normative significance</article-title>. <source>Behav. Res. Methods</source> <volume>43</volume>, <fpage>468</fpage>&#x2013;<lpage>477</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-011-0064-1</pub-id>, PMID: <pub-id pub-id-type="pmid">21431997</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Davis</surname> <given-names>M. A.</given-names></name></person-group> (<year>2009</year>). <article-title>Understanding the relationship between mood and creativity: a meta-analysis</article-title>. <source>Organ. Behav. Hum. Decis. Process.</source> <volume>108</volume>, <fpage>25</fpage>&#x2013;<lpage>38</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.obhdp.2008.04.001</pub-id></citation></ref>
<ref id="ref13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Desmet</surname> <given-names>P. M. A.</given-names></name> <name><surname>Vastenburg</surname> <given-names>M. H.</given-names></name> <name><surname>Romero</surname> <given-names>N.</given-names></name></person-group> (<year>2016</year>). <article-title>Mood measurement with pick-A-mood: review of current methods and design of a pictorial self-report scale</article-title>. <source>J. Design Res.</source> <volume>14</volume>:<fpage>241</fpage>. doi: <pub-id pub-id-type="doi">10.1504/JDR.2016.079751</pub-id></citation></ref>
<ref id="ref14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>DeVon</surname> <given-names>H. A.</given-names></name> <name><surname>Block</surname> <given-names>M. E.</given-names></name> <name><surname>Moyle-Wright</surname> <given-names>P.</given-names></name> <name><surname>Ernst</surname> <given-names>D. M.</given-names></name> <name><surname>Hayden</surname> <given-names>S. J.</given-names></name> <name><surname>Lazzara</surname> <given-names>D. J.</given-names></name> <etal/></person-group>. (<year>2007</year>). <article-title>A psychometric toolbox for testing validity and reliability</article-title>. <source>J. Nurs. Scholarsh.</source> <volume>39</volume>, <fpage>155</fpage>&#x2013;<lpage>164</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1547-5069.2007.00161.x</pub-id>, PMID: <pub-id pub-id-type="pmid">17535316</pub-id></citation></ref>
<ref id="ref15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Doherty</surname> <given-names>C.</given-names></name> <name><surname>Baldwin</surname> <given-names>M.</given-names></name> <name><surname>Lambe</surname> <given-names>R.</given-names></name> <name><surname>Altini</surname> <given-names>M.</given-names></name> <name><surname>Caulfield</surname> <given-names>B.</given-names></name></person-group> (<year>2025</year>). <article-title>Privacy in consumer wearable technologies: a living systematic analysis of data policies across leading manufacturers</article-title>. <source>NPJ Digit. Med.</source> <volume>8</volume>, <fpage>1</fpage>&#x2013;<lpage>11</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41746-025-01757-1</pub-id>, PMID: <pub-id pub-id-type="pmid">40517175</pub-id></citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ebner-Priemer</surname> <given-names>U. W.</given-names></name> <name><surname>Sawitzki</surname> <given-names>G.</given-names></name></person-group> (<year>2007</year>). <article-title>Ambulatory assessment of affective instability in borderline personality disorder</article-title>. <source>Eur. J. Psychol. Assess.</source> <volume>23</volume>, <fpage>238</fpage>&#x2013;<lpage>247</lpage>. doi: <pub-id pub-id-type="doi">10.1027/1015-5759.23.4.238</pub-id></citation></ref>
<ref id="ref17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Eisenberg</surname> <given-names>N.</given-names></name></person-group> (<year>2020</year>). <article-title>Considering the role of positive emotion in the early emergence of prosocial behavior: Commentary on Hammond and Drummond (2019)</article-title>. <source>Dev. Psychol.</source> <volume>56</volume>, <fpage>843</fpage>&#x2013;<lpage>845</lpage>. doi: <pub-id pub-id-type="doi">10.1037/dev0000880</pub-id></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ilies</surname> <given-names>R.</given-names></name> <name><surname>Aw</surname> <given-names>S. S. Y.</given-names></name> <name><surname>Pluut</surname> <given-names>H.</given-names></name></person-group> (<year>2015</year>). <article-title>Intraindividual models of employee well-being: what have we learned and where do we go from here?</article-title> <source>Eur. J. Work Organ. Psychol.</source> <volume>24</volume>, <fpage>827</fpage>&#x2013;<lpage>838</lpage>. doi: <pub-id pub-id-type="doi">10.1080/1359432X.2015.1071422</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Inness</surname> <given-names>M.</given-names></name> <name><surname>Turner</surname> <given-names>N.</given-names></name> <name><surname>Barling</surname> <given-names>J.</given-names></name> <name><surname>Stride</surname> <given-names>C. B.</given-names></name></person-group> (<year>2010</year>). <article-title>Transformational leadership and employee safety performance: a within-person, between-jobs design</article-title>. <source>J. Occup. Health Psychol.</source> <volume>15</volume>, <fpage>279</fpage>&#x2013;<lpage>290</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0019380</pub-id>, PMID: <pub-id pub-id-type="pmid">20604634</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Isomursu</surname> <given-names>M.</given-names></name> <name><surname>T&#x00E4;hti</surname> <given-names>M.</given-names></name> <name><surname>V&#x00E4;in&#x00E4;m&#x00F6;</surname> <given-names>S.</given-names></name> <name><surname>Kuutti</surname> <given-names>K.</given-names></name></person-group> (<year>2007</year>). <article-title>Experimental evaluation of five methods for collecting emotions in field settings with mobile applications</article-title>. <source>Int. J. Hum.-Comput. Stud.</source> <volume>65</volume>, <fpage>404</fpage>&#x2013;<lpage>418</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijhcs.2006.11.007</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Klumb</surname> <given-names>P.</given-names></name> <name><surname>Elfering</surname> <given-names>A.</given-names></name> <name><surname>Herre</surname> <given-names>C.</given-names></name></person-group> (<year>2009</year>). <article-title>Ambulatory assessment in industrial/organizational psychology: fruitful examples and methodological issues</article-title>. <source>Eur. Psychol.</source> <volume>14</volume>, <fpage>120</fpage>&#x2013;<lpage>131</lpage>. doi: <pub-id pub-id-type="doi">10.1027/1016-9040.14.2.120</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kurdi</surname> <given-names>B.</given-names></name> <name><surname>Lozano</surname> <given-names>S.</given-names></name> <name><surname>Banaji</surname> <given-names>M. R.</given-names></name></person-group> (<year>2017</year>). <article-title>Introducing the open affective standardized image set (OASIS)</article-title>. <source>Behav. Res. Methods</source> <volume>49</volume>, <fpage>457</fpage>&#x2013;<lpage>470</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-016-0715-3</pub-id>, PMID: <pub-id pub-id-type="pmid">26907748</pub-id></citation></ref>
<ref id="ref23"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Lang</surname> <given-names>P. J.</given-names></name> <name><surname>Bradley</surname> <given-names>M. M.</given-names></name> <name><surname>Cuthbert</surname> <given-names>B. N.</given-names></name></person-group> (<year>2005</year>). <article-title>International affective picture system (IAPS): affective ratings of pictures and instruction manual</article-title>. <conf-name>NIMH, Center for the Study of Emotion &#x0026; Attention Gainesville, FL</conf-name>.</citation></ref>
<ref id="ref24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>Y.-K.</given-names></name> <name><surname>Kim</surname> <given-names>S.-H.</given-names></name> <name><surname>Kim</surname> <given-names>M.-S.</given-names></name> <name><surname>Kim</surname> <given-names>H.-S.</given-names></name></person-group> (<year>2017</year>). <article-title>Person&#x2013;environment fit and its effects on employees&#x2019; emotions and self-rated/supervisor-rated performances: the case of employees in luxury hotel restaurants</article-title>. <source>Int. J. Contemp. Hosp. Manag.</source> <volume>29</volume>, <fpage>1447</fpage>&#x2013;<lpage>1467</lpage>. doi: <pub-id pub-id-type="doi">10.1108/IJCHM-08-2015-0441</pub-id></citation></ref>
<ref id="ref25"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name></person-group> (<year>2014</year>) Preliminary establishment of a database of TCM Wuzhi picture stimulation materials. [Master&#x2019;s thesis]. Beijing University of Chinese Medicine</citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Q.</given-names></name> <name><surname>Wei</surname> <given-names>J.</given-names></name> <name><surname>Wu</surname> <given-names>Q.</given-names></name> <name><surname>Zhang</surname> <given-names>N. X.</given-names></name> <name><surname>Zhao</surname> <given-names>T. N.</given-names></name></person-group> (<year>2020</year>). <article-title>Investigation and analysis on anxiety and depression of 183 medical staffs during the epidemic period of the COVID-19</article-title>. <source>Zhonghua Lao Dong Wei Sheng Zhi Ye Bing Za Zhi</source> <volume>38</volume>, <fpage>908</fpage>&#x2013;<lpage>911</lpage>. doi: <pub-id pub-id-type="doi">10.3760/cma.j.cn121094-20200227-00091</pub-id>, PMID: <pub-id pub-id-type="pmid">33406550</pub-id></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>P.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Hu</surname> <given-names>J.</given-names></name> <name><surname>Qing</surname> <given-names>L.</given-names></name> <name><surname>Zhao</surname> <given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>Development and validation of a highly dynamic and reusable picture-based scale: a new affective measurement tool</article-title>. <source>Front. Psychol.</source> <volume>13</volume>:<fpage>1078691</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2022.1078691</pub-id>, PMID: <pub-id pub-id-type="pmid">36733871</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>P.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Hu</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Zhao</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>HDRPS+ database</article-title>. <source>Open Science Framework</source>. doi: <pub-id pub-id-type="doi">10.17605/OSF.IO/D4WCN</pub-id></citation></ref>
<ref id="ref29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lively</surname> <given-names>K.</given-names></name></person-group> (<year>2008</year>). <article-title>Emotional segues and the management of emotion by women and men</article-title>. <source>Soc. Forces</source> <volume>87</volume>, <fpage>911</fpage>&#x2013;<lpage>936</lpage>. doi: <pub-id pub-id-type="doi">10.1353/sof.0.0133</pub-id></citation></ref>
<ref id="ref30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lynn</surname> <given-names>M. R.</given-names></name></person-group> (<year>1986</year>). <article-title>Determination and quantification of content validity</article-title>. <source>Nurs. Res.</source> <volume>35</volume>, <fpage>382</fpage>&#x2013;<lpage>386</lpage>. doi: <pub-id pub-id-type="doi">10.1097/00006199-198611000-00017</pub-id>, PMID: <pub-id pub-id-type="pmid">3640358</pub-id></citation></ref>
<ref id="ref31"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Lyons</surname> <given-names>M. J.</given-names></name></person-group> (<year>2021</year>). <article-title><italic>&#x201C;Excavating AI&#x201D; re-excavated: debunking a fallacious account of the JAFFE dataset</italic> (no. arXiv:2107.13998)</article-title>. <source>arXiv</source>. Available online at: <ext-link xlink:href="http://arxiv.org/abs/2107.13998" ext-link-type="uri">http://arxiv.org/abs/2107.13998</ext-link></citation></ref>
<ref id="ref9001"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Marchewka</surname> <given-names>A.</given-names></name> <name><surname>&#x017B;urawski</surname> <given-names>&#x0141;.</given-names></name> <name><surname>Jednor&#x00F3;g</surname> <given-names>K.</given-names></name> <name><surname>Grabowska</surname> <given-names>A.</given-names></name></person-group> (<year>2014</year>). <article-title>The Nencki Affective Picture System (NAPS): Introduction to a novel, standardized, wide-range, high-quality, realistic picture database</article-title>. <source>Behav. Res. Methods</source> <volume>46</volume>, <fpage>596</fpage>&#x2013;<lpage>610</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-013-0379-1</pub-id></citation></ref>
<ref id="ref32"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Obaid</surname> <given-names>M.</given-names></name> <name><surname>D&#x00FC;nser</surname> <given-names>A.</given-names></name> <name><surname>Moltchanova</surname> <given-names>E.</given-names></name> <name><surname>Cummings</surname> <given-names>D.</given-names></name> <name><surname>Wagner</surname> <given-names>J.</given-names></name> <name><surname>Bartneck</surname> <given-names>C.</given-names></name></person-group> (<year>2015</year>). &#x201C;<article-title>LEGO pictorial scales for assessing affective response</article-title>,&#x201D; in <source>Human-Computer Interaction - INTERACT 2015</source>. <italic>Lecture Notes in Computer Science (vol 9296)</italic>. eds. <person-group person-group-type="editor"><name><surname>Abascal</surname> <given-names>J.</given-names></name> <name><surname>Barbosa</surname> <given-names>S.</given-names></name> <name><surname>Fetter</surname> <given-names>M.</given-names></name> <name><surname>Gross</surname> <given-names>T.</given-names></name> <name><surname>Palanque</surname> <given-names>P.</given-names></name> <name><surname>Winckler</surname> <given-names>M.</given-names></name></person-group> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>). doi: <pub-id pub-id-type="doi">10.1007/978-3-319-22701-6_19</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Park</surname> <given-names>I.-J.</given-names></name> <name><surname>Shim</surname> <given-names>S.-H.</given-names></name> <name><surname>Hai</surname> <given-names>S.</given-names></name> <name><surname>Kwon</surname> <given-names>S.</given-names></name> <name><surname>Kim</surname> <given-names>T. G.</given-names></name></person-group> (<year>2022</year>). <article-title>Cool down emotion, don&#x2019;t be fickle! The role of paradoxical leadership in the relationship between emotional stability and creativity</article-title>. <source>Int. J. Hum. Resour. Manag.</source> <volume>33</volume>, <fpage>2856</fpage>&#x2013;<lpage>2886</lpage>. doi: <pub-id pub-id-type="doi">10.1080/09585192.2021.1891115</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Pollak</surname> <given-names>J. P.</given-names></name> <name><surname>Adams</surname> <given-names>P.</given-names></name> <name><surname>Gay</surname> <given-names>G.</given-names></name></person-group> (<year>2011</year>). <article-title>PAM: a photographic affect meter for frequent, in situ measurement of affect</article-title>. <conf-name>Proceedings of the SIGCHI Conference on Human Factors in Computing Systems</conf-name>, <fpage>725</fpage>&#x2013;<lpage>734</lpage>.</citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rozin</surname> <given-names>P.</given-names></name> <name><surname>Royzman</surname> <given-names>E. B.</given-names></name></person-group> (<year>2001</year>). <article-title>Negativity bias, negativity dominance, and contagion</article-title>. <source>Personal. Soc. Psychol. Rev.</source> <volume>5</volume>, <fpage>296</fpage>&#x2013;<lpage>320</lpage>. doi: <pub-id pub-id-type="doi">10.1207/S15327957PSPR0504_2</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Russell</surname> <given-names>J. A.</given-names></name> <name><surname>Weiss</surname> <given-names>A.</given-names></name> <name><surname>Mendelsohn</surname> <given-names>G. A.</given-names></name></person-group> (<year>1989</year>). <article-title>Affect grid: a single-item scale of pleasure and arousal</article-title>. <source>J. Pers. Soc. Psychol.</source> <volume>57</volume>:<fpage>493</fpage>. doi: <pub-id pub-id-type="doi">10.1037/0022-3514.57.3.493</pub-id></citation></ref>
<ref id="ref37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sauer</surname> <given-names>J.</given-names></name> <name><surname>Baumgartner</surname> <given-names>J.</given-names></name> <name><surname>Frei</surname> <given-names>N.</given-names></name> <name><surname>Sonderegger</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>Pictorial scales in research and practice: a review</article-title>. <source>Eur. Psychol.</source> <volume>26</volume>, <fpage>112</fpage>&#x2013;<lpage>130</lpage>. doi: <pub-id pub-id-type="doi">10.1027/1016-9040/a000405</pub-id></citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schimmack</surname> <given-names>U.</given-names></name> <name><surname>Grob</surname> <given-names>A.</given-names></name></person-group> (<year>2000</year>). <article-title>Dimensional models of core affect: a quantitative comparison by means of structural equation modeling</article-title>. <source>Eur. J. Personal.</source> <volume>14</volume>, <fpage>325</fpage>&#x2013;<lpage>345</lpage>. doi: <pub-id pub-id-type="doi">10.1002/1099-0984(200007/08)14:4&#x003C;325::AID-PER380&#x003E;3.0.CO;2-I</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schreiber</surname> <given-names>M.</given-names></name> <name><surname>Jenny</surname> <given-names>G. J.</given-names></name></person-group> (<year>2020</year>). <article-title>Development and validation of the &#x2018;Lebender emoticon PANAVA&#x2019;SCALE (LE-PANAVA) for digitally measuring positive and negative activation, and valence via emoticons</article-title>. <source>Pers. Individ. Differ.</source> <volume>160</volume>:<fpage>109923</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.paid.2020.109923</pub-id></citation></ref>
<ref id="ref40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Smith</surname> <given-names>N. K.</given-names></name> <name><surname>Cacioppo</surname> <given-names>J. T.</given-names></name> <name><surname>Larsen</surname> <given-names>J. T.</given-names></name> <name><surname>Chartrand</surname> <given-names>T. L.</given-names></name></person-group> (<year>2003</year>). <article-title>May I have your attention, please: electrocortical responses to positive and negative stimuli</article-title>. <source>Neuropsychologia</source> <volume>41</volume>, <fpage>171</fpage>&#x2013;<lpage>183</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0028-3932(02)00147-1</pub-id>, PMID: <pub-id pub-id-type="pmid">12459215</pub-id></citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Toshnazarov</surname> <given-names>K.</given-names></name> <name><surname>Lee</surname> <given-names>U.</given-names></name> <name><surname>Kim</surname> <given-names>B. H.</given-names></name> <name><surname>Mishra</surname> <given-names>V.</given-names></name> <name><surname>Najarro</surname> <given-names>L. A. C.</given-names></name> <name><surname>Noh</surname> <given-names>Y.</given-names></name></person-group> (<year>2024</year>). <article-title>SOSW: stress sensing with off-the-shelf smartwatches in the wild</article-title>. <source>IEEE Internet Things J.</source> <volume>11</volume>, <fpage>21527</fpage>&#x2013;<lpage>21545</lpage>. doi: <pub-id pub-id-type="doi">10.1109/JIOT.2024.3375299</pub-id></citation></ref>
<ref id="ref43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Liao</surname> <given-names>P.-C.</given-names></name></person-group> (<year>2021</year>). <article-title>Re-thinking the mediating role of emotional valence and arousal between personal factors and occupational safety attention levels</article-title>. <source>Int. J. Environ. Res. Public Health</source> <volume>18</volume>:<fpage>5511</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ijerph18115511</pub-id>, PMID: <pub-id pub-id-type="pmid">34063856</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>L. X.</given-names></name> <name><surname>Chu</surname> <given-names>Y. D.</given-names></name></person-group> (<year>2013</year>). <article-title>Standardization and assessment of affective picture stimulates material system of sport circumstance</article-title>. <source>J. Beijing Sport Univ.</source> <volume>36</volume>, <fpage>74</fpage>&#x2013;<lpage>77</lpage>.</citation></ref>
<ref id="ref44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Watson</surname> <given-names>D.</given-names></name> <name><surname>Clark</surname> <given-names>L. A.</given-names></name> <name><surname>Tellegen</surname> <given-names>A.</given-names></name></person-group> (<year>1988</year>). <article-title>Development and validation of brief measures of positive and negative affect: the PANAS scales</article-title>. <source>J. Pers. Soc. Psychol.</source> <volume>54</volume>, <fpage>1063</fpage>&#x2013;<lpage>1070</lpage>. doi: <pub-id pub-id-type="doi">10.1037/0022-3514.54.6.1063</pub-id>, PMID: <pub-id pub-id-type="pmid">3397865</pub-id></citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Weiss</surname> <given-names>H. M.</given-names></name> <name><surname>Cropanzano</surname> <given-names>R.</given-names></name></person-group> (<year>1996</year>). <article-title>Affective events theory</article-title>. <source>Res. Organ. Behav.</source> <volume>18</volume>, <fpage>1</fpage>&#x2013;<lpage>74</lpage>.</citation></ref>
<ref id="ref46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wilhelm</surname> <given-names>P.</given-names></name> <name><surname>Schoebi</surname> <given-names>D.</given-names></name></person-group> (<year>2007</year>). <article-title>Assessing mood in daily life</article-title>. <source>Eur. J. Psychol. Assess.</source> <volume>23</volume>, <fpage>258</fpage>&#x2013;<lpage>267</lpage>. doi: <pub-id pub-id-type="doi">10.1027/1015-5759.23.4.258</pub-id></citation></ref>
<ref id="ref47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wissmath</surname> <given-names>B.</given-names></name> <name><surname>Weibel</surname> <given-names>D.</given-names></name> <name><surname>Mast</surname> <given-names>F. W.</given-names></name></person-group> (<year>2010</year>). <article-title>Measuring presence with verbal versus pictorial scales: a comparison between online- and ex post-ratings</article-title>. <source>Virtual Reality</source> <volume>14</volume>, <fpage>43</fpage>&#x2013;<lpage>53</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10055-009-0127-0</pub-id></citation></ref>
<ref id="ref48"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zung</surname> <given-names>W. W.</given-names></name></person-group> (<year>1965</year>). <article-title>A self-rating depression scale</article-title>. <source>Arch. Gen. Psychiatry</source> <volume>12</volume>, <fpage>63</fpage>&#x2013;<lpage>70</lpage>.</citation></ref>
<ref id="ref49"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zung</surname> <given-names>W. W.</given-names></name></person-group> (<year>1971</year>). <article-title>A rating instrument for anxiety disorders</article-title>. <source>Psychosomatics</source> <volume>12</volume>, <fpage>371</fpage>&#x2013;<lpage>379</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0033-3182(71)71479-0</pub-id>, PMID: <pub-id pub-id-type="pmid">5172928</pub-id></citation></ref>
</ref-list>
</back>
</article>