<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Hum. Neurosci.</journal-id>
<journal-title>Frontiers in Human Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Hum. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5161</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnhum.2025.1516058</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Human Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Precuneus activation correlates with the vividness of dynamic and static imagery: an fMRI study</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Duan</surname> <given-names>Suna</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn0003"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2876671/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Li</surname> <given-names>Qingfeng</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0003"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1157920/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Yang</surname> <given-names>Junjie</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1608776/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Yang</surname> <given-names>Qing</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Li</surname> <given-names>Enran</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname> <given-names>Yuting</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2299926/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Jiang</surname> <given-names>Lijuan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/304738/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Li</surname> <given-names>Chunbo</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/106014/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zhao</surname> <given-names>Binglei</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1076727/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Shanghai Mental Health Center, Shanghai Jiao Tong University School of Medicine</institution>, <addr-line>Shanghai</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>School of Psychology, Shanghai Jiao Tong University</institution>, <addr-line>Shanghai</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>School of Biomedical Engineering, ShanghaiTech University</institution>, <addr-line>Shanghai</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0004">
<p>Edited by: Xiang-Zhen Kong, Zhejiang University, China</p>
</fn>
<fn fn-type="edited-by" id="fn0005">
<p>Reviewed by: Hewei Wang, Fudan University, China</p>
<p>Weifang Huang, Boston College, United States</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Chunbo Li, <email>licb@smhc.org.cn</email>; Binglei Zhao, <email>binglei.zhao@sjtu.edu.cn</email></corresp>
<fn fn-type="equal" id="fn0003"><p><sup>&#x2020;</sup>These authors have contributed equally to this work and share first authorship</p></fn>
</author-notes>
<pub-date pub-type="epub">
<day>14</day>
<month>03</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>19</volume>
<elocation-id>1516058</elocation-id>
<history>
<date date-type="received">
<day>23</day>
<month>10</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>03</day>
<month>03</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2025 Duan, Li, Yang, Yang, Li, Liu, Jiang, Li and Zhao.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Duan, Li, Yang, Yang, Li, Liu, Jiang, Li and Zhao</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec id="sec1">
<title>Introduction</title>
<p>Visual mental imagery (VMI) is a cognitive function that significantly impacts various aspects of daily life. However, the neural correlates of VMI vividness remain unclear, especially underlying different VMI types. Therefore, the current functional magnetic resonance imaging (fMRI) study aimed to investigate the neural mechanisms underlying static (SI) and dynamic VMI (DI), focusing on the role of precuneus especially in the imagery vividness.</p>
</sec>
<sec id="sec2">
<title>Methods</title>
<p>The study involved 24 participants recruited from ShanghaiTech University. After excluding four participants due to excessive movements, data from 20 participants were analyzed. Each participant completed the Chinese version of the Vividness of Visual Imagery Questionnaire (VVIQ) to assess their imagery vividness abilities. During fMRI scanning, participants were asked to imagine SI and DI scenarios in response to auditory stimuli. High-resolution fMRI data were acquired using a 3T scanner, and a General Linear Model (GLM) was applied to analyze blood oxygenation level-dependent (BOLD) signals, focusing on the precuneus&#x2019;s role in imagery vividness.</p>
</sec>
<sec id="sec3">
<title>Results</title>
<p>The results revealed that the left precuneus was found activated in both SI and DI tasks. Moreover, the left precuneus activation was positively correlated with VVIQ score. On the other hand, greater activation in the right precuneus was found during dynamic than static imagery as well as more extensive neural engagements; the right precuneus activation was further detected significantly correlated with individual VVIQ scores.</p>
</sec>
<sec id="sec4">
<title>Discussion</title>
<p>The study&#x2019;s findings offered fresh insights into the cognitive and neural processes subserving VMI. By revealing the distinct roles of the left and right precuneus in imagery vividness, this research contributed to a more nuanced understanding of VMI and its neural basis.</p>
</sec>
</abstract>
<kwd-group>
<kwd>static imagery</kwd>
<kwd>dynamic imagery</kwd>
<kwd>precuneus</kwd>
<kwd>functional magnetic resonance imaging</kwd>
<kwd>vividness of visual mental imagery</kwd>
</kwd-group>
<counts>
<fig-count count="3"/>
<table-count count="4"/>
<equation-count count="0"/>
<ref-count count="82"/>
<page-count count="10"/>
<word-count count="8558"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Cognitive Neuroscience</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec5">
<label>1</label>
<title>Introduction</title>
<p>Visual mental imagery (VMI) refers to the perception-like representations that people form in their minds when things are not in sight, like a weak form of perception (<xref ref-type="bibr" rid="ref60">Pearson, 2019</xref>). VMI plays an essential role in our daily life and affects lots of cognitive functions, such as episodic memory (<xref ref-type="bibr" rid="ref18">D'Angiulli et al., 2013</xref>), spatial navigation (<xref ref-type="bibr" rid="ref8">Bird et al., 2012</xref>; <xref ref-type="bibr" rid="ref12">Carbone et al., 2021</xref>), reading comprehension (<xref ref-type="bibr" rid="ref20">De Koning and van der Schoot, 2013</xref>; <xref ref-type="bibr" rid="ref43">Leutner et al., 2009</xref>), creativity (<xref ref-type="bibr" rid="ref6">Benedek et al., 2020</xref>; <xref ref-type="bibr" rid="ref51">May et al., 2020</xref>) and moral decision-making (<xref ref-type="bibr" rid="ref2">Amit and Greene, 2012</xref>). Though recent studies have shed light on the brain areas activated during imagination (c.f., <xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>; <xref ref-type="bibr" rid="ref31">Hajhajate et al., 2022</xref>; <xref ref-type="bibr" rid="ref45">Liu and Bartolomeo, 2023a</xref>; <xref ref-type="bibr" rid="ref74">Winlove et al., 2018</xref>), the neural mechanisms underlying different types of VMI are still not fully understood.</p>
<p>There are at least two types of VMI, to represent a static object or scene (<italic>static imagery</italic>, SI) or the dynamic manipulation process of mental images (<italic>dynamic imagery</italic>, DI) (c.f., <xref ref-type="bibr" rid="ref58">Paivio and Clark, 1991</xref>; <xref ref-type="bibr" rid="ref60">Pearson, 2019</xref>). Some patients have been reported to be able to imagine the characteristics of static objects, such as color and shape, but to have difficulty in mentally rotating them (<xref ref-type="bibr" rid="ref1">Aleman et al., 2005</xref>; <xref ref-type="bibr" rid="ref48">Luzzatti et al., 1998</xref>). In contrast, other patients can mentally rotate objects but cannot visualize static objects (<xref ref-type="bibr" rid="ref39">Karnath et al., 2009</xref>; <xref ref-type="bibr" rid="ref42">L&#x00EA; et al., 2002</xref>). These results seem to reveal a dissociable association between SI and DI processes (i.e., <xref ref-type="bibr" rid="ref1">Aleman et al., 2005</xref>; <xref ref-type="bibr" rid="ref9">Borst, 2013</xref>; <xref ref-type="bibr" rid="ref44">Levine et al., 1985</xref>). Thanks to neuroimaging technology, the neural mechanisms can be further explored, uncovering different brain activation patterns underlying SI and DI (i.e., <xref ref-type="bibr" rid="ref23">Dijkstra et al., 2019</xref>). 
In a series of functional magnetic resonance imaging (fMRI) studies, SI was found accompanied by the activation of the precuneus, superior parietal cortex, prefrontal cortex and several regions in the occipito-temporal cortex when participants were required to represent an apple or a forest in their minds&#x2019; eyes (<xref ref-type="bibr" rid="ref23">Dijkstra et al., 2019</xref>; <xref ref-type="bibr" rid="ref28">Ganis et al., 2004</xref>; <xref ref-type="bibr" rid="ref37">Ishai et al., 2000</xref>; <xref ref-type="bibr" rid="ref41">Kosslyn et al., 2001</xref>; <xref ref-type="bibr" rid="ref53">Mechelli et al., 2004</xref>; <xref ref-type="bibr" rid="ref74">Winlove et al., 2018</xref>). Different brain areas (i.e., premotor areas, parietal areas, prefrontal cortex and precuneus) were observed to be activated in DI tasks (<xref ref-type="bibr" rid="ref7">Bien and Sack, 2014</xref>; <xref ref-type="bibr" rid="ref11">Burianov&#x00E1; et al., 2013</xref>; <xref ref-type="bibr" rid="ref26">Formisano et al., 2002</xref>; <xref ref-type="bibr" rid="ref35">H&#x00E9;tu et al., 2013</xref>; <xref ref-type="bibr" rid="ref41">Kosslyn et al., 2001</xref>; <xref ref-type="bibr" rid="ref59">Parsons et al., 1995</xref>; <xref ref-type="bibr" rid="ref73">Wang et al., 2023</xref>; <xref ref-type="bibr" rid="ref72">Wang et al., 2020</xref>) when participants were required to subjectively imagine a motor process in minds (i.e., climbing stairs; <xref ref-type="bibr" rid="ref16">Cui et al., 2007</xref>) or to complete tasks that required mental manipulation (i.e., dynamically rotating an actual physical object in mental rotation tasks; <xref ref-type="bibr" rid="ref64">Shepard and Metzler, 1971</xref>). These brain regions also played a crucial role in tasks involving motor imagery of grasping, where individuals imagined the complex motor actions required to grasp and manipulate objects. 
This process engaged the anterior intraparietal area, ventral premotor cortex, dorsal premotor cortex, and the supplementary motor area (<xref ref-type="bibr" rid="ref4">Bencivenga et al., 2021</xref>, <xref ref-type="bibr" rid="ref5">2023</xref>). However, to our best knowledge, there was no neuroimaging study yet directly comparing SI and DI neural mechanisms.</p>
<p>In addition, vividness stands out as a key feature in imagery processes (<xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>). Vividness of visual imagery (VVI) refers to the clarity and richness of details in the visual image one can generate in the mind (<xref ref-type="bibr" rid="ref50">Marks, 1973</xref>; <xref ref-type="bibr" rid="ref36">Hishitani and Murakami, 1992</xref>). Vividness of Visual Imagery Questionnaire (VVIQ) (<xref ref-type="bibr" rid="ref50">Marks, 1973</xref>; <xref ref-type="bibr" rid="ref76">Zhang et al., 2024</xref>) is the most widely used measurement for such abilities (<xref ref-type="bibr" rid="ref16">Cui et al., 2007</xref>; <xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>; <xref ref-type="bibr" rid="ref46">Liu and Bartolomeo, 2023b</xref>). This questionnaire comprises four scenarios related to VMI, covering four domains: color, detail, depth, and movement. Participants were asked to imagine each scenario and to rate the vividness of their imagination on a five-point scale using the Likert scale (1, no image to 5, very vivid) (<xref ref-type="bibr" rid="ref50">Marks, 1973</xref>; <xref ref-type="bibr" rid="ref76">Zhang et al., 2024</xref>). VVI abilities were found to vary across individuals: some can generate very vivid mental images in minds that are truly as lively and vivid as real seeing while others may fail to generate or can create very vague images (<xref ref-type="bibr" rid="ref75">Zeman et al., 2015</xref>; <xref ref-type="bibr" rid="ref40">Keogh et al., 2021</xref>).</p>
<p>The individual difference in VVI abilities was observed to affect both SI (<xref ref-type="bibr" rid="ref22">Dijkstra et al., 2017</xref>; <xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>) and DI processes (<xref ref-type="bibr" rid="ref16">Cui et al., 2007</xref>; <xref ref-type="bibr" rid="ref47">Logie et al., 2011</xref>; <xref ref-type="bibr" rid="ref79">Zhao and Sala, 2018</xref>; <xref ref-type="bibr" rid="ref77">Zhao et al., 2019</xref>; <xref ref-type="bibr" rid="ref78">Zhao et al., 2022</xref>). Systematic differences between individuals with higher and lower VVI abilities were observed in SI while famous faces or places were required to be generated: the posterior visual network was activated in higher imagers whereas the frontal network was observed activated in lower imagers, including the inferior frontal and anterior cingulate gyrus (<xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>). Moreover, the connectivity strength between the occipital place area and the parahippocampal place area correlated positively with an individual&#x2019;s VVIQ score, indicating a link between brain region connectivity and the vividness of visual imagery (<xref ref-type="bibr" rid="ref69">Tullo et al., 2022</xref>). In coping with mental rotation tasks (<xref ref-type="bibr" rid="ref64">Shepard and Metzler, 1971</xref>), a classic measurement for DI, differential brain activation patterns were found between individuals with lower and higher VVI (<xref ref-type="bibr" rid="ref47">Logie et al., 2011</xref>).</p>
<p>Notably, the key brain areas responsible for VVI abilities are still unclear, though more researchers shed light on the neural mechanisms of VMI (c.f., <xref ref-type="bibr" rid="ref3">Bartolomeo et al., 2020</xref>; <xref ref-type="bibr" rid="ref65">Spagna et al., 2021</xref>; <xref ref-type="bibr" rid="ref74">Winlove et al., 2018</xref>). The precuneus is somehow neglected though always reported activation in VMI tasks (<xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>; <xref ref-type="bibr" rid="ref52">Mazzoni et al., 2019</xref>; <xref ref-type="bibr" rid="ref74">Winlove et al., 2018</xref>). For example, <xref ref-type="bibr" rid="ref82">Zvyagintsev et al. (2013)</xref> reported the activation of the left precuneus when participants were asked to imagine familiar static objects (i.e., animals or trees). Similar observations were observed in SI tasks accompanied by the activation in bilateral precuneus (<xref ref-type="bibr" rid="ref19">de Borst et al., 2012</xref>; <xref ref-type="bibr" rid="ref29">Gardini et al., 2009</xref>). In addition, the left precuneus was also found to be activated when participants were asked to perform DI tasks (i.e., mental rotation task) (<xref ref-type="bibr" rid="ref61">Podzebenko et al., 2002</xref>). When participants were asked to imagine a movement (i.e., pushing a door), the precuneus was found to activate as well as other brain areas (<xref ref-type="bibr" rid="ref15">Confalonieri et al., 2012</xref>; <xref ref-type="bibr" rid="ref32">Hanakawa et al., 2003</xref>). Interestingly, precuneus activation was also reported when VVI abilities were taken into account (i.e., <xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>; <xref ref-type="bibr" rid="ref22">Dijkstra et al., 2017</xref>). 
For example, when participants were required to imagine famous faces or places (e.g., Einstein), the left and right precuneus has been reported positively correlated with individual VVI abilities (<xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>; <xref ref-type="bibr" rid="ref22">Dijkstra et al., 2017</xref>).</p>
<p>Based on the existing observations, therefore, we proposed that precuneus would play a key role in VVI abilities. To test this hypothesis, all participants were assessed with SI and DI tasks in the magnetic resonance scanner while functional images with blood oxygenation level-dependent (BOLD) contrast were acquired. Their imagery vividness abilities were assessed with VVIQ (<xref ref-type="bibr" rid="ref50">Marks, 1973</xref>; <xref ref-type="bibr" rid="ref76">Zhang et al., 2024</xref>). First, the brain activation patterns were explored in SI and DI processes, respectively. To address the difference between these two imagery types, brain activations were compared between SI and DI processes in all imagers. In addition, the association between the bilateral precuneus and VVI abilities was indexed by VVIQ scores. Gleaned from the literature, we predicted that both left and right precuneus would be activated in both SI and DI. Considering the imagery task complexity (i.e., how much vivid information should be represented in minds), there would be distinctive neural mechanisms between SI and DI, especially in precuneus if it is the key area for VVI abilities. In addition, there would be correlations observed between precuneus and VVI abilities in SI and DI processes.</p>
</sec>
<sec sec-type="materials|methods" id="sec6">
<label>2</label>
<title>Materials and methods</title>
<sec id="sec7">
<label>2.1</label>
<title>Participants</title>
<p>Twenty-four participants from ShanghaiTech University were recruited (four females, ages 19&#x2013;37, <italic>M</italic>&#x202F;=&#x202F;22.79&#x202F;years old, SD&#x202F;=&#x202F;4.27&#x202F;years old). All participants were right-handed, had no history of physical or mental illness, and had normal or corrected-to-normal vision. Additionally, participants scored higher than 32 points on the Vividness of Visual Imagery Questionnaire (VVIQ) to ensure that our sample consisted of individuals with visual image abilities (<xref ref-type="bibr" rid="ref50">Marks, 1973</xref>). The experimental design was approved by the Ethics Committee of Shanghai Jiaotong University (ethical no.: H20230182I), and all participants read and signed the fMRI experiment informed consent before the experiment. Four participants were excluded from the analysis due to head movements exceeding our predefined criteria for excessive motion: translations greater than 3&#x202F;mm and rotations greater than 3 degrees. Thus, the remaining 20 participants&#x2019; data (three females, ages 19&#x2013;27, <italic>M</italic>&#x202F;=&#x202F;22.00&#x202F;years old, SD&#x202F;=&#x202F;2.51&#x202F;years old) were used for data analysis. None of these participants participated in our previous fMRI study of imagery. To determine the adequacy of our sample size, a power analysis was performed using G&#x002A;Power 3.1 (<xref ref-type="bibr" rid="ref24">Faul et al., 2009</xref>; <xref ref-type="bibr" rid="ref25">Faul et al., 2007</xref>). Assuming an effect size of 0.5, an alpha level of 0.05, and an expected power of 0.80 (<xref ref-type="bibr" rid="ref14">Cohen, 1988</xref>; <xref ref-type="bibr" rid="ref65">Spagna et al., 2021</xref>; <xref ref-type="bibr" rid="ref81">ZHENG et al., 2011</xref>) for a within-subjects design, G&#x002A;Power indicated that a sample size of 20 provides adequate power to detect medium to large effects in our analyses.</p>
</sec>
<sec id="sec8">
<label>2.2</label>
<title>Subjective vividness rating</title>
<p>Each participant completed the Chinese version of the Vividness of Visual Imagery Questionnaire (VVIQ-C; <xref ref-type="bibr" rid="ref76">Zhang et al., 2024</xref>). Similar to <xref ref-type="bibr" rid="ref50">Marks&#x2019;s (1973)</xref> original version, the VVIQ-C consists of 16 items to measure the vividness of the participants&#x2019; mental imagery. Participants were asked to create a mental image of a specific scene or object (e.g., the rising sun and familiar relatives) and to rate its vividness on a 5-point Likert scale, 1 means no image at all, 5 means the image is very vivid and clear. Total scores range from 16 to 80.</p>
</sec>
<sec id="sec9">
<label>2.3</label>
<title>Experimental task</title>
<p>Static (SI) and dynamic imagery (DI) auditory stimuli were designed in the present study. In the SI trials, participants were instructed to imagine either &#x201C;familiar relatives&#x201D; or &#x201C;dense forest.&#x201D; In the &#x201C;familiar relatives&#x201D; trial, participants were instructed to close their eyes, listen to the phrase &#x201C;familiar relatives&#x201D; and visualize in their minds their close family members, such as parents, siblings, or close friends. They were asked to imagine the physical characteristics of these individuals and other static visual images in as much detail as possible. In the &#x201C;dense forest&#x201D; trial, participants were required to imagine various static object details within the forest (i.e., the texture of the trees). Two auditory cues were involved in DI trials, &#x201C;pouring rain&#x201D; or &#x201C;climbing stairs&#x201D; in which motor imagery were engaged (<xref ref-type="bibr" rid="ref16">Cui et al., 2007</xref>; <xref ref-type="bibr" rid="ref54">Munzert et al., 2009</xref>; <xref ref-type="bibr" rid="ref67">Szameitat et al., 2007</xref>). In the &#x201C;pouring rain&#x201D; trial, participants were instructed to imagine a scene of &#x201C;pouring rain,&#x201D; where they needed to mentally construct a dynamic scene with rainwater cascading from the sky. For the &#x201C;climbing stairs&#x201D; trial, participants were asked to visualize themselves in the dynamic process of climbing stairs. Each stimulus was presented with five Chinese characters, which were converted into auditory stimuli through professional text-to-auditory software to ensure that the stimulus duration (all within 2&#x202F;s), pronunciation, and volume were consistent.</p>
<p>The experimental procedure was similar to <xref ref-type="bibr" rid="ref16">Cui et al.&#x2019;s (2007)</xref> design. An eye patch was given to each participant and they were instructed to close their eyes for the entire experiment. At the beginning of each trial (as shown in <xref ref-type="fig" rid="fig1">Figure 1</xref>), an auditory instruction was given lasting for 2&#x202F;s prompting participants to imagine either an object or an action. To ensure imagery starts at the same time, participants were instructed to start visualization after hearing the &#x2018;go&#x2019; signal and they had 10&#x202F;s to imagine (imagery phase). They were instructed to stop visualization and had a 10-s rest when they heard &#x2018;stop&#x2019; before the next trial. This rest period allowed the BOLD signal to return to baseline. Each auditory stimulus was presented eight times in a random order for each subject. Therefore, there were 32 trials (4 types of auditory stimuli<inline-formula>
<mml:math id="M1">
<mml:mo>&#x00D7;</mml:mo>
</mml:math>
</inline-formula>8 repetitions&#x202F;=&#x202F;32 trials) in total.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Imagery task timeline. Participants started visualization after hearing the &#x2018;go&#x2019; signal, and stopped visualization when they heard &#x2018;stop&#x2019;. Participants visualized for 10 seconds and rested for 10 seconds. All instructions were auditory.</p>
</caption>
<graphic xlink:href="fnhum-19-1516058-g001.tif"/>
</fig>
<p>Upon completion of the experiment, a debriefing session was conducted with each participant to verify whether they had engaged in mental imagery as required by the tasks. Specifically, each participant was asked to confirm whether they had performed mental imagery during the experimental trials. All participants confirmed that they had indeed engaged in imagery tasks as instructed.</p>
</sec>
<sec id="sec10">
<label>2.4</label>
<title>MRI acquisition</title>
<p>Images were acquired using a 3T UIH uMR790 MR whole-body scanner (United-Imaging Healthcare, China). High-resolution anatomical images were acquired using a 3D T1-weighted Gradient-Recalled Echo (GRE) sequence (TR: 8.1&#x202F;ms, TE: 3.4&#x202F;ms, voxel size: 0.8&#x202F;mm&#x202F;&#x00D7;&#x202F;0.8&#x202F;mm, thickness: 0.8&#x202F;mm, number of slices: 208, field of view: 256&#x202F;mm&#x202F;&#x00D7;&#x202F;240&#x202F;mm&#x202F;&#x00D7;&#x202F;208&#x202F;mm, flip angle: 8&#x00B0;). Functional images were obtained using an Echo-Planar Imaging (EPI) sequence (TR: 2000&#x202F;ms, TE: 30&#x202F;ms, voxel size: 3.5&#x202F;mm&#x202F;&#x00D7;&#x202F;3.5&#x202F;mm, thickness: 4&#x202F;mm, number of slices: 33, field of view: 224&#x202F;mm&#x202F;&#x00D7;&#x202F;224&#x202F;mm&#x202F;&#x00D7;&#x202F;132&#x202F;mm, flip angle: 90&#x00B0;, number of volumes: 352). Slices were acquired in the axial plane, parallel to the anterior commissure/posterior commissure (ACPC) line.</p>
<p>Quality control measures were implemented during the study to ensure data integrity, including regular assessments using water phantoms to verify device stability. During data collection, scanning technicians rescanned participants if excessive head motion was detected (monitored in real time by United Imaging&#x2019;s mocap system) or if artifacts were present in the reconstructed images. Additionally, during the data analysis phase, participants were excluded if their head movements exceeded predefined criteria for excessive motion, specifically translations greater than 3&#x202F;mm and rotations greater than 3 degrees.</p>
</sec>
<sec id="sec11">
<label>2.5</label>
<title>fMRI data processing</title>
<p>fMRI preprocessing and statistical analysis were performed using MATLAB R2019b (The MathWorks, Inc., Natick, MA) and SPM12 (Statistical Parametric Mapping software).<xref ref-type="fn" rid="fn0001"><sup>1</sup></xref> For each fMRI image, we first performed slice timing correction, then spatially realigned the images to the reference volume (i.e., the first acquired volume) and then co-registered to the mean EPI image. The mean EPI image was normalized to the standard single subject template in MNI space. A Gaussian kernel of 4&#x202F;mm full-width half-maximum was used for smoothing to meet the statistical requirements of the theory of Gaussian fields according to the General Linear Model employed in SPM and to compensate for inter-individual variability in macro- and micro-anatomical structures across subjects (<xref ref-type="bibr" rid="ref9001">Friston et al., 1995a</xref>, <xref ref-type="bibr" rid="ref9002">b</xref>).</p>
<p>A General Linear Model (GLM) was thus applied to each voxel of the functional dataset (i.e., first-level analysis). We used an event-related analysis and the BOLD response for each event type was modeled with the canonical Hemodynamic Response Function (HRF) and its temporal derivative. A temporal high-pass filter of 1/128&#x202F;Hz and linear trend removal were employed. The three translation and three rotation movement parameters obtained from the initial spatial realignment were included as further regressors.</p>
<p>For this experiment, two event types were defined and then used as conditions for the model specification: (a) dynamic imagery, &#x201C;DI,&#x201D; (b) static imagery, &#x201C;SI.&#x201D; To assess specific effects, we applied appropriate linear contrasts of the parameter estimates for the DI and SI conditions. For each participant, we calculated the following contrast images: DI&#x202F;&#x003E;&#x202F;Rest and SI&#x202F;&#x003E;&#x202F;Rest, which represent the activation patterns specific to each imagery condition compared to the resting state. We also calculated the main effect contrasts (DI&#x202F;&#x003E;&#x202F;SI and SI&#x202F;&#x003E;&#x202F;DI) to directly compare the two imagery conditions. Second level Random Effects Analyses were performed by using a <italic>t</italic>-test to create an SPM{T} on contrast images obtained from individual participants, to obtain significant activations specific for each contrast on a group level (i.e., second-level analysis). We used a threshold of <italic>p</italic>&#x202F;&#x003C;&#x202F;0.05, corrected for multiple comparisons at the cluster level using family-wise error (FWE), with a height threshold at the voxel level of <italic>p</italic>&#x202F;&#x003C;&#x202F;0.05, uncorrected. The xjView Toolbox v10 and the Anatomical Automatic Labeling (AAL) atlas were employed for the anatomical localization and labeling of activation clusters in the brain (<xref ref-type="bibr" rid="ref70">Tzourio-Mazoyer et al., 2002</xref>). The code for the first-level and second-level analyses can be found on this page.<xref ref-type="fn" rid="fn0002"><sup>2</sup></xref></p>
</sec>
<sec id="sec12">
<label>2.6</label>
<title>Behavioral data analysis</title>
<p>VVIQ data was analyzed using the SciPy v1.7.3 toolkit of Python 3.7. Group-level correlation between VVIQ scores and brain activations was calculated by computing the Pearson correlation coefficient, in which brain activation of certain ROI was calculated by averaging the voxel intensity of the contrast images (generated in first-level analysis) covered by the ROI.</p>
</sec>
</sec>
<sec sec-type="results" id="sec13">
<label>3</label>
<title>Results</title>
<sec id="sec14">
<label>3.1</label>
<title>The VVIQ score of the participant</title>
<p>The participants&#x2019; scores on the Vividness of Visual Imagery Questionnaire (VVIQ-C) ranged from 39 to 80, with a mean (<italic>M</italic>) of 62.85 and a standard deviation (SD) of 12.99.</p>
</sec>
<sec id="sec15">
<label>3.2</label>
<title>Activation of brain regions in DI and SI task</title>
<p>A two-sample <italic>t</italic>-test was used to see the activated brain area in the DI task and SI task. The DI task engaged a network of brain regions. Specifically, the task-related network included the left inferior frontal gyrus, left precuneus, left supplementary motor area, left insula, left superior parietal lobe, and left superior frontal cortex (All cluster sizes &#x003E;30 voxels, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.05, cluster-wise FWE corrected; see <xref ref-type="table" rid="tab1">Table 1</xref>; <xref ref-type="fig" rid="fig2">Figure 2a</xref>). The SI task also activated a distinct set of brain regions, including the right cerebellum crus1 lobule, left medial superior frontal cortex, middle frontal gyrus (bilaterally), left superior parietal lobe, and left supplementary motor area, along with the left precuneus (All cluster sizes &#x003E;30 voxels, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.05, cluster-wise FWE corrected; see <xref ref-type="table" rid="tab2">Table 2</xref>; <xref ref-type="fig" rid="fig2">Figure 2b</xref>). The activation of the precuneus in both DI and SI tasks underscored its potential role as a common neural substrate for visual mental imagery, irrespective of the content of the imagery (DI and SI).</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Activation of brain regions in dynamic imagery (DI) task.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Anatomical area</th>
<th align="center" valign="top" rowspan="2">Hemi</th>
<th align="center" valign="top" colspan="3">Co-ordinates</th>
<th align="center" valign="top" rowspan="2">K</th>
<th align="center" valign="top" rowspan="2"><italic>t</italic>-value</th>
</tr>
<tr>
<th align="center" valign="top">X</th>
<th align="center" valign="top">Y</th>
<th align="center" valign="top">Z</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Inferior Frontal Gyrus</td>
<td align="center" valign="middle">L</td>
<td align="center" valign="middle">&#x2212;52</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">14</td>
<td align="center" valign="middle">1,346</td>
<td align="center" valign="middle">6.84</td>
</tr>
<tr>
<td align="left" valign="middle">Precuneus</td>
<td align="center" valign="middle">L</td>
<td align="center" valign="middle">&#x2212;6</td>
<td align="center" valign="middle">&#x2212;62</td>
<td align="center" valign="middle">68</td>
<td align="center" valign="middle">1,795</td>
<td align="center" valign="middle">6.46</td>
</tr>
<tr>
<td align="left" valign="middle">Supplementary Motor_Area</td>
<td align="center" valign="middle">L</td>
<td align="center" valign="middle">18</td>
<td align="center" valign="middle">&#x2212;15</td>
<td align="center" valign="middle">&#x2212;18</td>
<td align="center" valign="middle">2,457</td>
<td align="center" valign="middle">5.62</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Hemi, Hemisphere activation present in-left (L) or right (R). K, Cluster size. <italic>t</italic>-value, peak <italic>t</italic>-value.</p>
</table-wrap-foot>
</table-wrap>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>The activation clusters in the <bold>(a)</bold> dynamic imagery (DI) contrast (DI&#x202F;&#x003E;&#x202F;Rest), <bold>(b)</bold> static imagery (SI) contrast (SI&#x202F;&#x003E;&#x202F;Rest), <bold>(c)</bold> dynamic imagery&#x202F;&#x003E;&#x202F;static imagery contrast (DI&#x202F;&#x003E;&#x202F;SI), and <bold>(d)</bold> static imagery&#x202F;&#x003E;&#x202F;dynamic imagery contrast (SI&#x202F;&#x003E;&#x202F;DI).</p>
</caption>
<graphic xlink:href="fnhum-19-1516058-g002.tif"/>
</fig>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Activation of brain regions in static imagery (SI) task.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Anatomical area</th>
<th align="center" valign="top" rowspan="2">Hemi</th>
<th align="center" valign="top" colspan="3">Co-ordinates</th>
<th align="center" valign="top" rowspan="2">K</th>
<th align="center" valign="top" rowspan="2"><italic>t</italic>-value</th>
</tr>
<tr>
<th align="center" valign="top">X</th>
<th align="center" valign="top">Y</th>
<th align="center" valign="top">Z</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Cerebellum_Crus1</td>
<td align="center" valign="middle" rowspan="2">R</td>
<td align="center" valign="middle">46</td>
<td align="center" valign="middle">&#x2212;58</td>
<td align="center" valign="middle">&#x2212;30</td>
<td align="center" valign="middle">402</td>
<td align="center" valign="middle">4.66</td>
</tr>
<tr>
<td align="left" valign="middle">Undefined</td>
<td align="center" valign="middle">&#x2212;18</td>
<td align="center" valign="middle">&#x2212;52</td>
<td align="center" valign="middle">40</td>
<td align="center" valign="middle">408</td>
<td align="center" valign="middle">3.59</td>
</tr>
<tr>
<td align="left" valign="middle">Medial Frontal Lobe</td>
<td align="center" valign="middle">L</td>
<td align="center" valign="middle">&#x2212;6</td>
<td align="center" valign="middle">24</td>
<td align="center" valign="middle">42</td>
<td align="center" valign="middle">429</td>
<td align="center" valign="middle">3.71</td>
</tr>
<tr>
<td align="left" valign="middle">Middle Frontal Gyrus</td>
<td align="center" valign="middle">R</td>
<td align="center" valign="middle">30</td>
<td align="center" valign="middle">&#x2212;4</td>
<td align="center" valign="middle">52</td>
<td align="center" valign="middle">363</td>
<td align="center" valign="middle">4.16</td>
</tr>
<tr>
<td align="left" valign="middle">Middle Frontal Gyrus</td>
<td align="center" valign="middle">L</td>
<td align="center" valign="middle">&#x2212;30</td>
<td align="center" valign="middle">4</td>
<td align="center" valign="middle">64</td>
<td align="center" valign="middle">1,171</td>
<td align="center" valign="middle">5.24</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Hemi, Hemisphere activation present in-left (L) or right (R). K, Cluster size. <italic>t</italic>-value, peak <italic>t</italic>-value.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec16">
<label>3.3</label>
<title>Activation differences between DI and SI tasks</title>
<p>When comparing the DI and SI tasks directly, differential activation in several brain regions was observed. The right fusiform, bilateral superior temporal gyrus, left cuneus, right precuneus, left lingual, and right supplementary motor area were activated more strongly in the DI task than in the SI task (All cluster sizes &#x003E;30 voxels, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.05, cluster-wise FWE corrected; see <xref ref-type="table" rid="tab3">Table 3</xref>; <xref ref-type="fig" rid="fig2">Figure 2c</xref>). In contrast, the reverse subtraction revealed that only the left cerebellum crus I lobule was activated more strongly in the SI task than in the DI task (All cluster sizes &#x003E;30 voxels, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.05, cluster-wise FWE corrected; see <xref ref-type="table" rid="tab4">Table 4</xref>; <xref ref-type="fig" rid="fig2">Figure 2d</xref>). These differential patterns of activation suggested that while there was some overlap in the neural substrates supporting SI and DI, each task also engages unique neural processes.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Brain regions activated more strongly in the dynamic imagery (DI) task than in the static imagery (SI) task.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Anatomical area</th>
<th align="center" valign="top" rowspan="2">Hemi</th>
<th align="center" valign="top" colspan="3">Co-ordinates</th>
<th align="center" valign="top" rowspan="2">K</th>
<th align="center" valign="top" rowspan="2"><italic>t</italic>-value</th>
</tr>
<tr>
<th align="center" valign="top">X</th>
<th align="center" valign="top">Y</th>
<th align="center" valign="top">Z</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Fusiform</td>
<td align="center" valign="middle">R</td>
<td align="center" valign="middle">40</td>
<td align="center" valign="middle">&#x2212;36</td>
<td align="center" valign="middle">&#x2212;16</td>
<td align="center" valign="middle">2,860</td>
<td align="center" valign="middle">4.69</td>
</tr>
<tr>
<td align="left" valign="middle">Superior Temporal Gyrus</td>
<td align="center" valign="middle">L</td>
<td align="center" valign="middle">&#x2212;50</td>
<td align="center" valign="middle">&#x2212;36</td>
<td align="center" valign="middle">18</td>
<td align="center" valign="middle">6,049</td>
<td align="center" valign="middle">8.34</td>
</tr>
<tr>
<td align="left" valign="middle">Cuneus</td>
<td align="center" valign="middle">L</td>
<td align="center" valign="middle">&#x2212;16</td>
<td align="center" valign="middle">&#x2212;62</td>
<td align="center" valign="middle">22</td>
<td align="center" valign="middle">1,860</td>
<td align="center" valign="middle">4.90</td>
</tr>
<tr>
<td align="left" valign="middle">Superior Temporal Gyrus</td>
<td align="center" valign="middle">R</td>
<td align="center" valign="middle">68</td>
<td align="center" valign="middle">&#x2212;26</td>
<td align="center" valign="middle">6</td>
<td align="center" valign="middle">4,142</td>
<td align="center" valign="middle">8.94</td>
</tr>
<tr>
<td align="left" valign="middle">Precuneus</td>
<td align="center" valign="middle">R</td>
<td align="center" valign="middle">8</td>
<td align="center" valign="middle">&#x2212;46</td>
<td align="center" valign="middle">52</td>
<td align="center" valign="middle">7,763</td>
<td align="center" valign="middle">7.24</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Hemi, Hemisphere activation present in-left (L) or right (R). K, Cluster size. <italic>t</italic>-value, peak <italic>t</italic>-value.</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Brain regions activated more strongly in the static imagery (SI) task than in the dynamic imagery (DI) task.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top" rowspan="2">Anatomical area</th>
<th align="center" valign="top" rowspan="2">Hemi</th>
<th align="center" valign="top" colspan="3">Co-ordinates</th>
<th align="center" valign="top" rowspan="2">K</th>
<th align="center" valign="top" rowspan="2"><italic>t</italic>-value</th>
</tr>
<tr>
<th align="center" valign="top">X</th>
<th align="center" valign="top">Y</th>
<th align="center" valign="top">Z</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Cerebellum_Crus1</td>
<td align="center" valign="middle" rowspan="2">L</td>
<td align="center" valign="middle">&#x2212;8</td>
<td align="center" valign="middle">&#x2212;72</td>
<td align="center" valign="middle">&#x2212;28</td>
<td align="center" valign="middle">2,177</td>
<td align="center" valign="middle">5.18</td>
</tr>
<tr>
<td align="left" valign="middle">Undefined</td>
<td align="center" valign="middle">&#x2212;18</td>
<td align="center" valign="middle">&#x2212;10</td>
<td align="center" valign="middle">34</td>
<td align="center" valign="middle">9,269</td>
<td align="center" valign="middle">8.93</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Hemi, Hemisphere activation present in-left (L) or right (R). K, Cluster size. <italic>t</italic>-value, peak <italic>t</italic>-value.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec17">
<label>3.4</label>
<title>Relationship between brain activation of bilateral precuneus and reported vividness of visual images</title>
<p>The relationship between the vividness of mental imagery, as measured by the VVIQ, and the activation of the precuneus was a key focus of our analysis. The correlations of bilateral precuneus activation with VVIQ-C scores were analyzed. For the left precuneus (<xref ref-type="fig" rid="fig3">Figure 3a</xref>), there was no significant correlation between activation of the DI&#x202F;&#x003E;&#x202F;SI task and VVIQ score (Pearson <italic>r</italic>&#x202F;=&#x202F;&#x2212;0.33, <italic>p</italic>&#x202F;=&#x202F;0.16); no significant correlation between activation of SI&#x202F;&#x003E;&#x202F;DI task and VVIQ score (Pearson <italic>r</italic>&#x202F;=&#x202F;0.37, <italic>p</italic>&#x202F;=&#x202F;0.11); no significant correlation between the activation of DI&#x202F;&#x003E;&#x202F;Rest task and VVIQ score (Pearson <italic>r</italic>&#x202F;=&#x202F;0.22, <italic>p</italic>&#x202F;=&#x202F;0.35). A significant positive correlation was evident between activation of SI&#x202F;&#x003E;&#x202F;Rest task and VVIQ score (Pearson <italic>r</italic>&#x202F;=&#x202F;0.51, <italic>p</italic>&#x202F;=&#x202F;0.02) (<xref ref-type="fig" rid="fig3">Figure 3b</xref>).</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>The illustration of <bold>(a)</bold> bilateral precuneus, and correlation of brain activation in the <bold>(b)</bold> and <bold>(c)</bold> right precuneus under dynamic imagery (DI) task (DI&#x202F;&#x003E;&#x202F;Rest) and static imagery (SI) task (SI&#x202F;&#x003E;&#x202F;Rest) with the Vividness of Visual Imagery Questionnaire (VVIQ) score.</p>
</caption>
<graphic xlink:href="fnhum-19-1516058-g003.tif"/>
</fig>
<p>For the right precuneus (<xref ref-type="fig" rid="fig3">Figure 3a</xref>), there was no significant correlation between activation of DI&#x202F;&#x003E;&#x202F;SI task and VVIQ score (Pearson <italic>r</italic>&#x202F;=&#x202F;0.03, <italic>p</italic>&#x202F;=&#x202F;0.91); no significant correlation between activation of SI&#x202F;&#x003E;&#x202F;DI task and VVIQ score (Pearson <italic>r</italic>&#x202F;=&#x202F;0.39, <italic>p</italic>&#x202F;=&#x202F;0.09). Interestingly, there were significant positive associations between the VVIQ score and activation of DI&#x202F;&#x003E;&#x202F;Rest task (Pearson <italic>r</italic>&#x202F;=&#x202F;0.48, <italic>p</italic>&#x202F;=&#x202F;0.03) and activation of SI&#x202F;&#x003E;&#x202F;Rest task (Pearson <italic>r</italic>&#x202F;=&#x202F;0.48, <italic>p</italic>&#x202F;=&#x202F;0.03) respectively (<xref ref-type="fig" rid="fig3">Figure 3c</xref>). This suggests that the right precuneus may be particularly sensitive to the vividness of mental imagery, regardless of whether the imagery is static or dynamic.</p>
</sec>
</sec>
<sec sec-type="discussion" id="sec18">
<label>4</label>
<title>Discussion</title>
<p>In the current study, the neural underpinnings of SI and DI in healthy participants were explored. Utilizing the Vividness of Visual Imagery Questionnaire (VVIQ-C) (<xref ref-type="bibr" rid="ref76">Zhang et al., 2024</xref>), participants&#x2019; VVI abilities were quantified before engaging them in imagery tasks involving static and dynamic scenes. High-resolution fMRI data were acquired using a 3T scanner, and a General Linear Model (GLM) was applied to analyze BOLD signals, focusing on the precuneus&#x2019;s role in imagery vividness. This methodological approach allowed us to draw correlations between subjective vividness ratings on VMI abilities and neural activations, particularly within the precuneus.</p>
<p>The study revealed that both SI and DI tasks were associated with activation in the precuneus, a finding that supports the precuneus&#x2019;s role in VMI. The right precuneus, in particular, showed significant correlations with VVI scores during both static and DI tasks and the left precuneus showed significant correlations with VVI scores during SI task, suggesting its importance in the vividness of mental imagery. Furthermore, the DI task was found to activate more brain regions, including the precuneus, compared to the SI task.</p>
<sec id="sec19">
<label>4.1</label>
<title>Both SI and DI were associated with precuneus activation</title>
<p>As we predicted, our experiments revealed the left precuneus was activated in both DI&#x202F;&#x003E;&#x202F;Rest task (<xref ref-type="fig" rid="fig2">Figure 2a</xref>) and SI&#x202F;&#x003E;&#x202F;Rest task (<xref ref-type="fig" rid="fig2">Figure 2b</xref>), indicating a potential involvement of the precuneus in VMI. The precuneus is a region consistently implicated in a wide array of cognitive tasks, including visual&#x2013;spatial imagery (<xref ref-type="bibr" rid="ref13">Cavanna and Trimble, 2006</xref>; <xref ref-type="bibr" rid="ref52">Mazzoni et al., 2019</xref>), memory retrieval (<xref ref-type="bibr" rid="ref33">Hebscher et al., 2020</xref>; <xref ref-type="bibr" rid="ref52">Mazzoni et al., 2019</xref>) and self-processing operations, namely first-person perspective taking and an experience of agency (<xref ref-type="bibr" rid="ref55">Murray et al., 2015</xref>; <xref ref-type="bibr" rid="ref56">Northoff et al., 2006</xref>). The current results were consistent with the observations in numerous neuroimaging studies, highlighting its role in VMI (<xref ref-type="bibr" rid="ref22">Dijkstra et al., 2017</xref>; <xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>; <xref ref-type="bibr" rid="ref74">Winlove et al., 2018</xref>). In a meta-analysis study on VMI, regions consistently activated by VMI were identified across 40 neuroimaging studies using the Activation Likelihood Estimation (ALE) algorithm, which revealed involvement of the precuneus (<xref ref-type="bibr" rid="ref74">Winlove et al., 2018</xref>). Our findings, which demonstrate activation of the precuneus in both SI and DI tasks, aligned with the existing literature. 
For instance, the precuneus was found to be activated during SI tasks such as imagining familiar concrete objects (<xref ref-type="bibr" rid="ref19">de Borst et al., 2012</xref>; <xref ref-type="bibr" rid="ref29">Gardini et al., 2009</xref>; <xref ref-type="bibr" rid="ref82">Zvyagintsev et al., 2013</xref>), and has also been found to be activated in DI tasks such as mental rotation and imagining a movement (<xref ref-type="bibr" rid="ref15">Confalonieri et al., 2012</xref>; <xref ref-type="bibr" rid="ref32">Hanakawa et al., 2003</xref>; <xref ref-type="bibr" rid="ref61">Podzebenko et al., 2002</xref>).</p>
<p>The precuneus&#x2019;s activation patterns in our study are particularly noteworthy, as they reflect the brain engagement in constructing mental representations of both SI and DI. This dual involvement suggests that the precuneus may play a critical role in the core processes underlying VMI. The activation of the precuneus could be indicative of its function in integrating sensory information, spatial orientation, and self-related perspectives, which are all essential components of VMI (<xref ref-type="bibr" rid="ref13">Cavanna and Trimble, 2006</xref>; <xref ref-type="bibr" rid="ref17">Dadario and Sughrue, 2023</xref>; <xref ref-type="bibr" rid="ref52">Mazzoni et al., 2019</xref>; <xref ref-type="bibr" rid="ref71">Utevsky et al., 2014</xref>).</p>
<p>However, it is worth noting that the significant overlap in brain activity between SI and DI may be partially due to the nature of the experimental materials used in the SI tasks. For example, participants may have unintentionally included dynamic elements (e.g., imagining wind blowing through the forest) when visualizing static scenes. Additionally, the within-subjects design might have made the participants&#x2019; imagery in SI conditions be influenced by their exposure to DI conditions. Future studies could address this limitation by using more controlled stimuli or employing a between-subjects design to minimize such confounds.</p>
</sec>
<sec id="sec20">
<label>4.2</label>
<title>Precuneus was associated with vividness of SI and DI</title>
<p>As predicted in our introduction, the VVIQ was significantly correlated with activation of the left precuneus in the SI task (<xref ref-type="fig" rid="fig3">Figure 3b</xref>), and the VVIQ was significantly correlated with activation of the right precuneus in both SI and DI tasks (<xref ref-type="fig" rid="fig3">Figure 3c</xref>), further supported the notion that the precuneus was not only involved in VMI but also in the vividness of the imagery experienced. The relationship between precuneus activation and the vividness of VMI, as measured by the Vividness of Visual Imagery Questionnaire (VVIQ) (<xref ref-type="bibr" rid="ref50">Marks, 1973</xref>; <xref ref-type="bibr" rid="ref76">Zhang et al., 2024</xref>), was a novel contribution to our study. While previous research has documented the precuneus&#x2019;s activation during VMI tasks (<xref ref-type="bibr" rid="ref22">Dijkstra et al., 2017</xref>; <xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>; <xref ref-type="bibr" rid="ref74">Winlove et al., 2018</xref>), the explicit link to imagery vividness has not been thoroughly explored. Our findings revealed a significant correlation between the right precuneus activation and VVI scores in both SI and DI conditions, suggesting that this region may be a key neural substrate for the vividness of VMI.</p>
<p>The precuneus&#x2019;s role in VMI vividness may extend beyond the realm of imagery alone. Creativity, for example, often involves the generation of novel and detailed mental images, a process that, as the previous findings suggest, may be facilitated by precuneus activation (<xref ref-type="bibr" rid="ref21">De Pisapia et al., 2016</xref>; <xref ref-type="bibr" rid="ref30">Gonen-Yaacovi et al., 2013</xref>). The precuneus&#x2019;s engagement in tasks requiring creativity, such as artistic expression or problem-solving, further underscores its importance in cognitive functions that demand the manipulation and synthesis of complex information (<xref ref-type="bibr" rid="ref13">Cavanna and Trimble, 2006</xref>; <xref ref-type="bibr" rid="ref17">Dadario and Sughrue, 2023</xref>; <xref ref-type="bibr" rid="ref52">Mazzoni et al., 2019</xref>; <xref ref-type="bibr" rid="ref71">Utevsky et al., 2014</xref>). Moreover, the precuneus&#x2019;s activation has been observed in studies on episodic memory retrieval, which shares similarities with VMI in terms of constructing mental scenes (<xref ref-type="bibr" rid="ref13">Cavanna and Trimble, 2006</xref>; <xref ref-type="bibr" rid="ref17">Dadario and Sughrue, 2023</xref>).</p>
</sec>
<sec id="sec21">
<label>4.3</label>
<title>Precuneus is more active in DI</title>
<p>The present fMRI study provided intriguing insights into the neural distinctions between SI and DI, particularly highlighting the differential activation of the precuneus. Our findings indicated that DI was associated with greater activation in the right precuneus compared to SI, suggesting a more extensive neural engagement during the mental simulation of moving scenes or sequences. An existing study revealed that visual processing related regions and emotion-related regions were more active when viewing dynamic landscapes than static ones (<xref ref-type="bibr" rid="ref80">Zhao et al., 2020</xref>).</p>
<p>This enhanced activation during DI aligned with previous research that has consistently reported increased neural activity during tasks requiring the manipulation of mental images (<xref ref-type="bibr" rid="ref59">Parsons et al., 1995</xref>; <xref ref-type="bibr" rid="ref41">Kosslyn et al., 2001</xref>). The precuneus, a region known for its role in episodic memory retrieval and visuospatial processing (<xref ref-type="bibr" rid="ref13">Cavanna and Trimble, 2006</xref>), appears to be a common neural substrate for both SI and DI. However, our results suggest a heightened role for the right precuneus in DI, which may be attributed to its involvement in VVI. The correlation between the right precuneus activation and the vividness of VMI, as measured by the VVIQ-C, underscored the importance of this region in the clarity and detail of mental imagery. The lack of significant activation differences between SI and DI in any brain region, except for the right precuneus, indicated that while both types of imagery share common neural mechanisms, dynamic VMI may place greater demands on these mechanisms, particularly those related to the vividness of imagery. In other words, according to our finding that the right precuneus was associated with vividness in both SI and DI, we hypothesize that one of the main differences between DI and SI is the vividness of representations, i.e., DI is more vivid than SI.</p>
<p>In addition to the vividness factor, the enhanced activation of the right precuneus during DI may be attributed to its involvement in detailed cognitive processes like motor imagery and spatial navigation (<xref ref-type="bibr" rid="ref13">Cavanna and Trimble, 2006</xref>; <xref ref-type="bibr" rid="ref49">Malouin et al., 2003</xref>; <xref ref-type="bibr" rid="ref57">Ogiso et al., 2000</xref>). During DI tasks, participants are often tasked with simulating movements or navigating through mental landscapes, which could significantly engage the right precuneus. This engagement is likely due to the precuneus&#x2019;s essential function in managing spatial information and orchestrating motor actions, as highlighted by several studies (<xref ref-type="bibr" rid="ref49">Malouin et al., 2003</xref>; <xref ref-type="bibr" rid="ref57">Ogiso et al., 2000</xref>). Notably, the precuneus exhibits marked activation during tasks that involve imagining motion or navigating in a mental space, emphasizing its role in the dynamic components of imagery. Furthermore, the complexity of the psychological processes involved in DI may also contribute to the increased activation of the right precuneus (<xref ref-type="bibr" rid="ref34">Hebscher et al., 2019</xref>; <xref ref-type="bibr" rid="ref38">Jia et al., 2015</xref>; <xref ref-type="bibr" rid="ref63">Schulz et al., 2018</xref>). Unlike SI, which primarily involves constructing a mental image of an object or scene, DI requires participants to visualize changes. DI requires more cognitive resources to simulate movement and transformation than SI, which can lead to increased activation of the right precuneus (<xref ref-type="bibr" rid="ref34">Hebscher et al., 2019</xref>; <xref ref-type="bibr" rid="ref38">Jia et al., 2015</xref>; <xref ref-type="bibr" rid="ref63">Schulz et al., 2018</xref>). The need to integrate and manipulate these dynamic elements may explain why this region shows greater activation during DI tasks.</p>
<p>In conclusion, our fMRI study revealed the neural intricacies underlying SI and DI, with a particular emphasis on the precuneus&#x2019;s role. The precuneus played a key role in the vividness of mental representations. Our findings indicated that while SI and DI share common neural substrates, the latter engages a broader network. These findings not only advance our understanding of VMI but also underscore the precuneus&#x2019;s significance in shaping the vividness of mental imagery. While our study provides insights into the precuneus&#x2019;s role in visual mental imagery (VMI) vividness, a key limitation is the lack of dissociation between clarity and richness of details. The Vividness of Visual Imagery Questionnaire (VVIQ) yields a composite score, making it unclear whether precuneus activation is more strongly associated with clarity or richness (<xref ref-type="bibr" rid="ref50">Marks, 1973</xref>; <xref ref-type="bibr" rid="ref66">Sreekumar et al., 2018</xref>). Future studies could address this by incorporating separate ratings for clarity and richness during imagery tasks, enabling correlation analyses to identify distinct neural substrates. Additionally, seed-based functional connectivity analysis could elucidate the precuneus&#x2019;s contributions to clarity versus richness (<xref ref-type="bibr" rid="ref27">Fulford et al., 2018</xref>; <xref ref-type="bibr" rid="ref62">Saiote et al., 2016</xref>). Finally, developing objective measures for clarity and richness remains a challenge. Emerging techniques such as multivariate pattern analysis (MVPA), machine learning, and neurofeedback could provide more quantifiable and dynamic assessments of these aspects, advancing our understanding of VMI vividness (<xref ref-type="bibr" rid="ref23">Dijkstra et al., 2019</xref>; <xref ref-type="bibr" rid="ref74">Winlove et al., 2018</xref>).</p>
</sec>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec22">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="sec23">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Ethics Committee of Shanghai Jiao Tong University. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec sec-type="author-contributions" id="sec24">
<title>Author contributions</title>
<p>SD: Conceptualization, Data curation, Formal analysis, Methodology, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. QL: Data curation, Formal analysis, Methodology, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. JY: Data curation, Methodology, Writing &#x2013; review &#x0026; editing. QY: Data curation, Methodology, Supervision, Writing &#x2013; review &#x0026; editing. EL: Conceptualization, Investigation, Writing &#x2013; review &#x0026; editing. YL: Conceptualization, Investigation, Writing &#x2013; review &#x0026; editing. LJ: Conceptualization, Investigation, Writing &#x2013; review &#x0026; editing. CL: Conceptualization, Methodology, Supervision, Writing &#x2013; review &#x0026; editing. BZ: Conceptualization, Funding acquisition, Methodology, Project administration, Resources, Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="funding-information" id="sec25">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. This work was supported by Shanghai Jiao Tong University (Grant numbers 24X010301316; 23X010300690).</p>
</sec>
<ack>
<p>We are deeply grateful to all participants for their invaluable contributions and commitment to this research.</p>
</ack>
<sec sec-type="COI-statement" id="sec26">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="sec27">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn id="fn0001"><p><sup>1</sup><ext-link xlink:href="http://www.fil.ion.ucl.ac.uk/spm" ext-link-type="uri">www.fil.ion.ucl.ac.uk/spm</ext-link></p></fn>
<fn id="fn0002"><p><sup>2</sup><ext-link xlink:href="https://gitee.com/bmeqingfenglee/vviq" ext-link-type="uri">https://gitee.com/bmeqingfenglee/vviq</ext-link></p></fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aleman</surname> <given-names>A.</given-names></name> <name><surname>de Haan</surname> <given-names>E. H.</given-names></name> <name><surname>Kahn</surname> <given-names>R. S.</given-names></name></person-group> (<year>2005</year>). <article-title>Object versus spatial visual mental imagery in patients with schizophrenia</article-title>. <source>J. Psychiatry Neurosci.</source> <volume>30</volume>, <fpage>53</fpage>&#x2013;<lpage>56</lpage>, PMID: <pub-id pub-id-type="pmid">15644999</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Amit</surname> <given-names>E.</given-names></name> <name><surname>Greene</surname> <given-names>J. D.</given-names></name></person-group> (<year>2012</year>). <article-title>You see, the ends don&#x2019;t justify the means: visual imagery and moral judgment</article-title>. <source>Psychol. Sci.</source> <volume>23</volume>, <fpage>861</fpage>&#x2013;<lpage>868</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0956797611434965</pub-id>, PMID: <pub-id pub-id-type="pmid">22745347</pub-id></citation></ref>
<ref id="ref3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bartolomeo</surname> <given-names>P.</given-names></name> <name><surname>Hajhajate</surname> <given-names>D.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Spagna</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Assessing the causal role of early visual areas in visual mental imagery</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>21</volume>, <fpage>517</fpage>&#x2013;<lpage>518</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41583-020-0348-5</pub-id>, PMID: <pub-id pub-id-type="pmid">32665713</pub-id></citation></ref>
<ref id="ref4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bencivenga</surname> <given-names>F.</given-names></name> <name><surname>Sulpizio</surname> <given-names>V.</given-names></name> <name><surname>Tullo</surname> <given-names>M. G.</given-names></name> <name><surname>Galati</surname> <given-names>G.</given-names></name></person-group> (<year>2021</year>). <article-title>Assessing the effective connectivity of premotor areas during real vs imagined grasping: a DCM-PEB approach</article-title>. <source>NeuroImage</source> <volume>230</volume>:<fpage>117806</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2021.117806</pub-id>, PMID: <pub-id pub-id-type="pmid">33524574</pub-id></citation></ref>
<ref id="ref5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bencivenga</surname> <given-names>F.</given-names></name> <name><surname>Tullo</surname> <given-names>M. G.</given-names></name> <name><surname>Maltempo</surname> <given-names>T.</given-names></name> <name><surname>von Gal</surname> <given-names>A.</given-names></name> <name><surname>Serra</surname> <given-names>C.</given-names></name> <name><surname>Pitzalis</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Effector-selective modulation of the effective connectivity within frontoparietal circuits during visuomotor tasks</article-title>. <source>Cereb. Cortex</source> <volume>33</volume>, <fpage>2517</fpage>&#x2013;<lpage>2538</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhac223</pub-id>, PMID: <pub-id pub-id-type="pmid">35709758</pub-id></citation></ref>
<ref id="ref6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Benedek</surname> <given-names>M.</given-names></name> <name><surname>Jurisch</surname> <given-names>J.</given-names></name> <name><surname>Koschutnig</surname> <given-names>K.</given-names></name> <name><surname>Fink</surname> <given-names>A.</given-names></name> <name><surname>Beaty</surname> <given-names>R. E.</given-names></name></person-group> (<year>2020</year>). <article-title>Elements of creative thought: investigating the cognitive and neural correlates of association and bi-association processes</article-title>. <source>NeuroImage</source> <volume>210</volume>:<fpage>116586</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.116586</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bien</surname> <given-names>N.</given-names></name> <name><surname>Sack</surname> <given-names>A. T.</given-names></name></person-group> (<year>2014</year>). <article-title>Dissecting hemisphere-specific contributions to visual spatial imagery using parametric brain mapping</article-title>. <source>NeuroImage</source> <volume>94</volume>, <fpage>231</fpage>&#x2013;<lpage>238</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2014.03.006</pub-id>, PMID: <pub-id pub-id-type="pmid">24636879</pub-id></citation></ref>
<ref id="ref8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bird</surname> <given-names>C. M.</given-names></name> <name><surname>Bisby</surname> <given-names>J. A.</given-names></name> <name><surname>Burgess</surname> <given-names>N.</given-names></name></person-group> (<year>2012</year>). <article-title>The hippocampus and spatial constraints on mental imagery</article-title>. <source>Front. Hum. Neurosci.</source> <volume>6</volume>:<fpage>142</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2012.00142</pub-id>, PMID: <pub-id pub-id-type="pmid">22629242</pub-id></citation></ref>
<ref id="ref9"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Borst</surname> <given-names>G.</given-names></name></person-group> (<year>2013</year>). &#x201C;<article-title>Neural underpinning of object mental imagery, spatial imagery, and motor imagery</article-title>&#x201D; in <source>The Oxford handbook of cognitive Neuroscience</source>, vol. <volume>2</volume> (<publisher-loc>Oxford</publisher-loc>: <publisher-name>OUP USA</publisher-name>), <fpage>74</fpage>&#x2013;<lpage>87</lpage>.</citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Burianov&#x00E1;</surname> <given-names>H.</given-names></name> <name><surname>Marstaller</surname> <given-names>L.</given-names></name> <name><surname>Sowman</surname> <given-names>P.</given-names></name> <name><surname>Tesan</surname> <given-names>G.</given-names></name> <name><surname>Rich</surname> <given-names>A. N.</given-names></name> <name><surname>Williams</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Multimodal functional imaging of motor imagery using a novel paradigm</article-title>. <source>NeuroImage</source> <volume>71</volume>, <fpage>50</fpage>&#x2013;<lpage>58</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.01.001</pub-id>, PMID: <pub-id pub-id-type="pmid">23319043</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Carbone</surname> <given-names>E.</given-names></name> <name><surname>Meneghetti</surname> <given-names>C.</given-names></name> <name><surname>Borella</surname> <given-names>E.</given-names></name></person-group> (<year>2021</year>). <article-title>Supporting route learning in older adults: the role of imagery strategy</article-title>. <source>Aging Ment. Health</source> <volume>25</volume>, <fpage>1564</fpage>&#x2013;<lpage>1571</lpage>. doi: <pub-id pub-id-type="doi">10.1080/13607863.2020.1727844</pub-id>, PMID: <pub-id pub-id-type="pmid">32067468</pub-id></citation></ref>
<ref id="ref13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cavanna</surname> <given-names>A. E.</given-names></name> <name><surname>Trimble</surname> <given-names>M. R.</given-names></name></person-group> (<year>2006</year>). <article-title>The precuneus: a review of its functional anatomy and behavioural correlates</article-title>. <source>Brain</source> <volume>129</volume>, <fpage>564</fpage>&#x2013;<lpage>583</lpage>. doi: <pub-id pub-id-type="doi">10.1093/brain/awl004</pub-id>, PMID: <pub-id pub-id-type="pmid">16399806</pub-id></citation></ref>
<ref id="ref14"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Cohen</surname> <given-names>J.</given-names></name></person-group> (<year>1988</year>). &#x201C;<article-title>The concepts of power analysis</article-title>&#x201D; in <source>Statistical power analysis for the behavioral sciences</source>, vol. <volume>2</volume> (<publisher-loc>Cambridge</publisher-loc>: <publisher-name>Academic Press</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>17</lpage>.</citation></ref>
<ref id="ref15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Confalonieri</surname> <given-names>L.</given-names></name> <name><surname>Pagnoni</surname> <given-names>G.</given-names></name> <name><surname>Barsalou</surname> <given-names>L. W.</given-names></name> <name><surname>Rajendra</surname> <given-names>J.</given-names></name> <name><surname>Eickhoff</surname> <given-names>S. B.</given-names></name> <name><surname>Butler</surname> <given-names>A. J.</given-names></name></person-group> (<year>2012</year>). <article-title>Brain activation in primary motor and somatosensory cortices during motor imagery correlates with motor imagery ability in stroke patients</article-title>. <source>Int. Scholarly Res. Notices</source>. doi: <pub-id pub-id-type="doi">10.5402/2012/613595</pub-id></citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cui</surname> <given-names>X.</given-names></name> <name><surname>Jeter</surname> <given-names>C. B.</given-names></name> <name><surname>Yang</surname> <given-names>D.</given-names></name> <name><surname>Montague</surname> <given-names>P. R.</given-names></name> <name><surname>Eagleman</surname> <given-names>D. M.</given-names></name></person-group> (<year>2007</year>). <article-title>Vividness of mental imagery: individual variability can be measured objectively</article-title>. <source>Vis. Res.</source> <volume>47</volume>, <fpage>474</fpage>&#x2013;<lpage>478</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.visres.2006.11.013</pub-id>, PMID: <pub-id pub-id-type="pmid">17239915</pub-id></citation></ref>
<ref id="ref17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dadario</surname> <given-names>N. B.</given-names></name> <name><surname>Sughrue</surname> <given-names>M. E.</given-names></name></person-group> (<year>2023</year>). <article-title>The functional role of the precuneus</article-title>. <source>Brain</source> <volume>146</volume>, <fpage>3598</fpage>&#x2013;<lpage>3607</lpage>. doi: <pub-id pub-id-type="doi">10.1093/brain/awad181</pub-id>, PMID: <pub-id pub-id-type="pmid">37254740</pub-id></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>D'Angiulli</surname> <given-names>A.</given-names></name> <name><surname>Runge</surname> <given-names>M.</given-names></name> <name><surname>Faulkner</surname> <given-names>A.</given-names></name> <name><surname>Zakizadeh</surname> <given-names>J.</given-names></name> <name><surname>Chan</surname> <given-names>A.</given-names></name> <name><surname>Morcos</surname> <given-names>S.</given-names></name></person-group> (<year>2013</year>). <article-title>Vividness of visual imagery and incidental recall of verbal cues, when phenomenological availability reflects long-term memory accessibility</article-title>. <source>Front. Psychol.</source> <volume>4</volume>:<fpage>1</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2013.00001</pub-id>, PMID: <pub-id pub-id-type="pmid">23382719</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>de Borst</surname> <given-names>A. W.</given-names></name> <name><surname>Sack</surname> <given-names>A. T.</given-names></name> <name><surname>Jansma</surname> <given-names>B. M.</given-names></name> <name><surname>Esposito</surname> <given-names>F.</given-names></name> <name><surname>De Martino</surname> <given-names>F.</given-names></name> <name><surname>Valente</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Integration of &#x201C;what&#x201D; and &#x201C;where&#x201D; in frontal cortex during visual imagery of scenes</article-title>. <source>NeuroImage</source> <volume>60</volume>, <fpage>47</fpage>&#x2013;<lpage>58</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.12.005</pub-id>, PMID: <pub-id pub-id-type="pmid">22186678</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>De Koning</surname> <given-names>B. B.</given-names></name> <name><surname>van der Schoot</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Becoming part of the story! Refueling the interest in visualization strategies for reading comprehension</article-title>. <source>Educ. Psychol. Rev.</source> <volume>25</volume>, <fpage>261</fpage>&#x2013;<lpage>287</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10648-013-9222-6</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>De Pisapia</surname> <given-names>N.</given-names></name> <name><surname>Bacci</surname> <given-names>F.</given-names></name> <name><surname>Parrott</surname> <given-names>D.</given-names></name> <name><surname>Melcher</surname> <given-names>D.</given-names></name></person-group> (<year>2016</year>). <article-title>Brain networks for visual creativity: a functional connectivity study of planning a visual artwork</article-title>. <source>Sci. Rep.</source> <volume>6</volume>:<fpage>39185</fpage>. doi: <pub-id pub-id-type="doi">10.1038/srep39185</pub-id>, PMID: <pub-id pub-id-type="pmid">27991592</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dijkstra</surname> <given-names>N.</given-names></name> <name><surname>Bosch</surname> <given-names>S. E.</given-names></name> <name><surname>van Gerven</surname> <given-names>M. A.</given-names></name></person-group> (<year>2017</year>). <article-title>Vividness of visual imagery depends on the neural overlap with perception in visual areas</article-title>. <source>J. Neurosci.</source> <volume>37</volume>, <fpage>1367</fpage>&#x2013;<lpage>1373</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.3022-16.2016</pub-id>, PMID: <pub-id pub-id-type="pmid">28073940</pub-id></citation></ref>
<ref id="ref23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dijkstra</surname> <given-names>N.</given-names></name> <name><surname>Bosch</surname> <given-names>S. E.</given-names></name> <name><surname>van Gerven</surname> <given-names>M. A.</given-names></name></person-group> (<year>2019</year>). <article-title>Shared neural mechanisms of visual perception and imagery</article-title>. <source>Trends Cogn. Sci.</source> <volume>23</volume>, <fpage>423</fpage>&#x2013;<lpage>434</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tics.2019.02.004</pub-id>, PMID: <pub-id pub-id-type="pmid">30876729</pub-id></citation></ref>
<ref id="ref24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Faul</surname> <given-names>F.</given-names></name> <name><surname>Erdfelder</surname> <given-names>E.</given-names></name> <name><surname>Buchner</surname> <given-names>A.</given-names></name> <name><surname>Lang</surname> <given-names>A. G.</given-names></name></person-group> (<year>2009</year>). <article-title>Statistical power analyses using G&#x002A; power 3.1: tests for correlation and regression analyses</article-title>. <source>Behav. Res. Methods</source> <volume>41</volume>, <fpage>1149</fpage>&#x2013;<lpage>1160</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BRM.41.4.1149</pub-id>, PMID: <pub-id pub-id-type="pmid">19897823</pub-id></citation></ref>
<ref id="ref25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Faul</surname> <given-names>F.</given-names></name> <name><surname>Erdfelder</surname> <given-names>E.</given-names></name> <name><surname>Lang</surname> <given-names>A. G.</given-names></name> <name><surname>Buchner</surname> <given-names>A.</given-names></name></person-group> (<year>2007</year>). <article-title>G&#x002A; power 3: a flexible statistical power analysis program for the social, behavioral, and biomedical sciences</article-title>. <source>Behav. Res. Methods</source> <volume>39</volume>, <fpage>175</fpage>&#x2013;<lpage>191</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03193146</pub-id>, PMID: <pub-id pub-id-type="pmid">17695343</pub-id></citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Formisano</surname> <given-names>E.</given-names></name> <name><surname>Linden</surname> <given-names>D. E.</given-names></name> <name><surname>Di Salle</surname> <given-names>F.</given-names></name> <name><surname>Trojano</surname> <given-names>L.</given-names></name> <name><surname>Esposito</surname> <given-names>F.</given-names></name> <name><surname>Sack</surname> <given-names>A. T.</given-names></name> <etal/></person-group>. (<year>2002</year>). <article-title>Tracking the mind's image in the brain I: time-resolved fMRI during visuospatial mental imagery</article-title>. <source>Neuron</source> <volume>35</volume>, <fpage>185</fpage>&#x2013;<lpage>194</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0896-6273(02)00747-X</pub-id>, PMID: <pub-id pub-id-type="pmid">12123618</pub-id></citation></ref>
<ref id="ref9001"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Friston</surname> <given-names>K. J.</given-names></name> <name><surname>Frith</surname> <given-names>C. D.</given-names></name> <name><surname>Turner</surname> <given-names>R.</given-names></name> <name><surname>Frackowiak</surname> <given-names>R. S.</given-names></name></person-group> (<year>1995a</year>). <article-title>Characterizing evoked hemodynamics with fMRI</article-title>. <source>NeuroImage</source> <volume>2</volume>, <fpage>157</fpage>&#x2013;<lpage>165</lpage>.<!-- NOTE(review): removed PMID 23319043, which belongs to ref11 (Burianov&#x00E1; et al., 2013) and was apparently copy-pasted here; verify and insert the correct PMID for Friston et al. (1995a) --></citation></ref>
<ref id="ref9002"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Friston</surname> <given-names>K. J.</given-names></name> <name><surname>Holmes</surname> <given-names>A. P.</given-names></name> <name><surname>Poline</surname> <given-names>J. B.</given-names></name> <name><surname>Grasby</surname> <given-names>P. J.</given-names></name> <name><surname>Williams</surname> <given-names>S. C. R.</given-names></name> <name><surname>Frackowiak</surname> <given-names>R. S.</given-names></name> <etal/></person-group>. (<year>1995b</year>). <article-title>Analysis of fMRI time-series revisited</article-title>. <source>NeuroImage</source> <volume>2</volume>, <fpage>45</fpage>&#x2013;<lpage>53</lpage>.<!-- NOTE(review): removed PMID 23319043, which belongs to ref11 (Burianov&#x00E1; et al., 2013) and was apparently copy-pasted here; verify and insert the correct PMID for Friston et al. (1995b) --></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fulford</surname> <given-names>J.</given-names></name> <name><surname>Milton</surname> <given-names>F.</given-names></name> <name><surname>Salas</surname> <given-names>D.</given-names></name> <name><surname>Smith</surname> <given-names>A.</given-names></name> <name><surname>Simler</surname> <given-names>A.</given-names></name> <name><surname>Winlove</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>The neural correlates of visual imagery vividness&#x2013;an fMRI study and literature review</article-title>. <source>Cortex</source> <volume>105</volume>, <fpage>26</fpage>&#x2013;<lpage>40</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2017.09.014</pub-id>, PMID: <pub-id pub-id-type="pmid">29079342</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ganis</surname> <given-names>G.</given-names></name> <name><surname>Thompson</surname> <given-names>W. L.</given-names></name> <name><surname>Kosslyn</surname> <given-names>S. M.</given-names></name></person-group> (<year>2004</year>). <article-title>Brain areas underlying visual mental imagery and visual perception: an fMRI study</article-title>. <source>Cogn. Brain Res.</source> <volume>20</volume>, <fpage>226</fpage>&#x2013;<lpage>241</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cogbrainres.2004.02.012</pub-id>, PMID: <pub-id pub-id-type="pmid">15183394</pub-id></citation></ref>
<ref id="ref29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gardini</surname> <given-names>S.</given-names></name> <name><surname>Cornoldi</surname> <given-names>C.</given-names></name> <name><surname>De Beni</surname> <given-names>R.</given-names></name> <name><surname>Venneri</surname> <given-names>A.</given-names></name></person-group> (<year>2009</year>). <article-title>Cognitive and neuronal processes involved in sequential generation of general and specific mental images</article-title>. <source>Psychol. Res. PRPF</source> <volume>73</volume>, <fpage>633</fpage>&#x2013;<lpage>643</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00426-008-0175-1</pub-id>, PMID: <pub-id pub-id-type="pmid">18987882</pub-id></citation></ref>
<ref id="ref30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gonen-Yaacovi</surname> <given-names>G.</given-names></name> <name><surname>de Souza</surname> <given-names>L. C.</given-names></name> <name><surname>Levy</surname> <given-names>R.</given-names></name> <name><surname>Urbanski</surname> <given-names>M.</given-names></name> <name><surname>Josse</surname> <given-names>G.</given-names></name> <name><surname>Volle</surname> <given-names>E.</given-names></name></person-group> (<year>2013</year>). <article-title>Rostral and caudal prefrontal contribution to creativity: a meta-analysis of functional imaging data</article-title>. <source>Front. Hum. Neurosci.</source> <volume>7</volume>:<fpage>465</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2013.00465</pub-id>, PMID: <pub-id pub-id-type="pmid">23966927</pub-id></citation></ref>
<ref id="ref31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hajhajate</surname> <given-names>D.</given-names></name> <name><surname>Kaufmann</surname> <given-names>B. C.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Siuda-Krzywicka</surname> <given-names>K.</given-names></name> <name><surname>Bartolomeo</surname> <given-names>P.</given-names></name></person-group> (<year>2022</year>). <article-title>The connectional anatomy of visual mental imagery: evidence from a patient with left occipito-temporal damage</article-title>. <source>Brain Struct. Funct.</source> <volume>227</volume>, <fpage>3075</fpage>&#x2013;<lpage>3083</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00429-022-02505-x</pub-id>, PMID: <pub-id pub-id-type="pmid">35622159</pub-id></citation></ref>
<ref id="ref32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hanakawa</surname> <given-names>T.</given-names></name> <name><surname>Immisch</surname> <given-names>I.</given-names></name> <name><surname>Toma</surname> <given-names>K.</given-names></name> <name><surname>Dimyan</surname> <given-names>M. A.</given-names></name> <name><surname>Van Gelderen</surname> <given-names>P.</given-names></name> <name><surname>Hallett</surname> <given-names>M.</given-names></name></person-group> (<year>2003</year>). <article-title>Functional properties of brain areas associated with motor execution and imagery</article-title>. <source>J. Neurophysiol.</source> <volume>89</volume>, <fpage>989</fpage>&#x2013;<lpage>1002</lpage>. doi: <pub-id pub-id-type="doi">10.1152/jn.00132.2002</pub-id>, PMID: <pub-id pub-id-type="pmid">12574475</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hebscher</surname> <given-names>M.</given-names></name> <name><surname>Ibrahim</surname> <given-names>C.</given-names></name> <name><surname>Gilboa</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Precuneus stimulation alters the neural dynamics of autobiographical memory retrieval</article-title>. <source>NeuroImage</source> <volume>210</volume>:<fpage>116575</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.116575</pub-id>, PMID: <pub-id pub-id-type="pmid">31972285</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hebscher</surname> <given-names>M.</given-names></name> <name><surname>Meltzer</surname> <given-names>J. A.</given-names></name> <name><surname>Gilboa</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>A causal role for the precuneus in network-wide theta and gamma oscillatory activity during complex memory retrieval</article-title>. <source>eLife</source> <volume>8</volume>:<fpage>e43114</fpage>. doi: <pub-id pub-id-type="doi">10.7554/eLife.43114</pub-id>, PMID: <pub-id pub-id-type="pmid">30741161</pub-id></citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>H&#x00E9;tu</surname> <given-names>S.</given-names></name> <name><surname>Gr&#x00E9;goire</surname> <given-names>M.</given-names></name> <name><surname>Saimpont</surname> <given-names>A.</given-names></name> <name><surname>Coll</surname> <given-names>M.-P.</given-names></name> <name><surname>Eug&#x00E8;ne</surname> <given-names>F.</given-names></name> <name><surname>Michon</surname> <given-names>P.-E.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>The neural network of motor imagery: an ALE meta-analysis</article-title>. <source>Neurosci. Biobehav. Rev.</source> <volume>37</volume>, <fpage>930</fpage>&#x2013;<lpage>949</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neubiorev.2013.03.017</pub-id>, PMID: <pub-id pub-id-type="pmid">23583615</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hishitani</surname> <given-names>S.</given-names></name> <name><surname>Murakami</surname> <given-names>S.</given-names></name></person-group> (<year>1992</year>). <article-title>What is vividness of imagery? Characteristics of vivid visual imagery</article-title>. <source>Percept. Mot. Skills</source> <volume>75</volume>, <fpage>1291</fpage>&#x2013;<lpage>1307</lpage>. doi: <pub-id pub-id-type="doi">10.2466/pms.1992.75.3f.1291</pub-id>, PMID: <pub-id pub-id-type="pmid">1484801</pub-id></citation></ref>
<ref id="ref37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ishai</surname> <given-names>A.</given-names></name> <name><surname>Ungerleider</surname> <given-names>L. G.</given-names></name> <name><surname>Haxby</surname> <given-names>J. V.</given-names></name></person-group> (<year>2000</year>). <article-title>Distributed neural systems for the generation of visual images</article-title>. <source>Neuron</source> <volume>28</volume>, <fpage>979</fpage>&#x2013;<lpage>990</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0896-6273(00)00168-9</pub-id>, PMID: <pub-id pub-id-type="pmid">11163281</pub-id></citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jia</surname> <given-names>X.</given-names></name> <name><surname>Liang</surname> <given-names>P.</given-names></name> <name><surname>Shi</surname> <given-names>L.</given-names></name> <name><surname>Wang</surname> <given-names>D.</given-names></name> <name><surname>Li</surname> <given-names>K.</given-names></name></person-group> (<year>2015</year>). <article-title>Prefrontal and parietal activity is modulated by the rule complexity of inductive reasoning and can be predicted by a cognitive model</article-title>. <source>Neuropsychologia</source> <volume>66</volume>, <fpage>67</fpage>&#x2013;<lpage>74</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2014.10.015</pub-id>, PMID: <pub-id pub-id-type="pmid">25447072</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Karnath</surname> <given-names>H.-O.</given-names></name> <name><surname>R&#x00FC;ter</surname> <given-names>J.</given-names></name> <name><surname>Mandler</surname> <given-names>A.</given-names></name> <name><surname>Himmelbach</surname> <given-names>M.</given-names></name></person-group> (<year>2009</year>). <article-title>The anatomy of object recognition&#x2014;visual form agnosia caused by medial occipitotemporal stroke</article-title>. <source>J. Neurosci.</source> <volume>29</volume>, <fpage>5854</fpage>&#x2013;<lpage>5862</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.5192-08.2009</pub-id>, PMID: <pub-id pub-id-type="pmid">19420252</pub-id></citation></ref>
<ref id="ref40"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Keogh</surname> <given-names>R.</given-names></name> <name><surname>Pearson</surname> <given-names>J.</given-names></name> <name><surname>Zeman</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). &#x201C;<article-title>Aphantasia: the science of visual imagery extremes</article-title>&#x201D; in <source>Handbook of clinical neurology</source>, vol. <volume>178</volume> (<publisher-loc>Amsterdam</publisher-loc>: <publisher-name>Elsevier</publisher-name>), <fpage>277</fpage>&#x2013;<lpage>296</lpage>.</citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kosslyn</surname> <given-names>S. M.</given-names></name> <name><surname>Ganis</surname> <given-names>G.</given-names></name> <name><surname>Thompson</surname> <given-names>W. L.</given-names></name></person-group> (<year>2001</year>). <article-title>Neural foundations of imagery</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>2</volume>, <fpage>635</fpage>&#x2013;<lpage>642</lpage>. doi: <pub-id pub-id-type="doi">10.1038/35090055</pub-id>, PMID: <pub-id pub-id-type="pmid">11533731</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>L&#x00EA;</surname> <given-names>S.</given-names></name> <name><surname>Cardebat</surname> <given-names>D.</given-names></name> <name><surname>Boulanouar</surname> <given-names>K.</given-names></name> <name><surname>H&#x00E9;naff</surname> <given-names>M. A.</given-names></name> <name><surname>Michel</surname> <given-names>F.</given-names></name> <name><surname>Milner</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2002</year>). <article-title>Seeing, since childhood, without ventral stream: a behavioural study</article-title>. <source>Brain</source> <volume>125</volume>, <fpage>58</fpage>&#x2013;<lpage>74</lpage>. doi: <pub-id pub-id-type="doi">10.1093/brain/awf004</pub-id></citation></ref>
<ref id="ref43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Leutner</surname> <given-names>D.</given-names></name> <name><surname>Leopold</surname> <given-names>C.</given-names></name> <name><surname>Sumfleth</surname> <given-names>E.</given-names></name></person-group> (<year>2009</year>). <article-title>Cognitive load and science text comprehension: effects of drawing and mentally imagining text content</article-title>. <source>Comput. Hum. Behav.</source> <volume>25</volume>, <fpage>284</fpage>&#x2013;<lpage>289</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2008.12.010</pub-id></citation></ref>
<ref id="ref44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Levine</surname> <given-names>D. N.</given-names></name> <name><surname>Warach</surname> <given-names>J.</given-names></name> <name><surname>Farah</surname> <given-names>M.</given-names></name></person-group> (<year>1985</year>). <article-title>Two visual systems in mental imagery: dissociation of &#x201C;what&#x201D; and &#x201C;where&#x201D; in imagery disorders due to bilateral posterior cerebral lesions</article-title>. <source>Neurology</source> <volume>35</volume>:<fpage>1010</fpage>. doi: <pub-id pub-id-type="doi">10.1212/WNL.35.7.1010</pub-id>, PMID: <pub-id pub-id-type="pmid">4010939</pub-id></citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Bartolomeo</surname> <given-names>P.</given-names></name></person-group> (<year>2023a</year>). <article-title>The model-resistant richness of human visual experience</article-title>. <source>Behav. Brain Sci.</source> <volume>46</volume>:<fpage>e401</fpage>. doi: <pub-id pub-id-type="doi">10.1017/S0140525X23001656</pub-id></citation></ref>
<ref id="ref46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Bartolomeo</surname> <given-names>P.</given-names></name></person-group> (<year>2023b</year>). <article-title>Probing the unimaginable: the impact of aphantasia on distinct domains of visual mental imagery and visual perception</article-title>. <source>Cortex</source> <volume>166</volume>, <fpage>338</fpage>&#x2013;<lpage>347</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2023.06.003</pub-id>, PMID: <pub-id pub-id-type="pmid">37481856</pub-id></citation></ref>
<ref id="ref47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Logie</surname> <given-names>R. H.</given-names></name> <name><surname>Pernet</surname> <given-names>C. R.</given-names></name> <name><surname>Buonocore</surname> <given-names>A.</given-names></name> <name><surname>Della Sala</surname> <given-names>S.</given-names></name></person-group> (<year>2011</year>). <article-title>Lower and higher imagers activate networks differentially in mental rotation</article-title>. <source>Neuropsychologia</source> <volume>49</volume>, <fpage>3071</fpage>&#x2013;<lpage>3077</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2011.07.011</pub-id>, PMID: <pub-id pub-id-type="pmid">21802436</pub-id></citation></ref>
<ref id="ref48"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Luzzatti</surname> <given-names>C.</given-names></name> <name><surname>Vecchi</surname> <given-names>T.</given-names></name> <name><surname>Agazzi</surname> <given-names>D.</given-names></name> <name><surname>Cesa-Bianchi</surname> <given-names>M.</given-names></name> <name><surname>Vergani</surname> <given-names>C.</given-names></name></person-group> (<year>1998</year>). <article-title>A neurological dissociation between preserved visual and impaired spatial processing in mental imagery</article-title>. <source>Cortex</source> <volume>34</volume>, <fpage>461</fpage>&#x2013;<lpage>469</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0010-9452(08)70768-8</pub-id>, PMID: <pub-id pub-id-type="pmid">9669110</pub-id></citation></ref>
<ref id="ref49"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Malouin</surname> <given-names>F.</given-names></name> <name><surname>Richards</surname> <given-names>C. L.</given-names></name> <name><surname>Jackson</surname> <given-names>P. L.</given-names></name> <name><surname>Dumas</surname> <given-names>F.</given-names></name> <name><surname>Doyon</surname> <given-names>J.</given-names></name></person-group> (<year>2003</year>). <article-title>Brain activations during motor imagery of locomotor-related tasks: a PET study</article-title>. <source>Hum. Brain Mapp.</source> <volume>19</volume>, <fpage>47</fpage>&#x2013;<lpage>62</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.10103</pub-id>, PMID: <pub-id pub-id-type="pmid">12731103</pub-id></citation></ref>
<ref id="ref50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Marks</surname> <given-names>D. F.</given-names></name></person-group> (<year>1973</year>). <article-title>Visual imagery differences in the recall of pictures</article-title>. <source>Br. J. Psychol.</source> <volume>64</volume>, <fpage>17</fpage>&#x2013;<lpage>24</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.2044-8295.1973.tb01322.x</pub-id>, PMID: <pub-id pub-id-type="pmid">4742442</pub-id></citation></ref>
<ref id="ref51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>May</surname> <given-names>J.</given-names></name> <name><surname>Redding</surname> <given-names>E.</given-names></name> <name><surname>Whatley</surname> <given-names>S.</given-names></name> <name><surname>&#x0141;ucznik</surname> <given-names>K.</given-names></name> <name><surname>Clements</surname> <given-names>L.</given-names></name> <name><surname>Weber</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Enhancing creativity by training metacognitive skills in mental imagery</article-title>. <source>Think. Skills Creat.</source> <volume>38</volume>:<fpage>100739</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tsc.2020.100739</pub-id></citation></ref>
<ref id="ref52"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mazzoni</surname> <given-names>G.</given-names></name> <name><surname>Clark</surname> <given-names>A.</given-names></name> <name><surname>De Bartolo</surname> <given-names>A.</given-names></name> <name><surname>Guerrini</surname> <given-names>C.</given-names></name> <name><surname>Nahouli</surname> <given-names>Z.</given-names></name> <name><surname>Duzzi</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Brain activation in highly superior autobiographical memory: the role of the precuneus in the autobiographical memory retrieval network</article-title>. <source>Cortex</source> <volume>120</volume>, <fpage>588</fpage>&#x2013;<lpage>602</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2019.02.020</pub-id>, PMID: <pub-id pub-id-type="pmid">30926140</pub-id></citation></ref>
<ref id="ref53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mechelli</surname> <given-names>A.</given-names></name> <name><surname>Price</surname> <given-names>C. J.</given-names></name> <name><surname>Friston</surname> <given-names>K. J.</given-names></name> <name><surname>Ishai</surname> <given-names>A.</given-names></name></person-group> (<year>2004</year>). <article-title>Where bottom-up meets top-down: neuronal interactions during perception and imagery</article-title>. <source>Cereb. Cortex</source> <volume>14</volume>, <fpage>1256</fpage>&#x2013;<lpage>1265</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhh087</pub-id>, PMID: <pub-id pub-id-type="pmid">15192010</pub-id></citation></ref>
<ref id="ref54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Munzert</surname> <given-names>J.</given-names></name> <name><surname>Lorey</surname> <given-names>B.</given-names></name> <name><surname>Zentgraf</surname> <given-names>K.</given-names></name></person-group> (<year>2009</year>). <article-title>Cognitive motor processes: the role of motor imagery in the study of motor representations</article-title>. <source>Brain Res. Rev.</source> <volume>60</volume>, <fpage>306</fpage>&#x2013;<lpage>326</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.brainresrev.2008.12.024</pub-id>, PMID: <pub-id pub-id-type="pmid">19167426</pub-id></citation></ref>
<ref id="ref55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Murray</surname> <given-names>R. J.</given-names></name> <name><surname>Debban&#x00E9;</surname> <given-names>M.</given-names></name> <name><surname>Fox</surname> <given-names>P. T.</given-names></name> <name><surname>Bzdok</surname> <given-names>D.</given-names></name> <name><surname>Eickhoff</surname> <given-names>S. B.</given-names></name></person-group> (<year>2015</year>). <article-title>Functional connectivity mapping of regions associated with self- and other-processing</article-title>. <source>Hum. Brain Mapp.</source> <volume>36</volume>, <fpage>1304</fpage>&#x2013;<lpage>1324</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.22703</pub-id>, PMID: <pub-id pub-id-type="pmid">25482016</pub-id></citation></ref>
<ref id="ref56"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Northoff</surname> <given-names>G.</given-names></name> <name><surname>Heinzel</surname> <given-names>A.</given-names></name> <name><surname>De Greck</surname> <given-names>M.</given-names></name> <name><surname>Bermpohl</surname> <given-names>F.</given-names></name> <name><surname>Dobrowolny</surname> <given-names>H.</given-names></name> <name><surname>Panksepp</surname> <given-names>J.</given-names></name></person-group> (<year>2006</year>). <article-title>Self-referential processing in our brain&#x2014;a meta-analysis of imaging studies on the self</article-title>. <source>NeuroImage</source> <volume>31</volume>, <fpage>440</fpage>&#x2013;<lpage>457</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2005.12.002</pub-id>, PMID: <pub-id pub-id-type="pmid">16466680</pub-id></citation></ref>
<ref id="ref57"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ogiso</surname> <given-names>T.</given-names></name> <name><surname>Kobayashi</surname> <given-names>K.</given-names></name> <name><surname>Sugishita</surname> <given-names>M.</given-names></name></person-group> (<year>2000</year>). <article-title>The precuneus in motor imagery: a magnetoencephalographic study</article-title>. <source>Neuroreport</source> <volume>11</volume>, <fpage>1345</fpage>&#x2013;<lpage>1349</lpage>. doi: <pub-id pub-id-type="doi">10.1097/00001756-200004270-00039</pub-id>, PMID: <pub-id pub-id-type="pmid">10817619</pub-id></citation></ref>
<ref id="ref58"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Paivio</surname> <given-names>A.</given-names></name> <name><surname>Clark</surname> <given-names>J. M.</given-names></name></person-group> (<year>1991</year>). &#x201C;<article-title>Static versus dynamic imagery</article-title>&#x201D; in <source>Imagery and cognition</source> (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer US</publisher-name>), <fpage>221</fpage>&#x2013;<lpage>245</lpage>.</citation></ref>
<ref id="ref59"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Parsons</surname> <given-names>L. M.</given-names></name> <name><surname>Fox</surname> <given-names>P. T.</given-names></name> <name><surname>Downs</surname> <given-names>J. H.</given-names></name> <name><surname>Glass</surname> <given-names>T.</given-names></name> <name><surname>Hirsch</surname> <given-names>T. B.</given-names></name> <name><surname>Martin</surname> <given-names>C. C.</given-names></name> <etal/></person-group>. (<year>1995</year>). <article-title>Use of implicit motor imagery for visual shape discrimination as revealed by PET</article-title>. <source>Nature</source> <volume>375</volume>, <fpage>54</fpage>&#x2013;<lpage>58</lpage>. doi: <pub-id pub-id-type="doi">10.1038/375054a0</pub-id>, PMID: <pub-id pub-id-type="pmid">7723842</pub-id></citation></ref>
<ref id="ref60"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pearson</surname> <given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>The human imagination: the cognitive neuroscience of visual mental imagery</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>20</volume>, <fpage>624</fpage>&#x2013;<lpage>634</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41583-019-0202-9</pub-id>, PMID: <pub-id pub-id-type="pmid">31384033</pub-id></citation></ref>
<ref id="ref61"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Podzebenko</surname> <given-names>K.</given-names></name> <name><surname>Egan</surname> <given-names>G. F.</given-names></name> <name><surname>Watson</surname> <given-names>J. D.</given-names></name></person-group> (<year>2002</year>). <article-title>Widespread dorsal stream activation during a parametric mental rotation task, revealed with functional magnetic resonance imaging</article-title>. <source>NeuroImage</source> <volume>15</volume>, <fpage>547</fpage>&#x2013;<lpage>558</lpage>. doi: <pub-id pub-id-type="doi">10.1006/nimg.2001.0999</pub-id>, PMID: <pub-id pub-id-type="pmid">11848697</pub-id></citation></ref>
<ref id="ref62"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Saiote</surname> <given-names>C.</given-names></name> <name><surname>Tacchino</surname> <given-names>A.</given-names></name> <name><surname>Brichetto</surname> <given-names>G.</given-names></name> <name><surname>Roccatagliata</surname> <given-names>L.</given-names></name> <name><surname>Bommarito</surname> <given-names>G.</given-names></name> <name><surname>Cordano</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Resting-state functional connectivity and motor imagery brain activation</article-title>. <source>Hum. Brain Mapp.</source> <volume>37</volume>, <fpage>3847</fpage>&#x2013;<lpage>3857</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.23280</pub-id>, PMID: <pub-id pub-id-type="pmid">27273577</pub-id></citation></ref>
<ref id="ref63"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schulz</surname> <given-names>L.</given-names></name> <name><surname>Ischebeck</surname> <given-names>A.</given-names></name> <name><surname>Wriessnegger</surname> <given-names>S. C.</given-names></name> <name><surname>Steyrl</surname> <given-names>D.</given-names></name> <name><surname>M&#x00FC;ller-Putz</surname> <given-names>G. R.</given-names></name></person-group> (<year>2018</year>). <article-title>Action affordances and visuo-spatial complexity in motor imagery: an fMRI study</article-title>. <source>Brain Cogn.</source> <volume>124</volume>, <fpage>37</fpage>&#x2013;<lpage>46</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bandc.2018.03.012</pub-id>, PMID: <pub-id pub-id-type="pmid">29723681</pub-id></citation></ref>
<ref id="ref64"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shepard</surname> <given-names>R. N.</given-names></name> <name><surname>Metzler</surname> <given-names>J.</given-names></name></person-group> (<year>1971</year>). <article-title>Mental rotation of three-dimensional objects</article-title>. <source>Science</source> <volume>171</volume>, <fpage>701</fpage>&#x2013;<lpage>703</lpage>. doi: <pub-id pub-id-type="doi">10.1126/science.171.3972.701</pub-id>, PMID: <pub-id pub-id-type="pmid">5540314</pub-id></citation></ref>
<ref id="ref65"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Spagna</surname> <given-names>A.</given-names></name> <name><surname>Hajhajate</surname> <given-names>D.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Bartolomeo</surname> <given-names>P.</given-names></name></person-group> (<year>2021</year>). <article-title>Visual mental imagery engages the left fusiform gyrus, but not the early visual cortex: a meta-analysis of neuroimaging evidence</article-title>. <source>Neurosci. Biobehav. Rev.</source> <volume>122</volume>, <fpage>201</fpage>&#x2013;<lpage>217</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neubiorev.2020.12.029</pub-id>, PMID: <pub-id pub-id-type="pmid">33422567</pub-id></citation></ref>
<ref id="ref66"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sreekumar</surname> <given-names>V.</given-names></name> <name><surname>Nielson</surname> <given-names>D. M.</given-names></name> <name><surname>Smith</surname> <given-names>T. A.</given-names></name> <name><surname>Dennis</surname> <given-names>S. J.</given-names></name> <name><surname>Sederberg</surname> <given-names>P. B.</given-names></name></person-group> (<year>2018</year>). <article-title>The experience of vivid autobiographical reminiscence is supported by subjective content representations in the precuneus</article-title>. <source>Sci. Rep.</source> <volume>8</volume>:<fpage>14899</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-018-32879-0</pub-id>, PMID: <pub-id pub-id-type="pmid">30297824</pub-id></citation></ref>
<ref id="ref67"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Szameitat</surname> <given-names>A. J.</given-names></name> <name><surname>Shen</surname> <given-names>S.</given-names></name> <name><surname>Sterr</surname> <given-names>A.</given-names></name></person-group> (<year>2007</year>). <article-title>Motor imagery of complex everyday movements. An fMRI study</article-title>. <source>NeuroImage</source> <volume>34</volume>, <fpage>702</fpage>&#x2013;<lpage>713</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2006.09.033</pub-id>, PMID: <pub-id pub-id-type="pmid">17112742</pub-id></citation></ref>
<ref id="ref69"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tullo</surname> <given-names>M. G.</given-names></name> <name><surname>Almgren</surname> <given-names>H.</given-names></name> <name><surname>Van de Steen</surname> <given-names>F.</given-names></name> <name><surname>Sulpizio</surname> <given-names>V.</given-names></name> <name><surname>Marinazzo</surname> <given-names>D.</given-names></name> <name><surname>Galati</surname> <given-names>G.</given-names></name></person-group> (<year>2022</year>). <article-title>Individual differences in mental imagery modulate effective connectivity of scene-selective regions during resting state</article-title>. <source>Brain Struct. Funct.</source> <volume>227</volume>, <fpage>1831</fpage>&#x2013;<lpage>1842</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00429-022-02475-0</pub-id>, PMID: <pub-id pub-id-type="pmid">35312868</pub-id></citation></ref>
<ref id="ref70"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tzourio-Mazoyer</surname> <given-names>N.</given-names></name> <name><surname>Landeau</surname> <given-names>B.</given-names></name> <name><surname>Papathanassiou</surname> <given-names>D.</given-names></name> <name><surname>Crivello</surname> <given-names>F.</given-names></name> <name><surname>Etard</surname> <given-names>O.</given-names></name> <name><surname>Delcroix</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2002</year>). <article-title>Automated anatomical labeling of activations in SPM using a macroscopic anatomical parcellation of the MNI MRI single-subject brain</article-title>. <source>NeuroImage</source> <volume>15</volume>, <fpage>273</fpage>&#x2013;<lpage>289</lpage>. doi: <pub-id pub-id-type="doi">10.1006/nimg.2001.0978</pub-id>, PMID: <pub-id pub-id-type="pmid">11771995</pub-id></citation></ref>
<ref id="ref71"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Utevsky</surname> <given-names>A. V.</given-names></name> <name><surname>Smith</surname> <given-names>D. V.</given-names></name> <name><surname>Huettel</surname> <given-names>S. A.</given-names></name></person-group> (<year>2014</year>). <article-title>Precuneus is a functional core of the default-mode network</article-title>. <source>J. Neurosci.</source> <volume>34</volume>, <fpage>932</fpage>&#x2013;<lpage>940</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.4227-13.2014</pub-id>, PMID: <pub-id pub-id-type="pmid">24431451</pub-id></citation></ref>
<ref id="ref72"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Wang</surname> <given-names>H.</given-names></name> <name><surname>Xiong</surname> <given-names>X.</given-names></name> <name><surname>Sun</surname> <given-names>C.</given-names></name> <name><surname>Zhu</surname> <given-names>B.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Motor imagery training after stroke increases slow-5 oscillations and functional connectivity in the ipsilesional inferior parietal lobule</article-title>. <source>Neurorehabil. Neural Repair</source> <volume>34</volume>, <fpage>321</fpage>&#x2013;<lpage>332</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1545968319899919</pub-id>, PMID: <pub-id pub-id-type="pmid">32102610</pub-id></citation></ref>
<ref id="ref73"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>H.</given-names></name> <name><surname>Xiong</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>K.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Sun</surname> <given-names>C.</given-names></name> <name><surname>Zhu</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Motor network reorganization after motor imagery training in stroke patients with moderate to severe upper limb impairment</article-title>. <source>CNS Neurosci. Ther.</source> <volume>29</volume>, <fpage>619</fpage>&#x2013;<lpage>632</lpage>. doi: <pub-id pub-id-type="doi">10.1111/cns.14065</pub-id>, PMID: <pub-id pub-id-type="pmid">36575865</pub-id></citation></ref>
<ref id="ref74"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Winlove</surname> <given-names>C. I.</given-names></name> <name><surname>Milton</surname> <given-names>F.</given-names></name> <name><surname>Ranson</surname> <given-names>J.</given-names></name> <name><surname>Fulford</surname> <given-names>J.</given-names></name> <name><surname>MacKisack</surname> <given-names>M.</given-names></name> <name><surname>Macpherson</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>The neural correlates of visual imagery: a co-ordinate-based meta-analysis</article-title>. <source>Cortex</source> <volume>105</volume>, <fpage>4</fpage>&#x2013;<lpage>25</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2017.12.014</pub-id>, PMID: <pub-id pub-id-type="pmid">29502874</pub-id></citation></ref>
<ref id="ref75"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zeman</surname> <given-names>A.</given-names></name> <name><surname>Dewar</surname> <given-names>M.</given-names></name> <name><surname>Della Sala</surname> <given-names>S.</given-names></name></person-group> (<year>2015</year>). <article-title>Lives without imagery&#x2013;congenital aphantasia</article-title>. <source>Cortex</source> <volume>73</volume>, <fpage>378</fpage>&#x2013;<lpage>380</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2015.05.019</pub-id>, PMID: <pub-id pub-id-type="pmid">26115582</pub-id></citation></ref>
<ref id="ref76"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Z.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Yang</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>C.</given-names></name> <name><surname>Marks</surname> <given-names>D. F.</given-names></name> <name><surname>Della Sala</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2024</year>). <source>Effects of age and gender on the vividness of visual imagery: A study with the Chinese version of the VVIQ (VVIQ-C)</source>. [preprint].</citation></ref>
<ref id="ref77"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>B.</given-names></name> <name><surname>Della Sala</surname> <given-names>S.</given-names></name> <name><surname>Gherri</surname> <given-names>E.</given-names></name></person-group> (<year>2019</year>). <article-title>Visual imagery vividness and mental rotation of characters: an event related potentials study</article-title>. <source>Neurosci. Lett.</source> <volume>703</volume>, <fpage>19</fpage>&#x2013;<lpage>24</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neulet.2019.03.014</pub-id>, PMID: <pub-id pub-id-type="pmid">30872043</pub-id></citation></ref>
<ref id="ref78"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>B.</given-names></name> <name><surname>Della Sala</surname> <given-names>S.</given-names></name> <name><surname>Zeman</surname> <given-names>A.</given-names></name> <name><surname>Gherri</surname> <given-names>E.</given-names></name></person-group> (<year>2022</year>). <article-title>Spatial transformation in mental rotation tasks in aphantasia</article-title>. <source>Psychon. Bull. Rev.</source> <volume>29</volume>, <fpage>2096</fpage>&#x2013;<lpage>2107</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13423-022-02126-9</pub-id>, PMID: <pub-id pub-id-type="pmid">35680760</pub-id></citation></ref>
<ref id="ref79"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>B.</given-names></name> <name><surname>Sala</surname> <given-names>S. D.</given-names></name></person-group> (<year>2018</year>). <article-title>Different representations and strategies in mental rotation</article-title>. <source>Q. J. Exp. Psychol.</source> <volume>71</volume>, <fpage>1574</fpage>&#x2013;<lpage>1583</lpage>. doi: <pub-id pub-id-type="doi">10.1080/17470218.2017.1342670</pub-id>, PMID: <pub-id pub-id-type="pmid">28856952</pub-id></citation></ref>
<ref id="ref80"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>X.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>J.</given-names></name> <name><surname>Luo</surname> <given-names>G.</given-names></name> <name><surname>Li</surname> <given-names>T.</given-names></name> <name><surname>Chatterjee</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>The neural mechanism of aesthetic judgments of dynamic landscapes: an fMRI study</article-title>. <source>Sci. Rep.</source> <volume>10</volume>:<fpage>20774</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-020-77658-y</pub-id>, PMID: <pub-id pub-id-type="pmid">33247221</pub-id></citation></ref>
<ref id="ref81"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zheng</surname> <given-names>H. M.</given-names></name> <name><surname>Wen</surname> <given-names>Z. L.</given-names></name> <name><surname>Wu</surname> <given-names>Y.</given-names></name></person-group> (<year>2011</year>). <article-title>The appropriate effect sizes and their calculations in psychological research</article-title>. <source>Adv. Psychol. Sci.</source> <volume>19</volume>:<fpage>1868</fpage>.</citation></ref>
<ref id="ref82"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zvyagintsev</surname> <given-names>M.</given-names></name> <name><surname>Clemens</surname> <given-names>B.</given-names></name> <name><surname>Chechko</surname> <given-names>N.</given-names></name> <name><surname>Mathiak</surname> <given-names>K. A.</given-names></name> <name><surname>Sack</surname> <given-names>A. T.</given-names></name> <name><surname>Mathiak</surname> <given-names>K.</given-names></name></person-group> (<year>2013</year>). <article-title>Brain networks underlying mental imagery of auditory and visual information</article-title>. <source>Eur. J. Neurosci.</source> <volume>37</volume>, <fpage>1421</fpage>&#x2013;<lpage>1434</lpage>. doi: <pub-id pub-id-type="doi">10.1111/ejn.12140</pub-id>, PMID: <pub-id pub-id-type="pmid">23383863</pub-id></citation></ref>
</ref-list>
</back>
</article>