<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Hum. Neurosci.</journal-id>
<journal-title>Frontiers in Human Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Hum. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5161</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnhum.2025.1507782</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Human Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>EEG-based multivariate and univariate analyses reveal the mechanisms underlying the recognition-based production effect: evidence from mixed-list design</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Zhang</surname> <given-names>Bohua</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2878977/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Abdullah</surname> <given-names>Alhassan</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Yan</surname> <given-names>Minmin</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2163302/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Hou</surname> <given-names>Yongqing</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2862928/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Chen</surname> <given-names>Antao</given-names></name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/113158/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>McLaren</surname> <given-names>Helen</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2919719/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>College of Education, Psychology and Social Work, Flinders University</institution>, <addr-line>Adelaide, SA</addr-line>, <country>Australia</country></aff>
<aff id="aff2"><sup>2</sup><institution>Faculty of Psychology, Southwest University</institution>, <addr-line>Chongqing</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>School of Social Work and Arts, Charles Sturt University</institution>, <addr-line>Thurgoona, NSW</addr-line>, <country>Australia</country></aff>
<aff id="aff4"><sup>4</sup><institution>Department of Neurobiology and Department of Psychiatry of the Second Affiliated Hospital of Zhejiang University School of Medicine, School of Brain Science and Brain Medicine of the Zhejiang University School of Medicine</institution>, <addr-line>Hangzhou</addr-line>, <country>China</country></aff>
<aff id="aff5"><sup>5</sup><institution>School of Psychology, Shanghai University of Sport</institution>, <addr-line>Shanghai</addr-line>, <country>China</country></aff>
<aff id="aff6"><sup>6</sup><institution>School of Allied Health (VIC), Australian Catholic University</institution>, <addr-line>Melbourne, VIC</addr-line>, <country>Australia</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Soledad Ballesteros, National University of Distance Education (UNED), Spain</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Jesse Q. Sargent, Francis Marion University, United States</p>
<p>Ye Li, Intuitive Surgical, Inc, United States</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Antao Chen <email>chenantao&#x00040;sus.edu.cn</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>29</day>
<month>01</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>19</volume>
<elocation-id>1507782</elocation-id>
<history>
<date date-type="received">
<day>08</day>
<month>10</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>01</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2025 Zhang, Abdullah, Yan, Hou, Chen and McLaren.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Zhang, Abdullah, Yan, Hou, Chen and McLaren</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>The production effect (PE) is a phenomenon where reading words aloud, rather than silently, during study leads to improved recognition memory. Human recognition memory can be divided into recollection (recognition based on complex contextual information) and familiarity (recognition based on a sense of familiarity). This study explored how reading aloud affects recollection and familiarity using electroencephalography (EEG) in a mixed-list design. Participants encoded each list item, either aloud or silently during the study phase and made remember/know/new judgments in the test phase, while EEG data were recorded. The behavioral results replicated the classic PE pattern and indicated that the PE was present in both recollection and familiarity. At the Event-Related Potential (ERP) level, the recollection-based LPC (late positive complex) old/new effect at test was largest in the aloud condition; however, the familiarity-based FN400 old/new effect was equivalent when comparing the aloud condition and the silent condition. Moreover, this study was the first to employ multivariate pattern analysis (MVPA) to decode the time course between two distinct memory strategies (aloud vs. silent). The results revealed significant decoding between 760 and 840 ms, which is consistent with the LPC old/new effect. The paper discusses both traditional theories and the Feature Space Theory based on our results, highlighting inconsistencies with assumptions regarding unconscious retrieval in the Feature Space Theory. In summary, the current results support the role of distinctiveness (enhanced memory for auditory or action information, consistent with recollection) in the PE, rather than the role of strength (enhanced memory trace, consistent with familiarity). This study suggests that enhanced distinctiveness/recollection may be a shared mechanism underlying certain advantageous memory strategies.</p></abstract>
<kwd-group>
<kwd>reading aloud</kwd>
<kwd>silent reading</kwd>
<kwd>LPC</kwd>
<kwd>FN400</kwd>
<kwd>MVPA</kwd>
</kwd-group>
<counts>
<fig-count count="4"/>
<table-count count="2"/>
<equation-count count="0"/>
<ref-count count="63"/>
<page-count count="12"/>
<word-count count="9755"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Cognitive Neuroscience</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>Memory plays a crucial role in human learning and daily life, as it allows for retention and retrieval of information when needed. Consequently, people are always on the lookout for effective strategies to enhance their memory. Dunlosky et al. (<xref ref-type="bibr" rid="B12">2013</xref>) provides a comprehensive review of 10 common memory strategies, revealing that only a few may genuinely be effective (Dunlosky et al., <xref ref-type="bibr" rid="B12">2013</xref>). However, one simple yet powerful strategy was notably absent from the discussion: reading aloud.</p>
<p>Memory associated with reading aloud is considered stronger than memory associated with silent reading (see MacLeod and Bodner, <xref ref-type="bibr" rid="B33">2017</xref>, for a brief review). This effect was first reported by Hopkins and Edwards (<xref ref-type="bibr" rid="B20">1972</xref>), but it was only after MacLeod et al. (<xref ref-type="bibr" rid="B34">2010</xref>) delineated this phenomenon and coined the term, production effect (PE), that this effective encoding strategy received increasing attention from researchers. Since MacLeod et al. (<xref ref-type="bibr" rid="B34">2010</xref>), a substantial number of researchers have reported on PE (e.g., Bodner et al., <xref ref-type="bibr" rid="B2">2020</xref>; Forrin et al., <xref ref-type="bibr" rid="B17">2012</xref>; Kelly et al., <xref ref-type="bibr" rid="B24">2024</xref>; Lin and MacLeod, <xref ref-type="bibr" rid="B30">2012</xref>; L&#x000F3;pez Assef et al., <xref ref-type="bibr" rid="B31">2021</xref>; Saint-Aubin et al., <xref ref-type="bibr" rid="B45">2021</xref>; Whitridge et al., <xref ref-type="bibr" rid="B52">2024</xref>; Zormpa et al., <xref ref-type="bibr" rid="B63">2019</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>,<xref ref-type="bibr" rid="B60">b</xref>), especially in the recognition memory field (e.g., Bodner et al., <xref ref-type="bibr" rid="B5">2014</xref>; Fawcett, <xref ref-type="bibr" rid="B14">2013</xref>).</p>
<p>Research has primarily investigated the mechanism of PE (e.g., Bodner et al., <xref ref-type="bibr" rid="B3">2016</xref>; Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>; MacLeod and Bodner, <xref ref-type="bibr" rid="B33">2017</xref>; Ozubko and MacLeod, <xref ref-type="bibr" rid="B41">2010</xref>). Up to now, the two dominant explanations of PE are the distinctiveness account and the strength account. In the distinctiveness account, MacLeod et al. (<xref ref-type="bibr" rid="B34">2010</xref>) emphasized that reading aloud involves unique phonological and articulatory processing, enabling participants to encode distinctive information during the encoding phase. During the recognition phase participants are said to retrieve this distinctive information, which facilitates recognition. Because the recognition process described in the distinctiveness account is based on the retrieval of contextual information, it aligns with recollection&#x02014;the process of recognition memory associated with contextual information (Yonelinas et al., <xref ref-type="bibr" rid="B59">2022</xref>). Therefore, by definition, recollection is consistent with distinctiveness (Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). The strength account on the other hand emphasizes that reading aloud does not rely on retaining information about distinctiveness. Instead, it enhances the activation of memory traces for studied items, thereby increasing their familiarity during testing (Bodner and Taikh, <xref ref-type="bibr" rid="B4">2012</xref>). This description aligns with the familiarity process in recognition memory (Yonelinas et al., <xref ref-type="bibr" rid="B59">2022</xref>), where participants rely on the sense of familiarity for recognition decisions. 
Therefore, by definition, familiarity is inherently aligned with the concept of strength (Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>; MacKenzie and Donaldson, <xref ref-type="bibr" rid="B32">2007</xref>; Parks, <xref ref-type="bibr" rid="B42">2007</xref>; Yonelinas et al., <xref ref-type="bibr" rid="B59">2022</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>).</p>
<p>Previous studies have primarily employed two paradigms to explore the roles of distinctiveness and strength in the PE (Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>; Ozubko et al., <xref ref-type="bibr" rid="B40">2012</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). The first is the mixed-list design, which is the most common and primary paradigm (MacLeod and Bodner, <xref ref-type="bibr" rid="B33">2017</xref>). In this paradigm the vocabulary to be learned is often divided into two sets, possibly labeled as the blue set and the yellow set. Participants are required to read aloud or silently either the blue or the yellow font. The instructions represented by the two colors are counterbalanced across participants. Subsequently, during the studying phase, these items are randomly mixed (Bodner et al., <xref ref-type="bibr" rid="B3">2016</xref>; MacLeod et al., <xref ref-type="bibr" rid="B34">2010</xref>). The participants are then required to identify both studied and unstudied words. The second paradigm, which is less commonly used, is the block design. This design usually involves participants reading a series of words aloud continuously, followed by reading a series of words silently.</p>
<p>The size of the PE differs between the mixed-list design and the block design. Bodner et al. (<xref ref-type="bibr" rid="B5">2014</xref>) found that the PE in mixed-list designs is larger relative to that in the block design. This is because the memory of silent reading in the mixed-list design is worse than in the block design, showing a cost of silent reading. This cost may arise from lazy reading, wherein people tend to perceive the words read aloud as more important in the mixed-list. This leads them to reduce effort during the silent reading phase, consequently causing a decline in memory performance during silent reading (Bodner et al., <xref ref-type="bibr" rid="B5">2014</xref>). The occurrence of this cost may be related to reading aloud interrupting covert rehearsal, which prevents participants from effectively maintaining the memory of words read silently after reading aloud (Cyr et al., <xref ref-type="bibr" rid="B11">2022</xref>). An item-order account suggests that commonly processed items (silent reading) incur a cost in mixed-lists because the presence of unusually processed items (reading aloud) disrupts the encoding of relational information (Jonker et al., <xref ref-type="bibr" rid="B23">2014</xref>).</p>
<p>At the mechanism level, greater PE at the behavioral level might be related to amplified distinctiveness or strength. A speculative piece of evidence comes from Ozubko et al. (<xref ref-type="bibr" rid="B39">2020</xref>), who explored whether adding a third condition could amplify the additional cost of silent reading. Their behavioral results indicated that in the group where the &#x0201C;important&#x0201D; condition (participants were instructed to remember the information carefully) was added, memory performance in the silent reading condition was worse when compared to the group without the &#x0201C;important&#x0201D; condition, showing a kind of cost. This cost is reflected in recollection (which is consistent with the definition of distinctiveness) and in familiarity (which is consistent with the definition of strength) (see Exp. 6; Ozubko et al., <xref ref-type="bibr" rid="B39">2020</xref>). Moreover, Ozubko et al. (<xref ref-type="bibr" rid="B39">2020</xref>) proposed that this cost was due to participants looking for distinctiveness encoding information during the test. In conditions without distinctiveness encoding (silent reading), participants&#x00027; confidence decreased. Regardless of the cause of the cost in the mixed-list, we can infer that the cost of silent reading might be reflected in recollection or familiarity, thereby potentially amplifying the role of distinctiveness or strength in the PE. This may ultimately lead to the larger mixed-list PE. Thus, if a block-based study only finds that recollection/distinctiveness contributes to the PE (see Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>), it is necessary to explore whether familiarity/strength also contributes to the PE in the context of a mixed-list.</p>
<p>Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>) used electroencephalography (EEG) technology to investigate the effects of reading aloud on recollection and familiarity. However, the results of their study need to be further examined using a mixed-list task (Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). Initial research found that reading aloud can simultaneously enhance both recollection and familiarity at the behavioral level (see Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>: Dual processing account). However, ERP technology is a more sensitive method for exploring recollection and familiarity (Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). In terms of ERP indicators of recollection and familiarity, the LPC component during the recognition phase varies with recollection, while the FN400 component varies with familiarity (Bridger and Mecklinger, <xref ref-type="bibr" rid="B6">2012</xref>; Curran and Friedman, <xref ref-type="bibr" rid="B10">2004</xref>; Madore et al., <xref ref-type="bibr" rid="B36">2020</xref>; Rugg and Curran, <xref ref-type="bibr" rid="B44">2007</xref>). The LPC old/new effect reflects a more positive amplitude for old items, compared to new items in the left parietal area, from 500 to 800 ms after stimulus onset (Curran and Friedman, <xref ref-type="bibr" rid="B10">2004</xref>; Forester et al., <xref ref-type="bibr" rid="B16">2019</xref>; Madore et al., <xref ref-type="bibr" rid="B36">2020</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>), indicative of enhanced recollection. Therefore, LPC can serve as a sensitive indicator of distinctiveness (Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). The FN400 is an early negative component. The FN400 old/new effect reflects a more positive amplitude for old items compared to new items in the frontal area, from 300 to 500 ms after stimulus onset, indicative of enhanced familiarity. 
Therefore, FN400 can serve as an indicator of strength (Mecklinger and J&#x000E4;ger, <xref ref-type="bibr" rid="B37">2009</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>).</p>
<p>Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>) used a block design to investigate how reading aloud affects recollection and familiarity at the ERP level. At the behavioral level, they found that reading aloud can enhance both recollection and familiarity simultaneously. However, the ERP results showed a significant PE only in the LPC old/new effect, with no significant PE in the FN400 old/new effect. Simultaneously, they used multivariate pattern analysis (MVPA) to decode the time-course of neural activity for studied trials compared to new trials (aloud vs. new and silent vs. new) during the recognition phase. The results showed that stable decoding could only be achieved after 500 ms, which is consistent with the LPC effect (Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). These findings indicated that reading aloud only enhanced recollection compared to silent reading, with no evidence of enhanced familiarity. They suggested that the evidence of familiarity-related PE observed at the behavioral level may reflect weak recollection.</p>
<p>The above results merely support the notion that recollection/distinctiveness contributes to the PE. Nevertheless, Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>) used a block design. As mentioned earlier, in mixed-lists the potential costs may lead to an amplification of the PE in familiarity (Ozubko et al., <xref ref-type="bibr" rid="B39">2020</xref>). Therefore, we speculate that although Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>) did not observe a PE on the FN400 old/new effect in a block design, the presence of potential costs in mixed-lists may amplify the advantage of reading aloud over silent reading on the FN400 old/new effect, thereby leading to a significant PE on the FN400 old/new effect. This indicates that familiarity/strength may contribute to the PE in mixed-list designs rather than in block designs. Investigating this issue is of great importance for understanding the mechanisms of the PE (i.e., the roles of distinctiveness and strength). At the same time, developing an understanding of how reading aloud influences basic recognition memory processes (recollection and familiarity) is crucial for constructing a comprehensive modern theoretical framework for the PE in the future.</p>
<p>Overall, the purpose of this study was to examine the effect of reading aloud on recollection and familiarity in mixed-lists using ERP technology. We posit that if reading aloud enhances recollection rather than familiarity, this suggests that the PE is a function of distinctiveness rather than strength, and that the mechanism of the PE is common and not dependent on a blocked paradigm. Also, if reading aloud could enhance both recollection and familiarity, compared to silent reading, it indicates that in mixed-lists PE relies on both distinctiveness and strength, and the mechanism of PE is potentially unique to the paradigm or task. Furthermore, this study attempts to use MVPA to decode the temporal dynamics of the PE during the recognition phase (aloud vs. silent).</p></sec>
<sec id="s2">
<title>2 Materials and methods</title>
<p>The study was conducted in compliance with Good Clinical Practice and the Declaration of Helsinki and this study was approved in 2024 by the Human Research Ethics Committee at Flinders University, project no. 6844. Written informed consent was obtained from all participants.</p>
<sec>
<title>2.1 Participants</title>
<p>We recruited 35 participants at Southwest University (China) and tested them. Due to high artifacts, 5 participants were excluded, leaving 30 participants (M<sub>age</sub> = 22.2, SD = 3.7; 20 female) for the final analysis. A post hoc sensitivity analysis indicated that testing 30 participants with &#x003B1; = 0.05, a power of 0.8, and default parameters would allow us to detect a medium effect size in our paired-sample <italic>t</italic>-test (Effect size <italic>dz</italic> = 0.53).</p>
</sec>
<sec>
<title>2.2 Stimuli</title>
<p>To align as closely as possible with the classic mixed-list design, we maintained the same number of vocabulary items as in previous classic studies (Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>). We selected 240 frequently used two-character nouns from the vocabulary database of Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>) (familiarity: <italic>M</italic> = 5.4, SD = 1), with frequency ranging from 1 to 5,848 occurrences per million (<italic>M</italic> = 593, SD = 884) according to SUBTLEX-CH and CNCORPU (Cai and Brysbaert, <xref ref-type="bibr" rid="B7">2010</xref>; Xiao, <xref ref-type="bibr" rid="B54">2016</xref>). These items were subdivided into two sets matched for familiarity; one set was studied and the other served as the new items on the recognition test.</p>
</sec>
<sec>
<title>2.3 Experimental procedure</title>
<p>This study follows a within-subject design with 2 conditions (Memory condition: reading aloud, silent reading) to explore how different memory conditions affect recollection and familiarity in recognition. During the study phase, in the mixed-list design, a total of 60 words were read aloud, and 60 words were read silently. Words were presented in either yellow or blue to indicate which action participants should take. For half of the participants, yellow indicated silent reading, and blue indicated reading aloud. These instructions were reversed for the remaining participants, and the items in the study phase were randomly mixed. During the study phase, participants first saw a 500 ms fixation point, followed by a 500 ms blank screen, and then a 2,000 ms presentation of words (2.65&#x000B0; &#x000D7; 1.30&#x000B0; visual angle). After the study phase, participants entered the test phase. During this phase, the words were presented in black on a gray background. Adequate breaks were scheduled in between. Participants were then presented with 60 read-aloud words, 60 silently read words, and 120 new words. Specifically, participants first saw a word that remained fixed on the screen for 3 s. Following this, they proceeded to an R/K/N judgment task. They were required to categorize the words as &#x0201C;remember,&#x0201D; &#x0201C;know,&#x0201D; or &#x0201C;new.&#x0201D; &#x0201C;Remember&#x0201D; was to be used if participants recalled contextual details from when they encoded the word (e.g., what they were thinking about or felt when they saw the item and what items had come before or after it). &#x0201C;Know&#x0201D; was to be used if they recognized having seen the word before but could not recall contextual details. &#x0201C;New&#x0201D; was to be used if they were certain they hadn&#x00027;t encountered the word before or were unsure if they had (<xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Flowchart of this study. During the study phase, two colors of fonts appeared randomly mixed. Afterward, participants entered the test phase, and ERP data were recorded during the stimulus stage indicated by the red box.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1507782-g0001.tif"/>
</fig>
</sec>
<sec>
<title>2.4 Statistical analysis</title>
<p>We used SPSS for the data analysis. Effects were deemed significant when <italic>p</italic> &#x0003C; 0.05. We conducted paired-samples <italic>t</italic>-tests on the behavioral data. For the ERP data, a three-factor repeated measures ANOVA was first performed, followed by paired-samples <italic>t</italic>-tests. Estimates of effect size are provided for significant comparisons using partial-eta squared (<inline-formula><mml:math id="M1"><mml:msubsup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mtext>p</mml:mtext></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula>) for ANOVAs and Cohen&#x00027;s d for <italic>t</italic>-tests.</p>
</sec>
<sec>
<title>2.5 ERP data recording</title>
<p>Participants were tested in a dimly lit, soundproof room. EEG data were recorded using a 64-channel Brain Products system (Brain Products GmbH, Germany; passband: 0.01&#x02013;100 Hz) with tin electrodes mounted on a standard elastic cap based on the international 10&#x02013;20 system. Data were analyzed using MATLAB and its toolbox EEGLAB. Electrodes were referenced to electrode FCz; offline, the data were re-referenced to the mean value of the left and right mastoids. The electrodes on the outside of the right eye were used to monitor horizontal eye movement, and the electrodes on the underside of the left eye were used to monitor vertical eye movement. EEG was recorded at a sample rate of 500 Hz. Electrode impedances were maintained below 5 k&#x003A9; during data recording. Data were filtered with a 0.1&#x02013;30 Hz bandpass filter, followed by independent component analysis (ICA) for each participant to identify and discard eyeblink-related components. Trials with voltages exceeding the range of &#x02212;100 &#x003BC;V to &#x0002B;100 &#x003BC;V were removed. EEG data were recorded during the test phase, and ERPs were extracted during the item presentation of the test phase.</p>
<p>As in previous studies, we focused on analysis of the frontal old/new effect and parietal old/new effect (J&#x000E4;ger et al., <xref ref-type="bibr" rid="B22">2006</xref>; Mecklinger and J&#x000E4;ger, <xref ref-type="bibr" rid="B37">2009</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). The LPC and FN400 were measured during the test phase. For the FN400 old/new effect, we selected 2 electrode points on the frontal region (F1, F3); for the LPC old/new effect, we selected 2 electrode points in the left parietal region (P1, P3) (Forester et al., <xref ref-type="bibr" rid="B16">2019</xref>; Madore et al., <xref ref-type="bibr" rid="B36">2020</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). For each component, we calculated the average amplitude across the indicated electrode sites.</p>
</sec>
<sec>
<title>2.6 Multivariate pattern analysis</title>
<p>MVPA was used to decode the time-course of word recognition in the recognition phase (test phase). MVPA is a multivariate analytical technique and is typically used when referring to the practice of characterizing (decoding) the difference between experimental conditions based on their patterns of brain responses (Fahrenfort et al., <xref ref-type="bibr" rid="B13">2018</xref>). MVPA has been demonstrated to be a superior method to understand the nature of memory (Xue, <xref ref-type="bibr" rid="B55">2018</xref>). MVPA involves training a classifier (a pattern classification algorithm) to distinguish different patterns of brain activity associated with different experimental variables of interest, which is more sensitive than conventional ERP analysis. This is because MVPA uses whole-brain activity to depict neural activity patterns over time (Li et al., <xref ref-type="bibr" rid="B28">2022</xref>, <xref ref-type="bibr" rid="B29">2024</xref>; Sharifian et al., <xref ref-type="bibr" rid="B47">2021</xref>), and because the neural stability of decoding performance can be analyzed (King and Dehaene, <xref ref-type="bibr" rid="B25">2014</xref>). To this end, we performed MVPA on the pre-processed EEG data using the Amsterdam decoding and modeling (ADAM) toolbox (Fahrenfort et al., <xref ref-type="bibr" rid="B13">2018</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). MVPA involved a backward decoding classification algorithm (linear discriminant analysis). MVPA decoding captures differences between pairs of classes at the whole-brain level (i.e., aloud vs. new, silent vs. new, aloud vs. silent). Before performing MVPA, the EEG data were down-sampled offline to 50 Hz to facilitate decoding (Fahrenfort et al., <xref ref-type="bibr" rid="B13">2018</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). The EEG data at individual electrode channels were used as classification features, and all electrodes were used to create features.</p>
<p>In terms of MVPA analysis, the classifier at each time point was trained and tested, using 5-fold cross-validation, to minimize potential biases resulting from the assignment of trials to different groups. We performed 5 iterations of the entire procedure, shuffling the order of the trials at the beginning of each iteration (Li et al., <xref ref-type="bibr" rid="B29">2024</xref>). Based on the guidelines provided by Fahrenfort et al. (<xref ref-type="bibr" rid="B13">2018</xref>), after exporting the MVPA files, we utilized the scripts from Fahrenfort et al. (<xref ref-type="bibr" rid="B13">2018</xref>) to perform classification analysis (decoding) and temporal generalization analysis (All analysis scripts can be referred to in: Fahrenfort et al., <xref ref-type="bibr" rid="B13">2018</xref>). To measure the classification performance (decoding), we used the area under the curve (AUC) as a metric; a larger AUC value means better classification performance (Fahrenfort et al., <xref ref-type="bibr" rid="B13">2018</xref>). An AUC value of 0.5 indicated chance-level classification performance. The results were corrected for multiple comparisons by cluster-based permutation tests (<italic>p</italic> &#x0003C; 0.05; 1,000 iterations). Finally, stability of classified neural activity over time was detected using temporal generalization analysis (King and Dehaene, <xref ref-type="bibr" rid="B25">2014</xref>). This analysis explored whether the decoded neural activity patterns were stable by training the classifier at each time point and testing the classifier at all time points (Fahrenfort et al., <xref ref-type="bibr" rid="B13">2018</xref>). The temporal generalization matrix was then obtained. 
As a result, if the classification accuracy outside the diagonal axis was above the chance level it indicated stable neural activity (Fahrenfort et al., <xref ref-type="bibr" rid="B13">2018</xref>; Li et al., <xref ref-type="bibr" rid="B28">2022</xref>, <xref ref-type="bibr" rid="B29">2024</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>).</p></sec></sec>
<sec id="s3">
<title>3 Results</title>
<sec>
<title>3.1 Behavioral data</title>
<p>Our behavioral measures were analyzed with paired-samples <italic>t</italic>-tests with condition (aloud, silent) as the factor: (1) overall recognition (percentage of items correctly identified as &#x0201C;old&#x0201D; at test; i.e., sum of remember &#x0002B; know judgments), (2) remember judgments (percentage of old items to which participants made a &#x0201C;remember&#x0201D; response), (3) &#x0201C;know&#x0201D; judgments, and (4) familiarity estimates (as defined below). <xref ref-type="table" rid="T1">Table 1</xref> provides the means and <xref ref-type="fig" rid="F2">Figure 2</xref> displays them.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Behavioral data: overall recognition, recollection, and familiarity by condition.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#8f9496;color:#ffffff">
<th/>
<th valign="top" align="center" colspan="2"><bold>Aloud</bold></th>
<th valign="top" align="center" colspan="2"><bold>Silent</bold></th>
<th valign="top" align="center"><bold><italic>F</italic>(1, 29)</bold></th>
<th valign="top" align="center"><bold><italic><inline-formula><mml:math id="M2"><mml:msubsup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula></italic></bold></th>
</tr>
<tr style="background-color:#8f9496;color:#ffffff">
<th valign="top" align="left"><bold>Measure</bold></th>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>SD</bold></th>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>SD</bold></th>
<th/>
<th/>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">Overall recognition</td>
<td valign="top" align="center">0.71</td>
<td valign="top" align="center">0.11</td>
<td valign="top" align="center">0.54</td>
<td valign="top" align="center">0.14</td>
<td valign="top" align="center">66.71<sup>&#x0002A;</sup></td>
<td valign="top" align="center">0.69</td>
</tr> <tr>
<td valign="top" align="center">Recollection (R judgments)</td>
<td valign="top" align="center">0.39</td>
<td valign="top" align="center">0.18</td>
<td valign="top" align="center">0.23</td>
<td valign="top" align="center">0.15</td>
<td valign="top" align="center">55.24<sup>&#x0002A;</sup></td>
<td valign="top" align="center">0.66</td>
</tr> <tr>
<td valign="top" align="center">K judgments</td>
<td valign="top" align="center">0.31</td>
<td valign="top" align="center">0.1</td>
<td valign="top" align="center">0.3</td>
<td valign="top" align="center">0.11</td>
<td valign="top" align="center">0.23</td>
<td valign="top" align="center">0.008</td>
</tr> <tr>
<td valign="top" align="center">Familiarity (IRK estimates)</td>
<td valign="top" align="center">0.52</td>
<td valign="top" align="center">0.13</td>
<td valign="top" align="center">0.4</td>
<td valign="top" align="center">0.14</td>
<td valign="top" align="center">21.43<sup>&#x0002A;</sup></td>
<td valign="top" align="center">0.425</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>R, remember; K, know; IRK, independence remember-know. <sup>&#x0002A;</sup><italic>p</italic> &#x0003C; 0.05.</p>
</table-wrap-foot>
</table-wrap>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Behavioral data: overall recognition, recollection, and familiarity by condition. Error bars indicate the standard error of each mean. &#x0002A;&#x0002A;&#x0002A;<italic>p</italic> &#x0003C; 0.001.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1507782-g0002.tif"/>
</fig>
<p>Overall recognition was greater in the aloud condition than in the silent condition, <italic>t</italic>(29) = 8.168, <italic>p</italic> &#x0003C; 0.001, Cohen&#x00027;s <italic>d</italic> = 0.63, thus demonstrating a robust PE on recognition.</p>
<p>In terms of the rate of remember judgments, as was true for overall recognition, remember judgments were more common in the aloud condition than in the silent condition, <italic>t</italic>(29) = 7.433, <italic>p</italic> &#x0003C; 0.001, Cohen&#x00027;s <italic>d</italic> = 2.53.</p>
<p>The rate of know (K) judgments was similar across the conditions, <italic>t</italic>(29) = 0.481, <italic>p</italic> = 0.634, Cohen&#x00027;s <italic>d</italic> = 0.09. However, in the remember/know task, the rate of K judgment underestimates familiarity because participants who experience familiarity will only report a K judgment if recollection fails&#x02014;otherwise they will report a remember (R) judgment (Yonelinas, <xref ref-type="bibr" rid="B56">2002</xref>; Yonelinas et al., <xref ref-type="bibr" rid="B57">2010</xref>). To compensate for this underestimation, familiarity was estimated using the independence remember-know (IRK) correction K/(1-R) (<xref ref-type="table" rid="T1">Table 1</xref>; see Yonelinas and Jacoby, <xref ref-type="bibr" rid="B58">1995</xref>). Estimates of familiarity were greater in the aloud condition than in the silent condition, <italic>t</italic>(29) = 4.42, <italic>p</italic> &#x0003C; 0.001, Cohen&#x00027;s <italic>d</italic> = 3.796.</p>
</sec>
<sec>
<title>3.2 ERP data</title>
<sec>
<title>3.2.1 Recognition phase: LPC effect (500&#x02013;800 ms)</title>
<p>To evaluate the possible contribution of recollection to the PE, we compared mean LPC amplitude during the test phase across the parietal region electrodes (P1, P3; <xref ref-type="fig" rid="F3">Figure 3</xref>) for hits to aloud and silent trials, and for correct rejections of new trials. The ANOVA revealed a significant main effect of condition, <italic>F</italic>(2,58) = 8.82, <italic>p</italic> &#x0003C; 0.001, &#x003B7;<sub>p</sub><sup>2</sup> = 0.233. The LPC amplitude was greater on aloud test trials than on new trials, <italic>p</italic> = 0.001, Cohen&#x00027;s <italic>d</italic> = 1.14, showing a significant LPC old/new effect (difference) for aloud trials. The LPC amplitude was greater on aloud test trials than on silent trials, <italic>p</italic> = 0.032, Cohen&#x00027;s <italic>d</italic> = 0.47, showing that the LPC old/new effect for aloud trials is greater than for silent trials. LPC amplitude was greater on silent test trials than on new trials, <italic>p</italic> = 0.037, Cohen&#x00027;s <italic>d</italic> = 1.14, showing a significant LPC old/new effect (difference) for silent trials (<xref ref-type="table" rid="T2">Table 2</xref> and <xref ref-type="fig" rid="F3">Figure 3</xref>).</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Amplitude of LPC and FN400 (Test Phase) ERP components by condition. <bold>(A)</bold> Waveforms and topography of LPC (test phase). <bold>(B)</bold> Evoked LPC amplitudes (test phase). <bold>(C)</bold> Waveforms and topography of FN400 (test phase). <bold>(D)</bold> Evoked FN400 amplitudes (test phase). <bold>(A, C)</bold> Shaded regions represent the defined time windows. <bold>(B, D)</bold> Error bars indicate the standard error of each mean. &#x0002A;<italic>p</italic> &#x0003C; 0.05 and &#x0002A;&#x0002A;&#x0002A;<italic>p</italic> &#x0003C; 0.001.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1507782-g0003.tif"/>
</fig>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>ERP data: LPC, and FN400.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#8f9496;color:#ffffff">
<th valign="top" align="left"><bold>Components</bold></th>
<th valign="top" align="center" colspan="2"><bold>Aloud</bold></th>
<th valign="top" align="center" colspan="2"><bold>Silent</bold></th>
<th valign="top" align="center" colspan="2"><bold>New</bold></th>
<th valign="top" align="center"><bold><italic>F</italic>(2, 58)</bold></th>
<th valign="top" align="center"><bold><inline-formula><mml:math id="M3"><mml:msubsup><mml:mrow><mml:mi>&#x003B7;</mml:mi></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula></bold></th>
</tr>
<tr style="background-color:#8f9496;color:#ffffff">
<th/>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>SD</bold></th>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>SD</bold></th>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>SD</bold></th>
<th/>
<th/>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">LPC</td>
<td valign="top" align="center">5.75</td>
<td valign="top" align="center">3.95</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">3.6</td>
<td valign="top" align="center">4.03</td>
<td valign="top" align="center">3.2</td>
<td valign="top" align="center">8.82<sup>&#x0002A;</sup></td>
<td valign="top" align="center">0.23</td>
</tr> <tr>
<td valign="top" align="center">FN400</td>
<td valign="top" align="center">1.38</td>
<td valign="top" align="center">4.2</td>
<td valign="top" align="center">1.18</td>
<td valign="top" align="center">4.5</td>
<td valign="top" align="center">0.06</td>
<td valign="top" align="center">4.3</td>
<td valign="top" align="center">6.45<sup>&#x0002A;</sup></td>
<td valign="top" align="center">0.18</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>The unit of the data is &#x0201C;microvolts&#x0201D; (&#x003BC;V). <sup>&#x0002A;</sup><italic>p</italic> &#x0003C; 0.05.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec>
<title>3.2.2 Recognition phase: FN400 effect (300&#x02013;500 ms)</title>
<p>To evaluate the possible contribution of familiarity to the PE, we compared mean FN400 amplitude during the test phase across the frontal region electrodes (F1, F3; <xref ref-type="fig" rid="F3">Figure 3</xref>). The ANOVA revealed a significant main effect of condition, <italic>F</italic>(2,58) = 6.45, <italic>p</italic> = 0.003, &#x003B7;<sub>p</sub><sup>2</sup> = 0.182. The FN400 amplitude was more positive for hits to aloud test items than for correct rejections of new test items, <italic>p</italic> &#x0003C; 0.001, Cohen&#x00027;s <italic>d</italic> = 0.782, the latter showing a significant FN400 old/new effect. The FN400 amplitude was more positive for hits to silent test items than for correct rejections of new test items, <italic>p</italic> = 0.017, Cohen&#x00027;s <italic>d</italic> = 0.782, the latter showing a significant FN400 old/new effect. However, aloud trials and silent trials were equivalent, <italic>ps</italic> &#x0003E; 0.05, showing no difference in FN400 old/new effect of the two memory conditions. Thus, no PE on familiarity was found using this measure (<xref ref-type="table" rid="T2">Table 2</xref> and <xref ref-type="fig" rid="F3">Figure 3</xref>).</p>
</sec>
</sec>
<sec>
<title>3.3 Multivariate pattern analysis</title>
<p>We observed significant classification for each pair of conditions using MVPA. MVPA revealed a significant above-chance difference in aloud vs. new classification from 540 to 1000 ms post-stimulus onset (paired <italic>t</italic>-test, <italic>p</italic> &#x0003C; 0.05, cluster corrected), in silent vs. new classification from 480 to 580 ms and from 600 to 980 ms post-stimulus onset (paired <italic>t</italic>-test, <italic>p</italic> &#x0003C; 0.05, cluster corrected), and for aloud vs. silent classification from 760 to 840 ms post-stimulus onset (paired <italic>t</italic>-test, <italic>p</italic> &#x0003C; 0.05, cluster corrected) (<xref ref-type="fig" rid="F4">Figure 4A</xref>).</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Mean classification performance and spatio-temporal decoding in test phase. <bold>(A)</bold> Spatio-temporal decoding of old/new contrast result in recognition. Classifier accuracy was threshold (cluster-based correction, <italic>p</italic> &#x0003C; 0.05). Gray lines indicate classifier accuracy. Solid black lines indicate significant clusters. Gray shaded contours in classification performance plots represent standard error of the mean. <bold>(B)</bold> Temporal generalization matrices. Saturated colors indicate significant samples (<italic>p</italic> &#x0003C; 0.05).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1507782-g0004.tif"/>
</fig>
<p>Because significant decoding was found in all three comparisons, we next calculated temporal generalization matrices to test the stability of neural activity patterns with underlying significant classification performance (Li et al., <xref ref-type="bibr" rid="B28">2022</xref>). A cluster of significant above-chance activity was stable and significant from 380 to 1000 ms post-stimulus onset for the aloud vs. new comparison, from 140 to 980 ms and from 260 to 1000 ms post-stimulus onset for the silent vs. new comparison, and from 440 to 980 ms and from 520 to 1000 ms post-stimulus onset for the aloud vs. silent comparison. The time generalization matrices indicated that the neural activity could be decoded by the trained classifiers during these time windows, suggesting that the differences between pairs of conditions were stable over time (<xref ref-type="fig" rid="F4">Figure 4B</xref>).</p></sec></sec>
<sec id="s4">
<title>4 Discussion</title>
<p>PE is the phenomenon where the memory of reading aloud is better than that of silent reading. To date, few studies have investigated PE using neuroimaging techniques (Bailey et al., <xref ref-type="bibr" rid="B1">2021</xref>; Hassall et al., <xref ref-type="bibr" rid="B19">2016</xref>; Tan et al., <xref ref-type="bibr" rid="B48">2022</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). To further examine the mechanism of the PE, our study is the first to systematically investigate the impact of reading aloud on recollection and familiarity in a mixed-list design and to explore whether the mechanism underlying recognition PE is general or task-specific. Theoretically, the PE on recollection is consistent with reading aloud enhancing distinctiveness, while the PE on familiarity is consistent with reading aloud enhancing memory strength (Bailey et al., <xref ref-type="bibr" rid="B1">2021</xref>; Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>; Ozubko et al., <xref ref-type="bibr" rid="B40">2012</xref>). This study extracted the EEG amplitude during the testing phase and the results showed that, behaviorally, reading aloud increased overall recognition, recollection, and familiarity compared to silent conditions. At the ERP level, the larger LPC ERP component for aloud items further confirmed that reading aloud enhanced memory by increasing recollection. However, the FN400 ERP component findings did not provide convincing evidence that reading aloud also enhanced familiarity (<xref ref-type="fig" rid="F3">Figure 3</xref>). Furthermore, our MVPA decoding analysis of the PE revealed that classification between study trials and new trials could be accurately determined from about 500 ms of onset, suggesting that participants rely more on recollection to recognize aloud and silent words. 
Further, for the first time, we found significant decoding between aloud trials and silent trials at 760&#x02013;840 ms, expanding previous research and supporting the role of recollection in the PE (Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). Our results indicate that the mechanism of recognition-based PE is general rather than task-specific, with enhanced recollection/distinctiveness contributing to the recognition PE. We have discussed the results in the context of postulations from traditional theories and in the context of a new model based on our study results. These discussions provide important insight for the development of future research. Below we unpack these findings in more detail.</p>
<sec>
<title>4.1 Effect of production on LPC at retrieval</title>
<p>Recollection is widely believed to be a process of controlled retrieval, which refers to memory based on contextual information (Rugg and Curran, <xref ref-type="bibr" rid="B44">2007</xref>; Schaefer et al., <xref ref-type="bibr" rid="B46">2011</xref>). The LPC old/new effect is considered a marker of recognition based on recollection (Bridger and Mecklinger, <xref ref-type="bibr" rid="B6">2012</xref>). In the time window of 500&#x02013;800 ms in this study, a significant LPC old/new effect was observed between both reading aloud and silent reading conditions and the correct rejection of new trials, which indicates that both reading aloud and silent reading produced recollection-based memory. This result is inconsistent with findings of Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>) that only discovered a significant LPC old/new effect in the aloud condition. There are some differences between this study design and that of Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>). Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>) included three studying conditions and a total of 240 words, compared to two conditions and a total of 120 words in the current study. Having fewer words to memorize in the study phase might result in higher memory clarity for silent reading, enabling the recall of some background information during recognition, thus causing a significant LPC old/new effect of silent reading.</p>
<p>Next, we compared the LPC old/new effect of reading aloud with that of silent reading. The results indicated a PE in the LPC old/new effect, suggesting that although silent reading activated recollection-based recognition memory, reading aloud remained superior in terms of recollection indicator. These results revealed the mechanism of PE, indicating that PE originates from the advantage of reading aloud over silent reading with respect to recollection/distinctiveness, supporting the distinctiveness account. These results demonstrated that, regarding recollection, the format or length of the lists studied by participants might not influence the mechanism of PE, suggesting consistency and generality in the PE mechanisms across different paradigms/lists (Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). Subsequently, we discuss the familiarity in the mixed-list.</p>
</sec>
<sec>
<title>4.2 Effect of production on FN400 at retrieval</title>
<p>In comparison to recollection, familiarity reflects an automated extraction process that lacks contextual information about one&#x00027;s encoding (Migo et al., <xref ref-type="bibr" rid="B38">2012</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). According to the explanation of strength account, reading aloud enhances participants&#x00027; familiarity with words thus providing a memory advantage over silent reading. At the behavioral level, the current study assessed familiarity using the corrected familiarity indicator (Ozubko et al., <xref ref-type="bibr" rid="B40">2012</xref>; Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). We found a familiarity-based PE in the mixed-list design, which is consistent with the strength account (e.g., Bodner and Taikh, <xref ref-type="bibr" rid="B4">2012</xref>); however, there was no significant PE in the K judgments.</p>
<p>In terms of ERP, the FN400 old/new effect has been postulated to reflect familiarity-based recognition (Rugg and Curran, <xref ref-type="bibr" rid="B44">2007</xref>; Wang et al., <xref ref-type="bibr" rid="B51">2021</xref>). Firstly, we found that, like the LPC old/new effect, there is a significant FN400 old/new effect in both the reading aloud and silent reading conditions. This indicates that both reading aloud and silent reading activate familiarity-based recognition. Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>) found a significant FN400 old/new effect only in the reading aloud condition. We argue that the difference between the two study results is related to the number of vocabulary items memorized. Given that the memory for silent reading is weak, it is not surprising that reducing the number of items would allow participants to generate more familiarity.</p>
<p>The main aim of this section is to discuss whether there is a significant PE in the FN400 old/new effect. Our hypothesis was that in the mixed-list, due to the potential cost of silent reading, familiarity-based recognition during silent reading will be further reduced. As a result, the advantages of familiarity in the aloud condition will be amplified; thus, we will observe a PE in FN400 old/new effect. However, the current results did not support the above hypothesis. Although we found a significant FN400 old/new effect in both reading aloud and silent reading conditions, there was no difference between the two. This aligns with the findings of Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>). The current results and previous results indicate that there is no FN400 old/new effect PE in either mixed-list or block designs. Therefore, the advantage of familiarity at the behavioral level might only be a form of &#x0201C;weak recollection&#x0201D; (Wixted et al., <xref ref-type="bibr" rid="B53">2010</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>).</p>
<p>Even though the FN400 is currently considered to be the best indicator of familiarity and is generally accepted (Madore et al., <xref ref-type="bibr" rid="B36">2020</xref>; Rugg and Curran, <xref ref-type="bibr" rid="B44">2007</xref>), it might also reflect semantic processing of words or perhaps something broader (Voss and Federmeier, <xref ref-type="bibr" rid="B49">2011</xref>; Leynes and Mok, <xref ref-type="bibr" rid="B27">2017</xref>). Therefore, future research should explore if another ERP component could index familiarity-based recognition more effectively.</p>
<p>In addition, researchers should explore the impact of reading aloud on recollection and familiarity in a between-subjects design in future studies. This is because the PE of recollection may only emerge in within-subjects designs, while the between-subjects PE may rely more on strength/familiarity (Bodner et al., <xref ref-type="bibr" rid="B2">2020</xref>; Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>; Whitridge et al., <xref ref-type="bibr" rid="B52">2024</xref>).</p>
</sec>
<sec>
<title>4.3 Insights from MVPA decoding</title>
<p>Our research decoded EEG data during the test phase in a mixed-list or event-design memory task using MVPA technology. MVPA is a crucial method for researchers to understand the nature of memory (Xue, <xref ref-type="bibr" rid="B55">2018</xref>). In terms of decoding between the study conditions (aloud and silent) and new trials, significant decoding was observed only after 500 ms, not before, regardless of whether it was the reading aloud condition with good memory or the silent reading condition. This is consistent with the LPC time window rather than the FN400. This pattern is consistent with the ERP results and previous decoding findings, indicating that recognition memory may rely more on recollection than familiarity. It also aligns with the ERP results, supporting the potential role of the LPC effect in the PE (cf. Curran et al., <xref ref-type="bibr" rid="B9">2006</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>).</p>
<p>Moreover, there has been significant interest in the temporal dynamics of different memory strategies (Forester et al., <xref ref-type="bibr" rid="B16">2019</xref>; Pereira et al., <xref ref-type="bibr" rid="B43">2021</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>), which is crucial for understanding the core mechanisms of advantageous memory strategies. Until now, no research has decoded the temporal dynamics between different memory strategies (such as the time course between reading aloud and silent reading).</p>
<p>To further directly decode and unpack the differences between aloud trials and silent trials, we are the first to decode the time course of two memory strategies (aloud vs. silent). This is important for understanding the mechanisms of the PE, as PE fundamentally involves a comparison between reading aloud and silent reading. The results showed significant classification between reading aloud and silent reading in later time windows (corresponding to the LPC time window). This demonstrates that increased distinctiveness/recollection plays a significant role in the PE, while strength/familiarity does not. This finding is consistent with our ERP results. The two strategies could be decoded in the later time window, further indicating that the enhancement of recollection has the potential to become a major mechanism underlying certain superior memory strategies, such as production. Future research should explore this possibility further.</p>
</sec>
<sec>
<title>4.4 The common mechanism of PE</title>
<p>This study investigated the effect of recollection and familiarity in mixed-list PE. Previous studies indicated that the PE of mixed-list designs is larger than that of block designs due to the costs of silent reading generated in mixed-lists (Bodner et al., <xref ref-type="bibr" rid="B5">2014</xref>). Ozubko et al. (<xref ref-type="bibr" rid="B39">2020</xref>) found that, behaviorally, the cost was reflected in both recollection and familiarity, but Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>) showed EEG data in block design suggesting that reading aloud only enhanced recollection. We addressed two possibilities for PE. Firstly, regardless of design (blocked/mixed list) there is a common mechanism of PE that works through distinctiveness/recollection rather than strength/familiarity (MacLeod et al., <xref ref-type="bibr" rid="B35">2022</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>). Another possibility is that mixed-list PE depends on familiarity. Silent reading might incur a cost in mixed-lists, possibly amplifying the advantage of reading aloud in terms of familiarity and thereby producing a significant PE on the FN400 old/new effect. This implies that familiarity/strength might contribute to mixed-list PE, indicating that PE could be specific to the paradigm. The above inference produces a crucial question: Is the mechanism of PE specific to the paradigm or does it have common mechanisms? Collectively, they suggest that studying the mechanisms of mixed-list PE is of great significance.</p>
<p>The current study revealed that in mixed-list designs, despite the possibility of amplifying the advantage of reading aloud on familiarity relative to silent reading, only a recollection-related PE was present. Also, our study decoded the time-course of PE (aloud trials vs. silent trials) for the first time. The current results suggest that in the test phase, trials of reading aloud and silent reading could only be decoded after 500 ms, which is consistent with the ERP results. The results of the mixed-list design are consistent with Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>), indicating that PE relies solely on recollection/distinctiveness rather than familiarity/strength. The basis of PE is broad and stable, at least within subjects.</p>
</sec>
<sec>
<title>4.5 Traditional explanation and feature space theory</title>
<p>This study aims to further explore the effects of reading aloud on recollection and familiarity, building upon Zhang et al. (<xref ref-type="bibr" rid="B61">2023a</xref>). Understanding the processes of recollection and familiarity is crucial for constructing a modern theoretical framework for the mechanisms underlying the PE. Consistent with previous research, this study posits that the PE in recollection is driven by distinctiveness, and the results align with the predictions of the distinctiveness account (Fawcett and Ozubko, <xref ref-type="bibr" rid="B15">2016</xref>; Zhang et al., <xref ref-type="bibr" rid="B61">2023a</xref>).</p>
<p>However, recent years have seen the emergence of several models. A memory effect is often complex (Hourihan and Fawcett, <xref ref-type="bibr" rid="B21">2024</xref>; Kelly et al., <xref ref-type="bibr" rid="B24">2024</xref>). For example, the Retrieving Effectively from Memory (REM) model suggests that recognition-based PE may stem from longer retrieval times during the testing phase, while the model by Wakeham-Lewis et al. (<xref ref-type="bibr" rid="B50">2022</xref>) argues that PE arises from the enhancement of semantic encoding due to vocalization (Wakeham-Lewis et al., <xref ref-type="bibr" rid="B50">2022</xref>). These models are closely tied to the distinctiveness account. However, they all emphasize that the PE may not be entirely a direct result of distinctiveness.</p>
<p>Here, the recently proposed Feature-Space Theory provides an important theoretical perspective challenging the distinctiveness account. MacLeod et al. (<xref ref-type="bibr" rid="B34">2010</xref>) suggested that reading aloud enhances the retention of distinctiveness factors, allowing participants to consciously recognize that they have read the word when they see it again, and matching the word through this awareness, which is similar to recollection. In contrast, the Feature-Space Theory emphasizes that the enhanced attention from reading aloud captures phonological features within a feature space. In the recognition phase, participants are thought to focus their attention on phonological features to determine whether the probe word is one that has been learned or is new. Items that have been produced tend to store more phonological features, making them more likely to form a match in memory compared to items that have been read silently. In the recognition phase, the view of Caplan and Guitard (<xref ref-type="bibr" rid="B8">2024</xref>) is more aligned with that of Kolers (<xref ref-type="bibr" rid="B26">1973</xref>), suggesting that the matching or recognition process during this phase should be unconscious.</p>
<p>The key difference between the theories of MacLeod et al. (<xref ref-type="bibr" rid="B34">2010</xref>) and Caplan and Guitard (<xref ref-type="bibr" rid="B8">2024</xref>) in the recognition phase lies in the fact that MacLeod et al. (<xref ref-type="bibr" rid="B34">2010</xref>) believe that retrieval is conscious, whereas Caplan and Guitard (<xref ref-type="bibr" rid="B8">2024</xref>) tend to view retrieval as unconscious (Hourihan and Fawcett, <xref ref-type="bibr" rid="B21">2024</xref>). This raises questions about whether recollection can be considered a slow process and an active retrieval (Yonelinas et al., <xref ref-type="bibr" rid="B59">2022</xref>), and whether the Feature-Space Theory can directly predict the PE in recollection. Since the Feature-Space Theory is a mathematical model and did not attempt to predict the PE in recollection in the paper, we remain cautious about this. Since the LPC component, an indicator of recollection, is a slow and late positive ERP wave that typically reflects active retrieval (Guillaume and Tiberghien, <xref ref-type="bibr" rid="B18">2013</xref>; Parks, <xref ref-type="bibr" rid="B42">2007</xref>; MacKenzie and Donaldson, <xref ref-type="bibr" rid="B32">2007</xref>; Yonelinas et al., <xref ref-type="bibr" rid="B59">2022</xref>; Zhao et al., <xref ref-type="bibr" rid="B62">2020</xref>), this does not align with the Feature-Space Theory which suggests that retrieval tends to be automatic. Nevertheless, this is consistent with the predictions of the distinctiveness account, which proposes that participants actively retrieve the words. Therefore, this study supports the role of distinctiveness in the PE and also reinforces the distinctiveness account. We tend to believe that PE recognition is based on active retrieval rather than automatic processing. Another piece of evidence supporting this comes from familiarity (FN400).</p>
<p>In terms of familiarity, it is considered a relatively automated component of recognition, with fast processing speed. This explains why in everyday life, when we see someone, we may instinctively feel a sense of familiarity. Considering that the early frontal component FN400 is a fast and automated processing component, it also makes sense why FN400 is regarded as a representative component of familiarity (Guillaume and Tiberghien, <xref ref-type="bibr" rid="B18">2013</xref>; Parks, <xref ref-type="bibr" rid="B42">2007</xref>; MacKenzie and Donaldson, <xref ref-type="bibr" rid="B32">2007</xref>; Yonelinas et al., <xref ref-type="bibr" rid="B59">2022</xref>; Zhao et al., <xref ref-type="bibr" rid="B62">2020</xref>). Feature-Space Theory suggests that matching or recognition is unconscious, which would imply a significant PE should be observed in the early components of fast, automatic processing. However, we did not observe a PE in the FN400 (familiarity). This is inconsistent with the hypothesis of unconscious matching proposed by Feature-Space Theory.</p>
<p>Furthermore, we examined evidence from MVPA, and if this process is primarily unconscious, we would expect to observe whole-brain decoding in MVPA during the early time window. This is because MVPA is more sensitive than ERP, allowing for the decoding of whole-brain activity across a broader spectrum, which helps avoid biases related to electrode selection and time window constraints. This approach more accurately reflects the true nature of memory processes (Xue, <xref ref-type="bibr" rid="B55">2018</xref>). However, we did not observe decoding before 500 ms, which is also different from the predictions of unconscious matching or retrieval.</p>
<p>Feature-Space Theory, as an excellent mathematical model, has successfully predicted some effects of the PE. However, we are still unclear about how to predict the neural dynamics at the experimental level, how these dynamics interact with context-based recollection and familiarity, and how to anticipate the separations we observe in behavior and electrophysiology.</p>
<p>Future research should further explore these issues. Future studies should continue to test and advance these models by integrating computational models with experiments, moving beyond mere theoretical simulation to reveal what truly occurs for participants during the experimental process. Perhaps future integration of neuroscience techniques, experiments, and these models will be a good way to unravel the mystery.</p>
<p>Overall, this study supports the role of distinctiveness in the PE, rather than strength.</p></sec></sec>
<sec id="s5">
<title>5 Conclusion</title>
<p>Previous studies detected the effect of reading aloud on recollection and familiarity at both the behavioral and EEG levels, with results indicating that reading aloud may enhance recollection rather than familiarity. The current study used EEG technology to systematically investigate, for the first time, the effects of reading aloud on recollection and familiarity in a mixed-list PE, thereby establishing a fundamental understanding of the PE. At the ERP level, the PE was observed only in the LPC old/new effect rather than the FN400 old/new effect, indicating that the PE exists only in recollection. This suggests that PE relies on distinctiveness rather than strength. At the MVPA level, in order to further understand the mechanisms of the PE, this study for the first time decoded the time-course of reading aloud and silent reading during the test phase, indicating that only EEG data after 500 ms could be decoded, consistent with the time window of the LPC component. Overall, the current study demonstrated that the mechanism of the PE may exhibit broad and stable cross-paradigm consistency. In the conclusion of the paper, this study discusses traditional theories and Feature Space Theory based on the findings of the research. We reported the inconsistency between our results and the unconscious processing hypothesis of Feature Space Theory. Furthermore, this study suggests that enhanced distinctiveness/recollection may be a shared mechanism underlying certain advantageous memory strategies.</p></sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Human Research Ethics in Flinders University. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>BZ: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Resources, Software, Validation, Visualization, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. AA: Funding acquisition, Supervision, Writing &#x02013; review &#x00026; editing. MY: Data curation, Writing &#x02013; review &#x00026; editing. YH: Data curation, Writing &#x02013; review &#x00026; editing. AC: Funding acquisition, Resources, Supervision, Writing &#x02013; review &#x00026; editing. HM: Funding acquisition, Resources, Supervision, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. This work was supported by National Natural Science Foundation of China (Grant Nos. 32171040 and 32371105).</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author(s) declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec id="s10">
<title>Generative AI statement</title>
<p>The author(s) declare that Gen AI was used in the creation of this manuscript. We used ChatGPT to check the grammar.</p></sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bailey</surname> <given-names>L. M.</given-names></name> <name><surname>Bodner</surname> <given-names>G. E.</given-names></name> <name><surname>Matheson</surname> <given-names>H. E.</given-names></name> <name><surname>Stewart</surname> <given-names>B. M.</given-names></name> <name><surname>Roddick</surname> <given-names>K.</given-names></name> <name><surname>O&#x00027;Neil</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Neural correlates of the production effect: an fMRI study</article-title>. <source>Brain Cogn.</source> <volume>152</volume>:<fpage>105757</fpage>. <pub-id pub-id-type="doi">10.1016/j.bandc.2021.105757</pub-id><pub-id pub-id-type="pmid">34130081</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bodner</surname> <given-names>G. E.</given-names></name> <name><surname>Huff</surname> <given-names>M. J.</given-names></name> <name><surname>Taikh</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Pure-list production improves item recognition and sometimes also improves source memory</article-title>. <source>Mem. Cogn.</source> <volume>48</volume>, <fpage>1281</fpage>&#x02013;<lpage>1294</lpage>. <pub-id pub-id-type="doi">10.3758/s13421-020-01044-2</pub-id><pub-id pub-id-type="pmid">32399916</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bodner</surname> <given-names>G. E.</given-names></name> <name><surname>Jamieson</surname> <given-names>R. K.</given-names></name> <name><surname>Cormack</surname> <given-names>D. T.</given-names></name> <name><surname>McDonald</surname> <given-names>D. L.</given-names></name> <name><surname>Bernstein</surname> <given-names>D. M.</given-names></name></person-group> (<year>2016</year>). <article-title>The production effect in recognition memory: weakening strength can strengthen distinctiveness</article-title>. <source>Can. J. Exp. Psychol. Rev. Can. Psychol. Exp</source>. <volume>70</volume>:<fpage>93</fpage>. <pub-id pub-id-type="doi">10.1037/cep0000082</pub-id><pub-id pub-id-type="pmid">27244351</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bodner</surname> <given-names>G. E.</given-names></name> <name><surname>Taikh</surname> <given-names>A.</given-names></name></person-group> (<year>2012</year>). <article-title>Reassessing the basis of the production effect in memory</article-title>. <source>J. Exp. Psychol. Learn. Mem. Cogn.</source> <volume>38</volume>:<fpage>1711</fpage>. <pub-id pub-id-type="doi">10.1037/a0028466</pub-id><pub-id pub-id-type="pmid">22563636</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bodner</surname> <given-names>G. E.</given-names></name> <name><surname>Taikh</surname> <given-names>A.</given-names></name> <name><surname>Fawcett</surname> <given-names>J. M.</given-names></name></person-group> (<year>2014</year>). <article-title>Assessing the costs and benefits of production in recognition</article-title>. <source>Psychon. Bull. Rev.</source> <volume>21</volume>, <fpage>149</fpage>&#x02013;<lpage>154</lpage>. <pub-id pub-id-type="doi">10.3758/s13423-013-0485-1</pub-id><pub-id pub-id-type="pmid">23884689</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bridger</surname> <given-names>E. K.</given-names></name> <name><surname>Mecklinger</surname> <given-names>A.</given-names></name></person-group> (<year>2012</year>). <article-title>Electrophysiologically dissociating episodic preretrieval processing</article-title>. <source>J. Cogn. Neurosci.</source> <volume>24</volume>, <fpage>1476</fpage>&#x02013;<lpage>1491</lpage>. <pub-id pub-id-type="doi">10.1162/jocn_a_00152</pub-id><pub-id pub-id-type="pmid">21981675</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cai</surname> <given-names>Q.</given-names></name> <name><surname>Brysbaert</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>SUBTLEX-CH: Chinese word and character frequencies based on film subtitles</article-title>. <source>PLoS ONE</source> <volume>5</volume>:<fpage>e10729</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0010729</pub-id><pub-id pub-id-type="pmid">20532192</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Caplan</surname> <given-names>J. B.</given-names></name> <name><surname>Guitard</surname> <given-names>D.</given-names></name></person-group> (<year>2024</year>). <article-title>A feature-space theory of the production effect in recognition</article-title>. <source>Exp. Psychol.</source> <volume>71</volume>, <fpage>64</fpage>&#x02013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1027/1618-3169/a000611</pub-id><pub-id pub-id-type="pmid">39078071</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Curran</surname> <given-names>T.</given-names></name> <name><surname>DeBuse</surname> <given-names>C.</given-names></name> <name><surname>Woroch</surname> <given-names>B.</given-names></name> <name><surname>Hirshman</surname> <given-names>E.</given-names></name></person-group> (<year>2006</year>). <article-title>Combined pharmacological and electrophysiological dissociation of familiarity and recollection</article-title>. <source>J. Neurosci.</source> <volume>26</volume>, <fpage>1979</fpage>&#x02013;<lpage>1985</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.5370-05.2006</pub-id><pub-id pub-id-type="pmid">16481430</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Curran</surname> <given-names>T.</given-names></name> <name><surname>Friedman</surname> <given-names>W. J.</given-names></name></person-group> (<year>2004</year>). <article-title>ERP old/new effects at different retention intervals in recency discrimination tasks</article-title>. <source>Cogn. Brain Res.</source> <volume>18</volume>, <fpage>107</fpage>&#x02013;<lpage>120</lpage>. <pub-id pub-id-type="doi">10.1016/j.cogbrainres.2003.09.006</pub-id><pub-id pub-id-type="pmid">14736570</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cyr</surname> <given-names>V.</given-names></name> <name><surname>Poirier</surname> <given-names>M.</given-names></name> <name><surname>Yearsley</surname> <given-names>J. M.</given-names></name> <name><surname>Guitard</surname> <given-names>D.</given-names></name> <name><surname>Harrigan</surname> <given-names>I.</given-names></name> <name><surname>Saint-Aubin</surname> <given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>The production effect over the long term: modeling distinctiveness using serial positions</article-title>. <source>J. Exp. Psychol. Learn. Memory Cogn.</source> <volume>48</volume>:<fpage>1797</fpage>. <pub-id pub-id-type="doi">10.1037/xlm0001093</pub-id><pub-id pub-id-type="pmid">34726441</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dunlosky</surname> <given-names>J.</given-names></name> <name><surname>Rawson</surname> <given-names>K. A.</given-names></name> <name><surname>Marsh</surname> <given-names>E. J.</given-names></name> <name><surname>Nathan</surname> <given-names>M. J.</given-names></name> <name><surname>Willingham</surname> <given-names>D. T.</given-names></name></person-group> (<year>2013</year>). <article-title>Improving students&#x00027; learning with effective learning techniques: promising directions from cognitive and educational psychology</article-title>. <source>Psychol. Sci. Public Interest</source> <volume>14</volume>, <fpage>4</fpage>&#x02013;<lpage>58</lpage>. <pub-id pub-id-type="doi">10.1177/1529100612453266</pub-id><pub-id pub-id-type="pmid">26173288</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fahrenfort</surname> <given-names>J. J.</given-names></name> <name><surname>Van Driel</surname> <given-names>J.</given-names></name> <name><surname>Van Gaal</surname> <given-names>S.</given-names></name> <name><surname>Olivers</surname> <given-names>C. N.</given-names></name></person-group> (<year>2018</year>). <article-title>From ERPs to MVPA using the Amsterdam decoding and modeling toolbox (ADAM)</article-title>. <source>Front. Neurosci.</source> <volume>12</volume>:<fpage>368</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2018.00368</pub-id><pub-id pub-id-type="pmid">30018529</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fawcett</surname> <given-names>J. M.</given-names></name></person-group> (<year>2013</year>). <article-title>The production effect benefits performance in between-subject designs: a meta-analysis</article-title>. <source>Acta Psychol</source>. <volume>142</volume>, <fpage>1</fpage>&#x02013;<lpage>5</lpage>. <pub-id pub-id-type="doi">10.1016/j.actpsy.2012.10.001</pub-id><pub-id pub-id-type="pmid">23142670</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fawcett</surname> <given-names>J. M.</given-names></name> <name><surname>Ozubko</surname> <given-names>J. D.</given-names></name></person-group> (<year>2016</year>). <article-title>Familiarity, but not recollection, supports the between-subject production effect in recognition memory</article-title>. <source>Can. J. Exp. Psychol. Rev. Can. Psychol. Exp.</source> <volume>70</volume>:<fpage>99</fpage>. <pub-id pub-id-type="doi">10.1037/cep0000089</pub-id><pub-id pub-id-type="pmid">27244352</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Forester</surname> <given-names>G.</given-names></name> <name><surname>Kroneisen</surname> <given-names>M.</given-names></name> <name><surname>Erdfelder</surname> <given-names>E.</given-names></name> <name><surname>Kamp</surname> <given-names>S. M.</given-names></name></person-group> (<year>2019</year>). <article-title>On the role of retrieval processes in the survival processing effect: evidence from ROC and ERP analyses</article-title>. <source>Neurobiol. Learn. Mem</source>. <volume>166</volume>:<fpage>107083</fpage>. <pub-id pub-id-type="doi">10.1016/j.nlm.2019.107083</pub-id><pub-id pub-id-type="pmid">31491554</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Forrin</surname> <given-names>N. D.</given-names></name> <name><surname>MacLeod</surname> <given-names>C. M.</given-names></name> <name><surname>Ozubko</surname> <given-names>J. D.</given-names></name></person-group> (<year>2012</year>). <article-title>Widening the boundaries of the production effect</article-title>. <source>Memory Cogn.</source> <volume>40</volume>, <fpage>1046</fpage>&#x02013;<lpage>1055</lpage>. <pub-id pub-id-type="doi">10.3758/s13421-012-0210-8</pub-id><pub-id pub-id-type="pmid">22528825</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guillaume</surname> <given-names>F.</given-names></name> <name><surname>Tiberghien</surname> <given-names>G.</given-names></name></person-group> (<year>2013</year>). <article-title>Impact of intention on the ERP correlates of face recognition</article-title>. <source>Brain Cogn.</source> <volume>81</volume>, <fpage>73</fpage>&#x02013;<lpage>81</lpage>. <pub-id pub-id-type="doi">10.1016/j.bandc.2012.10.007</pub-id><pub-id pub-id-type="pmid">23174431</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hassall</surname> <given-names>C. D.</given-names></name> <name><surname>Quinlan</surname> <given-names>C. K.</given-names></name> <name><surname>Turk</surname> <given-names>D. J.</given-names></name> <name><surname>Taylor</surname> <given-names>T. L.</given-names></name> <name><surname>Krigolson</surname> <given-names>O. E.</given-names></name></person-group> (<year>2016</year>). <article-title>A preliminary investigation into the neural basis of the production effect</article-title>. <source>Can. J. Exp. Psychol. Rev. Can. Psychol. Exp.</source> <volume>70</volume>:<fpage>139</fpage>. <pub-id pub-id-type="doi">10.1037/cep0000093</pub-id><pub-id pub-id-type="pmid">27244355</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hopkins</surname> <given-names>R. H.</given-names></name> <name><surname>Edwards</surname> <given-names>R. E.</given-names></name></person-group> (<year>1972</year>). <article-title>Pronunciation effects in recognition memory</article-title>. <source>J. Verbal Learn. Verbal Behav</source>. <volume>11</volume>, <fpage>534</fpage>&#x02013;<lpage>537</lpage>. <pub-id pub-id-type="doi">10.1016/S0022-5371(72)80036-7</pub-id></citation>
</ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hourihan</surname> <given-names>K. L.</given-names></name> <name><surname>Fawcett</surname> <given-names>J. M.</given-names></name></person-group> (<year>2024</year>). <article-title>It&#x00027;s all about that case: Production and reading fluency</article-title>. <source>Exp. Psychol.</source> <volume>71</volume>, <fpage>83</fpage>&#x02013;<lpage>96</lpage>. <pub-id pub-id-type="doi">10.1027/1618-3169/a000615</pub-id><pub-id pub-id-type="pmid">39314148</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>J&#x000E4;ger</surname> <given-names>T.</given-names></name> <name><surname>Mecklinger</surname> <given-names>A.</given-names></name> <name><surname>Kipp</surname> <given-names>K. H.</given-names></name></person-group> (<year>2006</year>). <article-title>Intra-and inter-item associations doubly dissociate the electrophysiological correlates of familiarity and recollection</article-title>. <source>Neuron</source> <volume>52</volume>, <fpage>535</fpage>&#x02013;<lpage>545</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2006.09.013</pub-id><pub-id pub-id-type="pmid">17088218</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jonker</surname> <given-names>T. R.</given-names></name> <name><surname>Levene</surname> <given-names>M.</given-names></name> <name><surname>MacLeod</surname> <given-names>C. M.</given-names></name></person-group> (<year>2014</year>). <article-title>Testing the item-order account of design effects using the production effect</article-title>. <source>J. Exp. Psychol. Learn. Mem. Cogn.</source> <volume>40</volume>:<fpage>441</fpage>. <pub-id pub-id-type="doi">10.1037/a0034977</pub-id><pub-id pub-id-type="pmid">24219087</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kelly</surname> <given-names>M. O.</given-names></name> <name><surname>Ensor</surname> <given-names>T. M.</given-names></name> <name><surname>MacLeod</surname> <given-names>C. M.</given-names></name> <name><surname>Risko</surname> <given-names>E. F.</given-names></name></person-group> (<year>2024</year>). <article-title>The production effect: partially producing items moderates the production effect</article-title>. <source>Psychon. Bull. Rev.</source> <volume>31</volume>, <fpage>373</fpage>&#x02013;<lpage>379</lpage>. <pub-id pub-id-type="doi">10.3758/s13423-023-02360-9</pub-id><pub-id pub-id-type="pmid">37620632</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>King</surname> <given-names>J. R.</given-names></name> <name><surname>Dehaene</surname> <given-names>S.</given-names></name></person-group> (<year>2014</year>). <article-title>Characterizing the dynamics of mental representations: the temporal generalization method</article-title>. <source>Trends Cogn. Sci.</source> <volume>18</volume>, <fpage>203</fpage>&#x02013;<lpage>210</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2014.01.002</pub-id><pub-id pub-id-type="pmid">24593982</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kolers</surname> <given-names>P. A.</given-names></name></person-group> (<year>1973</year>). <article-title>Remembering operations</article-title>. <source>Mem. Cogn</source> <volume>1</volume>, <fpage>347</fpage>&#x02013;<lpage>355</lpage>. <pub-id pub-id-type="doi">10.3758/BF03198119</pub-id><pub-id pub-id-type="pmid">24214568</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Leynes</surname> <given-names>P. A.</given-names></name> <name><surname>Mok</surname> <given-names>B. A.</given-names></name></person-group> (<year>2017</year>). <article-title>Encoding focus alters diagnostic recollection and event-related potentials (ERPs)</article-title>. <source>Brain Cogn.</source> <volume>117</volume>, <fpage>1</fpage>&#x02013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1016/j.bandc.2017.06.011</pub-id><pub-id pub-id-type="pmid">28683338</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Q.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>Z.</given-names></name> <name><surname>Chen</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Decoding the specificity of post-error adjustments using EEG-based multivariate pattern analysis</article-title>. <source>J. Neurosci.</source> <volume>42</volume>, <fpage>6800</fpage>&#x02013;<lpage>6809</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.0590-22.2022</pub-id><pub-id pub-id-type="pmid">35879098</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Z.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>Q.</given-names></name> <name><surname>Yin</surname> <given-names>S.</given-names></name> <name><surname>Chen</surname> <given-names>A.</given-names></name></person-group> (<year>2024</year>). <article-title>Attenuated conflict self-referential information facilitating conflict resolution</article-title>. <source>NPJ Sci. Learn</source>. <volume>9</volume>:<fpage>47</fpage>. <pub-id pub-id-type="doi">10.1038/s41539-024-00256-4</pub-id><pub-id pub-id-type="pmid">39030204</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lin</surname> <given-names>O. Y.</given-names></name> <name><surname>MacLeod</surname> <given-names>C. M.</given-names></name></person-group> (<year>2012</year>). <article-title>Aging and the production effect: a test of the distinctiveness account</article-title>. <source>Canad. J. Exp. Psychol.</source> <volume>66</volume>:<fpage>212</fpage>. <pub-id pub-id-type="doi">10.1037/a0028309</pub-id><pub-id pub-id-type="pmid">22686153</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>L&#x000F3;pez Assef</surname> <given-names>B.</given-names></name> <name><surname>Desmeules-Trudel</surname> <given-names>F.</given-names></name> <name><surname>Bernard</surname> <given-names>A.</given-names></name> <name><surname>Zamuner</surname> <given-names>T. S.</given-names></name></person-group> (<year>2021</year>). <article-title>A shift in the direction of the production effect in children aged 2&#x02013;6 years</article-title>. <source>Child Dev.</source> <volume>92</volume>, <fpage>2447</fpage>&#x02013;<lpage>2464</lpage>. <pub-id pub-id-type="doi">10.1111/cdev.13618</pub-id><pub-id pub-id-type="pmid">34406649</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>MacKenzie</surname> <given-names>G.</given-names></name> <name><surname>Donaldson</surname> <given-names>D. I.</given-names></name></person-group> (<year>2007</year>). <article-title>Dissociating recollection from familiarity: electrophysiological evidence that familiarity for faces is associated with a posterior old/new effect</article-title>. <source>Neuroimage</source> <volume>36</volume>, <fpage>454</fpage>&#x02013;<lpage>463</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2006.12.005</pub-id><pub-id pub-id-type="pmid">17451972</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>MacLeod</surname> <given-names>C. M.</given-names></name> <name><surname>Bodner</surname> <given-names>G. E.</given-names></name></person-group> (<year>2017</year>). <article-title>The production effect in memory</article-title>. <source>Curr. Dir. Psychol</source>. <volume>26</volume>, <fpage>390</fpage>&#x02013;<lpage>395</lpage>. <pub-id pub-id-type="doi">10.1177/0963721417691356</pub-id></citation>
</ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>MacLeod</surname> <given-names>C. M.</given-names></name> <name><surname>Gopie</surname> <given-names>N.</given-names></name> <name><surname>Hourihan</surname> <given-names>K. L.</given-names></name> <name><surname>Neary</surname> <given-names>K. R.</given-names></name> <name><surname>Ozubko</surname> <given-names>J. D.</given-names></name></person-group> (<year>2010</year>). <article-title>The production effect: delineation of a phenomenon</article-title>. <source>J. Exp. Psychol. Learn. Mem. Cogn.</source> <volume>36</volume>:<fpage>671</fpage>. <pub-id pub-id-type="doi">10.1037/a0018785</pub-id><pub-id pub-id-type="pmid">20438265</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>MacLeod</surname> <given-names>C. M.</given-names></name> <name><surname>Ozubko</surname> <given-names>J. D.</given-names></name> <name><surname>Hourihan</surname> <given-names>K. L.</given-names></name> <name><surname>Major</surname> <given-names>J. C.</given-names></name></person-group> (<year>2022</year>). <article-title>The production effect is consistent over material variations: support for the distinctiveness account</article-title>. <source>Memory</source> <volume>30</volume>, <fpage>1000</fpage>&#x02013;<lpage>1007</lpage>. <pub-id pub-id-type="doi">10.1080/09658211.2022.2069270</pub-id><pub-id pub-id-type="pmid">35635318</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Madore</surname> <given-names>K. P.</given-names></name> <name><surname>Khazenzon</surname> <given-names>A. M.</given-names></name> <name><surname>Backes</surname> <given-names>C. W.</given-names></name> <name><surname>Jiang</surname> <given-names>J.</given-names></name> <name><surname>Uncapher</surname> <given-names>M. R.</given-names></name> <name><surname>Norcia</surname> <given-names>A. M.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Memory failure predicted by attention lapsing and media multitasking</article-title>. <source>Nature</source> <volume>587</volume>, <fpage>87</fpage>&#x02013;<lpage>91</lpage>. <pub-id pub-id-type="doi">10.1038/s41586-020-2870-z</pub-id><pub-id pub-id-type="pmid">33116309</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Mecklinger</surname> <given-names>A.</given-names></name> <name><surname>J&#x000E4;ger</surname> <given-names>T.</given-names></name></person-group> (<year>2009</year>). <article-title>Episodic memory storage and retrieval: insights from electrophysiological measures</article-title>. <source>Neuroimag. Psychol. Theor. Hum. Memory</source> <fpage>357</fpage>&#x02013;<lpage>382</lpage>. <pub-id pub-id-type="doi">10.1093/acprof:oso/9780199217298.003.0020</pub-id></citation>
</ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Migo</surname> <given-names>E. M.</given-names></name> <name><surname>Mayes</surname> <given-names>A. R.</given-names></name> <name><surname>Montaldi</surname> <given-names>D.</given-names></name></person-group> (<year>2012</year>). <article-title>Measuring recollection and familiarity: improving the remember/know procedure</article-title>. <source>Conscious. Cogn.</source> <volume>21</volume>, <fpage>1435</fpage>&#x02013;<lpage>1455</lpage>. <pub-id pub-id-type="doi">10.1016/j.concog.2012.04.014</pub-id><pub-id pub-id-type="pmid">22846231</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ozubko</surname> <given-names>J. D.</given-names></name> <name><surname>Bamburoski</surname> <given-names>L. D.</given-names></name> <name><surname>Carlin</surname> <given-names>K.</given-names></name> <name><surname>Fawcett</surname> <given-names>J. M.</given-names></name></person-group> (<year>2020</year>). <article-title>Distinctive encodings and the production effect: failure to retrieve distinctive encodings decreases recollection of silent items</article-title>. <source>Memory</source> <volume>28</volume>, <fpage>237</fpage>&#x02013;<lpage>260</lpage>. <pub-id pub-id-type="doi">10.1080/09658211.2019.1711128</pub-id><pub-id pub-id-type="pmid">31959064</pub-id></citation></ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ozubko</surname> <given-names>J. D.</given-names></name> <name><surname>Gopie</surname> <given-names>N.</given-names></name> <name><surname>MacLeod</surname> <given-names>C. M.</given-names></name></person-group> (<year>2012</year>). <article-title>Production benefits both recollection and familiarity</article-title>. <source>Mem. Cogn</source>. <volume>40</volume>, <fpage>326</fpage>&#x02013;<lpage>338</lpage>. <pub-id pub-id-type="doi">10.3758/s13421-011-0165-1</pub-id><pub-id pub-id-type="pmid">22127849</pub-id></citation></ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ozubko</surname> <given-names>J. D.</given-names></name> <name><surname>MacLeod</surname> <given-names>C. M.</given-names></name></person-group> (<year>2010</year>). <article-title>The production effect in memory: evidence that distinctiveness underlies the benefit</article-title>. <source>J. Exp. Psychol. Learn. Mem. Cogn.</source> <volume>36</volume>:<fpage>1543</fpage>. <pub-id pub-id-type="doi">10.1037/a0020604</pub-id><pub-id pub-id-type="pmid">20804284</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Parks</surname> <given-names>C. M.</given-names></name></person-group> (<year>2007</year>). <article-title>The role of noncriterial recollection in estimating recollection and familiarity</article-title>. <source>J. Mem. Lang.</source> <volume>57</volume>, <fpage>81</fpage>&#x02013;<lpage>100</lpage>. <pub-id pub-id-type="doi">10.1016/j.jml.2007.03.003</pub-id><pub-id pub-id-type="pmid">18591986</pub-id></citation></ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pereira</surname> <given-names>D. R.</given-names></name> <name><surname>Sampaio</surname> <given-names>A.</given-names></name> <name><surname>Pinheiro</surname> <given-names>A. P.</given-names></name></person-group> (<year>2021</year>). <article-title>Interactions of emotion and self-reference in source memory: an ERP study</article-title>. <source>Cogn. Affect. Behav. Neurosci.</source> <volume>21</volume>, <fpage>172</fpage>&#x02013;<lpage>190</lpage>. <pub-id pub-id-type="doi">10.3758/s13415-020-00858-6</pub-id><pub-id pub-id-type="pmid">33608840</pub-id></citation></ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rugg</surname> <given-names>M. D.</given-names></name> <name><surname>Curran</surname> <given-names>T.</given-names></name></person-group> (<year>2007</year>). <article-title>Event-related potentials and recognition memory</article-title>. <source>Trends Cogn. Sci</source>. <volume>11</volume>, <fpage>251</fpage>&#x02013;<lpage>257</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2007.04.004</pub-id><pub-id pub-id-type="pmid">17481940</pub-id></citation></ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Saint-Aubin</surname> <given-names>J.</given-names></name> <name><surname>Yearsley</surname> <given-names>J. M.</given-names></name> <name><surname>Poirier</surname> <given-names>M.</given-names></name> <name><surname>Cyr</surname> <given-names>V.</given-names></name> <name><surname>Guitard</surname> <given-names>D.</given-names></name></person-group> (<year>2021</year>). <article-title>A model of the production effect over the short-term: the cost of relative distinctiveness</article-title>. <source>J. Mem. Lang.</source> <volume>118</volume>:<fpage>104219</fpage>. <pub-id pub-id-type="doi">10.1016/j.jml.2021.104219</pub-id></citation>
</ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schaefer</surname> <given-names>A.</given-names></name> <name><surname>Pottage</surname> <given-names>C. L.</given-names></name> <name><surname>Rickart</surname> <given-names>A. J.</given-names></name></person-group> (<year>2011</year>). <article-title>Electrophysiological correlates of remembering emotional pictures</article-title>. <source>Neuroimage</source> <volume>54</volume>, <fpage>714</fpage>&#x02013;<lpage>724</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2010.07.030</pub-id><pub-id pub-id-type="pmid">20650320</pub-id></citation></ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sharifian</surname> <given-names>F.</given-names></name> <name><surname>Schneider</surname> <given-names>D.</given-names></name> <name><surname>Arnau</surname> <given-names>S.</given-names></name> <name><surname>Wascher</surname> <given-names>E.</given-names></name></person-group> (<year>2021</year>). <article-title>Decoding of cognitive processes involved in the continuous performance task</article-title>. <source>Int. J. Psychophysiol.</source> <volume>167</volume>, <fpage>57</fpage>&#x02013;<lpage>68</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijpsycho.2021.06.012</pub-id><pub-id pub-id-type="pmid">34216693</pub-id></citation></ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tan</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>P.</given-names></name> <name><surname>Zhang</surname> <given-names>Q.</given-names></name> <name><surname>Yu</surname> <given-names>Q.</given-names></name> <name><surname>Bai</surname> <given-names>X.</given-names></name></person-group> (<year>2022</year>). <article-title>The role of articulation movement and sound in the production effect: an fNIRS study</article-title>. <source>J. Psychol. Sci.</source> <volume>45</volume>, <fpage>545</fpage>&#x02013;<lpage>552</lpage>. <pub-id pub-id-type="doi">10.16719/j.cnki.1671-6981.20220305</pub-id></citation>
</ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Voss</surname> <given-names>J. L.</given-names></name> <name><surname>Federmeier</surname> <given-names>K. D.</given-names></name></person-group> (<year>2011</year>). <article-title>FN400 potentials are functionally identical to N400 potentials and reflect semantic processing during recognition testing</article-title>. <source>Psychophysiology</source> <volume>48</volume>, <fpage>532</fpage>&#x02013;<lpage>546</lpage>. <pub-id pub-id-type="doi">10.1111/j.1469-8986.2010.01085.x</pub-id><pub-id pub-id-type="pmid">20701709</pub-id></citation></ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wakeham-Lewis</surname> <given-names>R. M.</given-names></name> <name><surname>Ozubko</surname> <given-names>J.</given-names></name> <name><surname>Fawcett</surname> <given-names>J. M.</given-names></name></person-group> (<year>2022</year>). <article-title>Characterizing production: the production effect is eliminated for unusual voices unless they are frequent at study</article-title>. <source>Memory</source> <volume>30</volume>, <fpage>1319</fpage>&#x02013;<lpage>1333</lpage>. <pub-id pub-id-type="doi">10.1080/09658211.2022.2115075</pub-id><pub-id pub-id-type="pmid">36107805</pub-id></citation></ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>B.</given-names></name> <name><surname>Cheng</surname> <given-names>C.</given-names></name> <name><surname>Jin</surname> <given-names>Z.</given-names></name> <name><surname>Wu</surname> <given-names>S.</given-names></name> <name><surname>Xiang</surname> <given-names>L.</given-names></name></person-group> (<year>2021</year>). <article-title>The influence of negative emotional intensity on dual-processing recognition</article-title>. <source>Biol. Psychol.</source> <volume>161</volume>:<fpage>108083</fpage>. <pub-id pub-id-type="doi">10.1016/j.biopsycho.2021.108083</pub-id><pub-id pub-id-type="pmid">33774133</pub-id></citation></ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Whitridge</surname> <given-names>J. W.</given-names></name> <name><surname>Huff</surname> <given-names>M. J.</given-names></name> <name><surname>Ozubko</surname> <given-names>J. D.</given-names></name> <name><surname>B&#x000FC;rkner</surname> <given-names>P. C.</given-names></name> <name><surname>Lahey</surname> <given-names>C. D.</given-names></name> <name><surname>Fawcett</surname> <given-names>J. M.</given-names></name></person-group> (<year>2024</year>). <article-title>Singing does not necessarily improve memory more than reading aloud</article-title>. <source>Exp. Psychol.</source> <volume>71</volume>, <fpage>33</fpage>&#x02013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1027/1618-3169/a000614</pub-id><pub-id pub-id-type="pmid">39078072</pub-id></citation></ref>
<ref id="B53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wixted</surname> <given-names>J. T.</given-names></name> <name><surname>Mickes</surname> <given-names>L.</given-names></name> <name><surname>Squire</surname> <given-names>L. R.</given-names></name></person-group> (<year>2010</year>). <article-title>Measuring recollection and familiarity in the medial temporal lobe</article-title>. <source>Hippocampus</source> <volume>20</volume>, <fpage>1195</fpage>&#x02013;<lpage>1205</lpage>. <pub-id pub-id-type="doi">10.1002/hipo.20854</pub-id><pub-id pub-id-type="pmid">20848603</pub-id></citation></ref>
<ref id="B54">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Xiao</surname> <given-names>H.</given-names></name></person-group> (<year>2016</year>). <source>A Study of Corpus Word Sense Annotation</source>. <publisher-name>Yunnan Education Publishing House</publisher-name>.</citation>
</ref>
<ref id="B55">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xue</surname> <given-names>G.</given-names></name></person-group> (<year>2018</year>). <article-title>The neural representations underlying human episodic memory</article-title>. <source>Trends Cogn. Sci.</source> <volume>22</volume>, <fpage>544</fpage>&#x02013;<lpage>561</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2018.03.004</pub-id><pub-id pub-id-type="pmid">29625850</pub-id></citation></ref>
<ref id="B56">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yonelinas</surname> <given-names>A. P.</given-names></name></person-group> (<year>2002</year>). <article-title>The nature of recollection and familiarity: a review of 30 years of research</article-title>. <source>J. Mem. Lang.</source> <volume>46</volume>, <fpage>441</fpage>&#x02013;<lpage>517</lpage>. <pub-id pub-id-type="doi">10.1006/jmla.2002.2864</pub-id><pub-id pub-id-type="pmid">16899208</pub-id></citation></ref>
<ref id="B57">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yonelinas</surname> <given-names>A. P.</given-names></name> <name><surname>Aly</surname> <given-names>M.</given-names></name> <name><surname>Wang</surname> <given-names>W. C.</given-names></name> <name><surname>Koen</surname> <given-names>J. D.</given-names></name></person-group> (<year>2010</year>). <article-title>Recollection and familiarity: examining controversial assumptions and new directions</article-title>. <source>Hippocampus</source> <volume>20</volume>, <fpage>1178</fpage>&#x02013;<lpage>1194</lpage>. <pub-id pub-id-type="doi">10.1002/hipo.20864</pub-id><pub-id pub-id-type="pmid">20848606</pub-id></citation></ref>
<ref id="B58">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yonelinas</surname> <given-names>A. P.</given-names></name> <name><surname>Jacoby</surname> <given-names>L. L.</given-names></name></person-group> (<year>1995</year>). <article-title>The relation between remembering and knowing as bases for recognition: effects of size congruency</article-title>. <source>J. Mem. Lang.</source> <volume>34</volume>, <fpage>622</fpage>&#x02013;<lpage>643</lpage>. <pub-id pub-id-type="doi">10.1006/jmla.1995.1028</pub-id></citation>
</ref>
<ref id="B59">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Yonelinas</surname> <given-names>A. P.</given-names></name> <name><surname>Ramey</surname> <given-names>M. M.</given-names></name> <name><surname>Riddell</surname> <given-names>C.</given-names></name> <name><surname>Kahana</surname> <given-names>M. J.</given-names></name> <name><surname>Wagner</surname> <given-names>A. D.</given-names></name></person-group> (<year>2022</year>). <italic>Recognition Memory: The Role of Recollection and Familiarity</italic>. <source>The Oxford Handbook of Human Memory.</source> <publisher-loc>Oxford</publisher-loc>: <publisher-name>Oxford University Press</publisher-name>.</citation></ref>
<ref id="B60">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>B.</given-names></name> <name><surname>Hu</surname> <given-names>X.</given-names></name> <name><surname>Li</surname> <given-names>Q.</given-names></name> <name><surname>Chen</surname> <given-names>A.</given-names></name></person-group> (<year>2023b</year>). <article-title>The stranding of the ideography: a nonnegligible role of the spoken language</article-title>. <source>Behav. Brain Sci</source>. <volume>46</volume>:<fpage>e259</fpage>. <pub-id pub-id-type="doi">10.1017/S0140525X2300064X</pub-id><pub-id pub-id-type="pmid">37779292</pub-id></citation></ref>
<ref id="B61">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>B.</given-names></name> <name><surname>Meng</surname> <given-names>Z.</given-names></name> <name><surname>Li</surname> <given-names>Q.</given-names></name> <name><surname>Chen</surname> <given-names>A.</given-names></name> <name><surname>Bodner</surname> <given-names>G. E.</given-names></name></person-group> (<year>2023a</year>). <article-title>EEG-based univariate and multivariate analyses reveal that multiple processes contribute to the production effect in recognition</article-title>. <source>Cortex</source> <volume>165</volume>, <fpage>57</fpage>&#x02013;<lpage>69</lpage>. <pub-id pub-id-type="doi">10.1016/j.cortex.2023.04.006</pub-id><pub-id pub-id-type="pmid">37267658</pub-id></citation></ref>
<ref id="B62">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>M. F.</given-names></name> <name><surname>Zimmer</surname> <given-names>H. D.</given-names></name> <name><surname>Fu</surname> <given-names>X.</given-names></name> <name><surname>Zheng</surname> <given-names>Z.</given-names></name></person-group> (<year>2020</year>). <article-title>Unitization of internal and external features contributes to associative recognition for faces: evidence from modulations of the FN400</article-title>. <source>Brain Res.</source> <volume>1748</volume>:<fpage>147077</fpage>. <pub-id pub-id-type="doi">10.1016/j.brainres.2020.147077</pub-id><pub-id pub-id-type="pmid">32861676</pub-id></citation></ref>
<ref id="B63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zormpa</surname> <given-names>E.</given-names></name> <name><surname>Brehm</surname> <given-names>L. E.</given-names></name> <name><surname>Hoedemaker</surname> <given-names>R. S.</given-names></name> <name><surname>Meyer</surname> <given-names>A. S.</given-names></name></person-group> (<year>2019</year>). <article-title>The production effect and the generation effect improve memory in picture naming</article-title>. <source>Memory</source> <volume>27</volume>, <fpage>340</fpage>&#x02013;<lpage>352</lpage>. <pub-id pub-id-type="doi">10.1080/09658211.2018.1510966</pub-id><pub-id pub-id-type="pmid">30141365</pub-id></citation></ref>
</ref-list>
</back>
</article>