<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Behav. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Behavioral Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Behav. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-5153</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnbeh.2026.1736261</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Single neuron responses in NCL, MVL, and Wulst during the observation of videos of conspecifics support population feature coding</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Santos Silva</surname> <given-names>Sara</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2953665/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>B&#x00FC;hn</surname> <given-names>Daniela</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Hall</surname> <given-names>Paxton</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Clark</surname> <given-names>William</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/592081/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Rose</surname> <given-names>Jonas</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/175423/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Colombo</surname> <given-names>Michael</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2020;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/22782/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Neural Basis of Learning, Faculty of Psychology, Institute of Cognitive Neuroscience, Ruhr University Bochum</institution>, <city>Bochum</city>, <country country="de">Germany</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Psychology, University of Otago</institution>, <city>Dunedin</city>, <country country="nz">New Zealand</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Neurobiology, Harvard Medical School</institution>, <city>Boston, MA</city>, <country country="us">United States</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Sara Santos Silva, <email xlink:href="mailto:sara.santossilva@ruhr-uni-bochum.de">sara.santossilva@ruhr-uni-bochum.de</email></corresp>
<fn fn-type="other" id="fn004"><label>&#x2020;</label><p>ORCID: Sara Santos Silva, <uri xlink:href="https://orcid.org/0009-0000-0322-3056">orcid.org/0009-0000-0322-3056</uri>; Jonas Rose, <uri xlink:href="https://orcid.org/0000-0003-1745-727X">orcid.org/0000-0003-1745-727X</uri>; Michael Colombo, <uri xlink:href="https://orcid.org/0000-0002-2574-4888">orcid.org/0000-0002-2574-4888</uri></p></fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-26">
<day>26</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>20</volume>
<elocation-id>1736261</elocation-id>
<history>
<date date-type="received">
<day>31</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>26</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>30</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Santos Silva, B&#x00FC;hn, Hall, Clark, Rose and Colombo.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Santos Silva, B&#x00FC;hn, Hall, Clark, Rose and Colombo</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-26">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Social visual processing in vertebrates employs sophisticated neural mechanisms ranging from categorical face cells to distributed sparse coding systems. In primates, recent evidence supports a &#x201C;tuning landscape&#x201D; model where neurons signal distances to prototypes in high-dimensional space rather than functioning as simple category detectors. However, social visual processing in non-mammalian animals remains poorly understood. We recorded single-unit activity from three functionally distinct pigeon brain regions&#x2014;mesopallium ventrolaterale (MVL), visual Wulst, and nidopallium caudolaterale (NCL)&#x2014;while birds viewed dynamic videos of conspecifics and control shapes performing courtship, eating, flying, and walking behaviors. Despite finding visually responsive neurons in all regions, we observed no categorical distinction between conspecific and control stimuli. Instead, population analyses revealed discrete temporal modulations corresponding to specific motion features&#x2014;bowing, wing-flapping, head-bobbing&#x2014;suggesting feature-based rather than categorical encoding of visual information. Sound-modulated visual units were significantly more prevalent in MVL than Wulst, indicating earlier multimodal integration in the tectofugal pathway than previously recognized. The absence of differential responses in NCL during passive viewing, contrasting with clear modulation in visual areas, suggests that this region is less involved in the automatic analysis of visual features. These findings suggest that avian visual structures use sparse coding principles similar to those of the mammalian visual cortex, where populations encode specific features through coordinated but brief neural responses rather than sustained categorical signals.</p>
</abstract>
<kwd-group>
<kwd>conspecific recognition</kwd>
<kwd>dynamic stimuli</kwd>
<kwd>feature-based coding</kwd>
<kwd>multimodal integration</kwd>
<kwd>pigeon</kwd>
<kwd>sparse coding</kwd>
<kwd>tectofugal pathway</kwd>
<kwd>thalamofugal pathway</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>Marsden Fund</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100009193</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the Royal Society of New Zealand Marsden Fund Grant to Michael Colombo (Royal Society of New Zealand Marsden grant UOO1917).</funding-statement>
</funding-group>
<counts>
<fig-count count="7"/>
<table-count count="0"/>
<equation-count count="8"/>
<ref-count count="59"/>
<page-count count="14"/>
<word-count count="9605"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Learning and Memory</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="S1" sec-type="intro">
<title>Introduction</title>
<p>Conspecific recognition represents a fundamental challenge in visual neuroscience, requiring rapid discrimination of socially relevant stimuli in the environment. Across vertebrate taxa, this capacity supports the acquisition of spatiotemporal information that is a prerequisite for the animal to understand and respond to changes in its surroundings (<xref ref-type="bibr" rid="B28">Kiyokawa et al., 2022</xref>; <xref ref-type="bibr" rid="B19">Greer et al., 2025</xref>).</p>
<p>The neural mechanisms underlying social recognition have been extensively studied in primates, which has revealed that the processing strategies used by neurons extend beyond simple categorical coding of visual stimuli. Early investigations of the primate inferior temporal cortex identified face-selective neurons (<xref ref-type="bibr" rid="B44">Perrett et al., 1982</xref>; <xref ref-type="bibr" rid="B13">Desimone et al., 1984</xref>), organized in discrete patches (<xref ref-type="bibr" rid="B53">Tsao et al., 2006</xref>), which led to influential face cell models of social visual processing. However, the application of information theory has consistently demonstrated that sensory neurons in the higher visual cortex employ distributed sparse coding mechanisms rather than simple categorical responses, supporting monkeys&#x2019; exceptional capabilities in object discrimination (<xref ref-type="bibr" rid="B47">Rolls and Tovee, 1995</xref>).</p>
<p>Recent theoretical advances have unified sparse coding principles within a broader &#x201C;tuning landscape&#x201D; framework (<xref ref-type="bibr" rid="B45">Ponce et al., 2019</xref>; <xref ref-type="bibr" rid="B54">Wang and Ponce, 2022</xref>). Rather than functioning as categorical detectors, visual sensory neurons signal distances to &#x201C;prototypes&#x201D;&#x2014;specific combinations of visual features within high-dimensional representational space. The model bridges visual encoding mechanisms with hippocampal cognitive mapping theories, where place cells encode locations in multidimensional task-relevant spaces (<xref ref-type="bibr" rid="B2">Aronov et al., 2017</xref>). The framework predicts that responses that appear categorical a priori actually emerge from population-level dynamics in feature space rather than from individual neurons encoding abstract categories of visual stimuli.</p>
<p>Dynamic social stimuli are used to reveal additional mechanisms in population coding beyond those that are detectable using presentation of static images. <xref ref-type="bibr" rid="B33">McMahon et al. (2015)</xref> demonstrated that during naturalistic movie viewing, macaque face patch neurons exhibit a remarkable diversity of selectivity, with spatially neighboring cells responding selectively to distinct aspects of social scenes. These included facial identity, proximity relationships, and body movements. Individual neurons maintained consistent responses across repeated presentations while encoding different dynamic features of social interactions, suggesting that natural social processing employs population codes that are more sophisticated than those apparent during the static presentation of images.</p>
<p>The avian visual system provides a compelling comparative framework for understanding the neural coding principles that are used to interpret conspecific behavior across vertebrate evolution. Pigeons possess sophisticated visual capabilities that support complex social behaviors such as social learning, individual recognition and the formation of social hierarchies (<xref ref-type="bibr" rid="B34">Nagy et al., 2013</xref>; <xref ref-type="bibr" rid="B5">Bouchard et al., 2007</xref>; <xref ref-type="bibr" rid="B12">Delacoux et al., 2025</xref>). Behavioral investigations demonstrate successful conspecific categorization from photographs and videos, including discrimination between familiar and unfamiliar individuals (<xref ref-type="bibr" rid="B50">Shimizu, 1998</xref>; <xref ref-type="bibr" rid="B35">Nakamura et al., 2003</xref>; <xref ref-type="bibr" rid="B56">Ware et al., 2015</xref>; <xref ref-type="bibr" rid="B57">Wilkinson et al., 2010</xref>).</p>
<p>Birds have two major visual pathways like those found in mammals (<xref ref-type="bibr" rid="B25">Karten, 1969</xref>; <xref ref-type="bibr" rid="B51">Shimizu and Bowers, 1999</xref>). The tectofugal pathway is functionally analogous to the mammalian colliculo-pulvinar-cortical pathway (<xref ref-type="bibr" rid="B32">Li et al., 2007</xref>; <xref ref-type="bibr" rid="B17">Frost, 2010</xref>). Previous studies using static stimuli in operant categorization tasks revealed that a population of neurons in the mesopallium ventrolaterale (MVL), a higher center of the tectofugal pathway, supports the decoding of different categories of visual stimuli with diverse features (<xref ref-type="bibr" rid="B3">Azizi et al., 2019</xref>; <xref ref-type="bibr" rid="B9">Clark et al., 2022a</xref>,<xref ref-type="bibr" rid="B8">b</xref>). The visual Wulst in the thalamofugal pathway is comparable with the striate visual cortex (<xref ref-type="bibr" rid="B4">Bischof et al., 2016</xref>) and is believed to mainly process simple features such as orientation in pigeons&#x2019; lateral surroundings (<xref ref-type="bibr" rid="B37">Ng et al., 2010</xref>).</p>
<p>In social interactions, the richness of sensory cues at a pigeon&#x2019;s disposal is much higher than what static stimuli can provide. There is a possibility that the pigeon&#x2019;s ability to identify a conspecific relies not only on a wide array of visual features, but also on other sensory cues, such as sound. We recorded single-unit neural responses in pigeons from three different regions: the MVL, along the tectofugal pathway; the visual Wulst, along the thalamofugal pathway; and the nidopallium caudolaterale (NCL), the equivalent structure to the mammalian prefrontal cortex (<xref ref-type="bibr" rid="B20">G&#x00FC;nt&#x00FC;rk&#x00FC;n, 2005</xref>; <xref ref-type="bibr" rid="B21">G&#x00FC;nt&#x00FC;rk&#x00FC;n and Bugnyar, 2016</xref>; <xref ref-type="bibr" rid="B38">Nieder, 2017</xref>), while head-fixed and body-restrained birds watched videos of conspecifics performing four different behaviors: courtship, eating, flying, and walking. The videos were corrected for low-level features to ensure that any selectivity was not due to differences in luminance or spatial frequency. By using dynamic stimuli with natural sound accompaniment, we observed that the population coding of dynamic social information during passive fixation is most consistent with a sparse code for visual feature information across the recorded visual brain regions.</p>
</sec>
<sec id="S2" sec-type="materials|methods">
<title>Materials and methods</title>
<sec id="S2.SS1">
<title>Subjects</title>
<p>The study used eleven pigeons (<italic>Columba livia</italic>) of undetermined sex as subjects. The birds were housed in individual cages, in a colony room maintained at 20&#x00B0;C, with a light-dark cycle of 12 h. During data collection, the pigeons were kept at their free-feeding weight, having unrestricted access to a mixture of grit, wheat, sunflower seeds, peas and corn. Water intake was controlled such that access to water was removed for 12 h prior to an experimental session.</p>
<p>All experimental, animal handling and housing procedures were carried out in accordance with the University of Otago&#x2019;s Code of Ethical Conduct for the Manipulation of Animals and approved by the Animal Ethics Committee of the same entity.</p>
</sec>
<sec id="S2.SS2">
<title>Experimental apparatus</title>
<p>The experiment was performed with head-restrained and body-restrained pigeons. A custom-made metal frame was built to accommodate the fixation of the animal and the administration of water. The apparatus contained a water receptacle that, connected to a pump, filled and emptied automatically (<xref ref-type="fig" rid="F1">Figure 1C</xref>). The pump was controlled using custom code, written in MATLAB, jointly with the OTBR (<xref ref-type="bibr" rid="B41">Otto and Rose, 2023</xref>) and Psychophysics toolboxes (<xref ref-type="bibr" rid="B6">Brainard, 1997</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption><p>Experimental setup and visual stimulation. <bold>(A)</bold> Example frames from four videos of the experimental stimulus set. Each frame on the top row is part of a video that depicts one of the target behaviors: courtship, eating, flying and walking. The bottom row depicts the corresponding frame in the version of the video where the action is performed by the greeble. The greeble videos preserve the motion performed by the pigeon. <bold>(B)</bold> Schematic representation of the stimulation protocol. A trial consisted of the presentation of one video. In 10% of the trials of a given session, after the video presentation, the pigeons had 2.4 s of access to water. Subsequently, there was a 6 s inter-trial interval. During the inter-trial interval and the period of water access, the screens remained black. <bold>(C)</bold> Labeled photograph of the experimental setup.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnbeh-20-1736261-g001.tif">
<alt-text content-type="machine-generated">Figure contains three panels. Panel A shows eight photos of pigeons performing courtship, eating, flying, and walking in a enclosure. Panel B presents a timeline of an experimental trial, indicating a six second inter-trial interval, a video presentation of two point four to four seconds, and water access in ten percent of trials. Panel C is a labeled photograph of an experimental setup including stimuli display, attachment for implant&#x2019;s metal post, water pump, water receptacle, and velcro straps.</alt-text>
</graphic>
</fig>
<p>There were also two monitors for stimulus display: they were placed laterally at 74 cm, one on either side of the pigeons&#x2019; eyes, with an azimuth that centered the images on the optic axis (<xref ref-type="bibr" rid="B36">Nalbach et al., 1990</xref>) of the pigeon. The monitors were gamma corrected.</p>
</sec>
<sec id="S2.SS3">
<title>Task/visual stimulation</title>
<p>Head-restrained and body-restrained pigeons passively viewed a prepared set of videos that displayed pigeons, or a control shape, performing the following behaviors: courtship, eating, flying and walking. The sequence of videos was pseudorandomized, guaranteeing that the same behavioral category was not displayed consecutively more than three times. Each video was preceded by a 6 s inter-trial interval, and was shown six times per session, three times on the left monitor and three times on the right monitor. In total, there were 432 trials, and a session took approximately 75 min to complete. A random 10% of the trials were followed by 2.4 s of access to water (<xref ref-type="fig" rid="F1">Figure 1B</xref>).</p>
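<p>As an illustration, the sequencing constraint can be implemented as a simple rejection-sampling loop. The sketch below is written in R (the original task code was written in MATLAB, with the OTBR and Psychophysics toolboxes), and the category labels and counts are illustrative placeholders rather than the study&#x2019;s actual trial list.</p>
<preformat preformat-type="code">
# draw a pseudorandom trial order in which no behavioral category
# appears more than three times in a row (illustrative sketch)
categories = rep(c("courtship", "eating", "flying", "walking"), times = 108)
repeat {
  trial_order = sample(categories)
  # rle() returns the lengths of consecutive runs of identical labels
  if (max(rle(trial_order)$lengths) &lt;= 3) break
}
</preformat>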
</sec>
<sec id="S2.SS4">
<title>Stimuli</title>
<p>The stimulus set of this experiment was composed of videos recorded in the lab (GoPro Hero 4 Session), with durations ranging from 2.2 to 4 s, depicting four different behaviors: courtship, eating, flying and walking (<xref ref-type="fig" rid="F1">Figure 1</xref>). Within each category, four different videos were created to exemplify the same behavior, such that there is some variation in the way it is represented. Although all videos were recorded in the same aviary enclosure, which gave them a very similar background and illumination, they were recorded so that the same action was performed in different places on screen. Occasionally, different camera angles were used.</p>
<p>For each video, we created a corresponding control video in which the background was preserved but the motion that characterizes the targeted behavior was performed by a 2D shape, called a greeble (<xref ref-type="bibr" rid="B18">Gauthier et al., 1998</xref>). The greebles were edited to be the same size as the pigeons and were given the dominant color of a background-subtracted frame of one of the pigeon videos. That is, the color of the greeble was the average color of the pigeon. All control videos were edited using Adobe After Effects (<xref ref-type="bibr" rid="B1">Adobe Inc., 2023</xref>).</p>
<p>Additionally, the stimulus set included four auxiliary videos, showing two pigeons engaging in no specific behavior. The <italic>two-pigeon stimulus set</italic> was included to disambiguate whether neuronal responses to courtship behavior represented courtship behavior or the presence of two animals on screen.</p>
<p>Furthermore, each video had a version in which the natural sound produced during the recorded behavior was preserved, and a version that was silent. Hence, the final stimulus set consisted of 72 videos, of which 36 were visually unique.</p>
</sec>
<sec id="S2.SS5">
<title>Processing of stimuli</title>
<p>An important aspect to consider when comparing the neural responses to stimuli ascribed to different categories is whether the observed differences are driven by low-level stimulus features, such as luminance and spatial frequency (<xref ref-type="bibr" rid="B9">Clark et al., 2022a</xref>,<xref ref-type="bibr" rid="B8">b</xref>). Therefore, we averaged the luminance values across all frames of all pigeon videos. Equalization was performed separately for the background and the subject. Subsequently, the average values were applied to the control (greeble) videos. The spatial frequency was equalized across all videos (pigeons and controls) but without changing the individual orientation (angle of the light and dark bars, obtained from the fast Fourier transform of the image) of each frame (<xref ref-type="fig" rid="F2">Figure 2</xref>; <xref ref-type="bibr" rid="B58">Willenbockel et al., 2010</xref>). We chose this type of equalization to preserve the frames&#x2019; natural look and minimize any visual aberrations when they were assembled to compile the video. Each frame was converted to the HSV color space and the low-level feature transformations were applied to the value layer (<xref ref-type="bibr" rid="B11">Dal Ben, 2023</xref>). The correction of luminance and spatial frequency was performed using MATLAB (Mathworks Inc., Natick, MA, United States) and the SHINE toolbox (<xref ref-type="bibr" rid="B58">Willenbockel et al., 2010</xref>, with some code adapted from the SHINE_color toolbox; <xref ref-type="bibr" rid="B11">Dal Ben, 2023</xref>). All videos were captured and displayed at 30 frames per second (a subset of the stimuli is provided in <xref ref-type="supplementary-material" rid="DS1">Supplementary material</xref>).</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption><p>Example video frames before and after low-level feature correction. Images on the left column are original frames from a courtship video (top left), and a greeble walking video (bottom left). The images on the right column are the corresponding frames after the equalization of luminance and spatial frequency.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnbeh-20-1736261-g002.tif">
<alt-text content-type="machine-generated">Four-panel comparison exemplifying the video equalization. Top left: two pigeons labeled &#x201C;Original.&#x201D; Top right: same scene labeled &#x201C;Corrected low-level features.&#x201D; Bottom left and right: both views replace pigeons with gray computer-generated 3D shapes in the same positions.</alt-text>
</graphic>
</fig>
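<p>The luminance step can be illustrated as a value-channel shift in HSV space. The study performed these corrections in MATLAB with the SHINE and SHINE_color toolboxes; the base-R sketch below only conveys the idea for a single frame, and the function and argument names are our own, not part of either toolbox.</p>
<preformat preformat-type="code">
# conceptual sketch: shift a frame's HSV value channel toward a target
# mean luminance (frame: H x W x 3 RGB array with values in [0, 1])
equalize_value = function(frame, target_mean) {
  m = rgb2hsv(c(frame[, , 1]), c(frame[, , 2]), c(frame[, , 3]),
              maxColorValue = 1)
  v = m["v", ] - mean(m["v", ]) + target_mean  # adjust value channel only
  v = pmin(pmax(v, 0), 1)                      # clip back into range
  rgb_new = col2rgb(hsv(m["h", ], m["s", ], v)) / 255
  out = frame
  out[, , 1] = rgb_new["red", ]
  out[, , 2] = rgb_new["green", ]
  out[, , 3] = rgb_new["blue", ]
  out
}
</preformat>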
<p>Lastly, we quantified how visually dissimilar each video was from its control, on a frame-by-frame basis. This way, we created an expectation of which moments of the video could drive an accentuated difference in neuronal responding, purely by the difference in the pixel space occupied by the pigeon and the greeble on the image (further details and results in <xref ref-type="supplementary-material" rid="DS1">Supplementary material</xref>: frame-by-frame correlation).</p>
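<p>A minimal sketch of such a frame-by-frame comparison, assuming each video is held as a list of pixel arrays (the variable names are hypothetical, and the exact dissimilarity measure used in the Supplementary material may differ), is:</p>
<preformat preformat-type="code">
# dissimilarity per frame: 1 minus the Pearson correlation between the
# flattened pixel values of a pigeon frame and its control frame
dissim = mapply(function(p, g) 1 - cor(c(p), c(g)),
                pigeon_frames, greeble_frames)
</preformat>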
</sec>
<sec id="S2.SS6">
<title>Surgery</title>
<p>We performed stereotactic surgery to implant one microdrive, with a probe of eight 25 &#x03BC;m formvar-coated nichrome wires, per animal. For NCL [anteroposterior (AP), 5.5; mediolateral (ML), (&#x00B1;) 7.5], two pigeons (K4, X27) were implanted on the left hemisphere and one (K6) on the right hemisphere. For MVL [AP 10.5; ML (&#x00B1;) 6], two pigeons (K8, JIM) were implanted on the left hemisphere and one (PAM) on the right. For the visual Wulst [AP 11; ML (&#x00B1;) 3], two pigeons (C1, O4) were implanted on the left hemisphere and three (O9, O8, X15) on the right hemisphere. The coordinates for implantation followed histological and inactivation studies (<xref ref-type="bibr" rid="B26">Karten and Hodos, 1967</xref>; <xref ref-type="bibr" rid="B29">Kr&#x00F6;ner and G&#x00FC;nt&#x00FC;rk&#x00FC;n, 1999</xref>).</p>
<p>The birds were anesthetized with a mixture of Ketamine (30 mg/kg) and Xylazine (6 mg/kg), administered intramuscularly. A topical anesthetic (10% Xylocaine) was applied to the scalp and an incision was made to expose the skull. Subsequently, a small craniotomy was made over the target structure, and the dura mater was removed. For each bird, a microdrive was installed over one of the target regions: NCL, MVL or Wulst. The tips of the wire bundles were lowered to the beginning of the target region during surgery. To support the implant, stainless steel screws were inserted into the skull, and one of them also served as a ground screw. The microdrive was secured to the skull and to the screws with dental acrylic (Jet Acrylic, Lang Dental). Around the microdrive, we added a short metal post that was used to attach the animal to the frame of the experimental apparatus. In the days following the surgery, the animals were given analgesics and allowed to recover. On the first 3 days of recovery, for pain control, the animals received an intramuscular injection of Carprieve (4 mg/kg) and the wound margin was sprayed with 10% Xylocaine.</p>
</sec>
<sec id="S2.SS7">
<title>Neuronal recordings</title>
<p>We used probes of eight wires to record extracellular single-neuron activity. Before each recording session, the probe was checked for neuronal signals: pairs of wires were tested to assess which combination of recording wire and local reference yielded the best signal isolation. If the signal-to-noise ratio of the isolated unit was at least 2:1, the unit was considered for recording. The subject was not exposed to any task-related visual stimulation during this process.</p>
<p>The signal was sampled at 20,000 Hz and amplified using a Grass P511K amplifier (Grass Instruments, Quincy, MA, United States), and a 5 Hz notch filter was applied. The signal was digitized using a CED (Cambridge Electronic Design, Cambridge, United Kingdom) interface. Spike sorting was performed with the Spike2 software, using the system&#x2019;s template-matching capabilities.</p>
<p>The experimental paradigm ran on a separate computer that sent triggers of relevant events to the CED system, which allowed the alignment of the visualization paradigm with the neural data.</p>
<p>After each recording session, the probe was advanced 42 &#x03BC;m and the animal was returned to its home cage. If it was not possible to isolate a unit that satisfied the inclusion criteria, the probe was advanced 21 &#x03BC;m and the session did not take place.</p>
</sec>
<sec id="S2.SS8">
<title>Statistics and reproducibility</title>
<p>In this experiment, we analyzed single-cell activity in three regions of the pigeon brain (Wulst, MVL, NCL) during the passive observation of videos depicting pigeons performing different behaviors, and their respective controls. We were interested in quantifying the neuronal responses of each region to video features shared among multiple stimuli. For instance, we wanted to assess neural modulation congruent with the presentation of a video where a specific behavior is performed by a pigeon. It is possible to argue that the features we expect to drive neuronal responses are nested within each video. Considering the nested structure of the stimuli and the repeated measurements of the same neuron, we used several generalized linear mixed models (GLMMs) to assess selectivity at the single-neuron and population level. When interpreting the models, statistical significance was considered at a level of &#x03B1; = 0.05.</p>
<p>For all neurons, only the trials where the videos were shown on the screen contralateral to the pigeon&#x2019;s implanted hemisphere were considered for analysis, due to the almost complete decussation of the optic nerve in this species and its impact on the ascending visual projections (<xref ref-type="bibr" rid="B22">G&#x00FC;nt&#x00FC;rk&#x00FC;n and Hahmann, 1999</xref>). Therefore, for a given neuron, we recorded its activity to the same visual stimulus six times: three presentations of the video with sound and three without sound. Data analysis was performed in R (<xref ref-type="bibr" rid="B46">R Core Team, 2024</xref>) [packages: glmmTMB, DHARMa (<xref ref-type="bibr" rid="B23">Hartig, 2024</xref>), emmeans (<xref ref-type="bibr" rid="B30">Lenth, 2025</xref>)] and MATLAB (<xref ref-type="bibr" rid="B52">The MathWorks Inc., 2024</xref>).</p>
</sec>
<sec id="S2.SS9">
<title>Neural data analysis</title>
<p>For each unit, spike counts were binned in 200 ms bins. To determine how the data of a recorded region was distributed, we fitted two null models: one with the Poisson distribution and the log link function, and another with the negative binomial distribution and the log link function (<xref ref-type="bibr" rid="B48">Salinas Ru&#x00ED;z et al., 2023</xref>).</p>
<disp-formula id="S2.Ex1">
<mml:math id="M1">
<mml:mrow>
<mml:mi>n</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>d</mml:mi>
<mml:mi>e</mml:mi>
<mml:mpadded width="+3.3pt">
<mml:mi>l</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>C</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>t</mml:mi>
<mml:mpadded width="+5pt">
<mml:mi>s</mml:mi>
</mml:mpadded>
<mml:mo>&#x223C;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>|</mml:mo>
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>N</mml:mi>
<mml:mi>r</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
<mml:mo rspace="7.5pt">+</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>|</mml:mo>
<mml:mi>n</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>I</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>The model fitted with the negative binomial distribution had a better fit to the data than the Poisson model (see Results). Subsequently, we used the DHARMa package in R for residual diagnostics and zero-inflation testing. The test results informed the decision to fit all the models used in the study with the negative binomial distribution and the log link function (R package glmmTMB).</p>
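<p>In glmmTMB, this comparison amounts to fitting the null model under both families and comparing the fits; a minimal sketch, assuming a long-format data frame with one row per 200 ms bin (column names are illustrative), is:</p>
<preformat preformat-type="code">
library(glmmTMB)
library(DHARMa)

# bins: one row per 200 ms bin, with columns spikeCounts, trialNr, neuronID
m_pois = glmmTMB(spikeCounts ~ 1 + (1 | trialNr) + (1 | neuronID),
                 family = poisson(link = "log"), data = bins)
m_nb   = glmmTMB(spikeCounts ~ 1 + (1 | trialNr) + (1 | neuronID),
                 family = nbinom2(link = "log"), data = bins)

AIC(m_pois, m_nb)    # lower AIC favors the negative binomial
anova(m_pois, m_nb)  # likelihood ratio test between the nested models

# DHARMa residual diagnostics and zero-inflation check on the winning model
sim = simulateResiduals(m_nb)
testZeroInflation(sim)
</preformat>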
<sec id="S2.SS9.SSS1">
<title>Single neuron analysis</title>
<p>At the single-neuron level, we tested if firing rate modulations, relative to baseline, could be attributed to the main effect of &#x201C;actor&#x201D; (pigeon or greeble), &#x201C;behavioral category&#x201D; (courtship, eating, flying or walking) or their interaction. We also measured the contribution of the auditory component of the videos by including an interaction term between &#x201C;actor&#x201D; and &#x201C;sound&#x201D; (sound On or Off). The random effects component of the model included a random intercept for trial number, to account for the possible visual adaptation to the stimuli throughout the session.</p>
<disp-formula id="S2.Ex2">
<mml:math id="M3">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mpadded width="+3.3pt">
<mml:mn>1</mml:mn>
</mml:mpadded>
<mml:mo rspace="10.8pt">=</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>C</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>t</mml:mi>
<mml:mpadded width="+5pt">
<mml:mi>s</mml:mi>
</mml:mpadded>
<mml:mo>&#x223C;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>o</mml:mi>
<mml:mpadded width="+3.3pt">
<mml:mi>r</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">&#x002A;</mml:mo>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>h</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>v</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>a</mml:mi>
<mml:mpadded width="+3.3pt">
<mml:mi>l</mml:mi>
</mml:mpadded>
<mml:mi>C</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>g</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mpadded width="+3.3pt">
<mml:mi>y</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">+</mml:mo>
<mml:mi>a</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>o</mml:mi>
<mml:mpadded width="+3.3pt">
<mml:mi>r</mml:mi>
</mml:mpadded>
<mml:mo>&#x002A;</mml:mo>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="S2.Ex3">
<mml:math id="M5">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>o</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>u</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mpadded width="+5pt">
<mml:mi>d</mml:mi>
</mml:mpadded>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mrow>
<mml:mpadded width="+5pt">
<mml:mn>1</mml:mn>
</mml:mpadded>
<mml:mo lspace="2.5pt" rspace="7.5pt">|</mml:mo>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>r</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:mi>l</mml:mi>
</mml:mpadded>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>u</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>m</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>b</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>e</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>r</mml:mi>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<p>A model was fitted to the binned spike counts of each neuron. The dataset consisted of the middle 2 s of the baseline and the entire video presentation of every trial. The baseline of the trials of the first courtship video was set as the intercept of the <italic>m1</italic> model.</p>
<p>The output of the model was used for hypothesis testing: the <italic>p</italic>-value for each parameter was calculated with the Wald Z-statistic. Furthermore, comparisons between model estimates were done through estimated marginal means (R package emmeans) and the resulting <italic>p</italic>-values were adjusted with the Holm&#x2013;Bonferroni method. Neurons whose model had a singular fit were excluded from the analysis.</p>
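<p>A sketch of this per-neuron fit and the follow-up contrasts, under the same illustrative column names as above, could read:</p>
<preformat preformat-type="code">
library(glmmTMB)
library(emmeans)

# one_neuron: baseline and video bins of every trial for a single unit
m1 = glmmTMB(spikeCounts ~ actor * behavioralCategory + actor * sound
             + (1 | trialNumber),
             family = nbinom2(link = "log"), data = one_neuron)

summary(m1)  # Wald z-statistics and p-values per parameter

# pairwise comparisons of estimated marginal means, Holm-adjusted
emm = emmeans(m1, ~ actor * behavioralCategory)
pairs(emm, adjust = "holm")
</preformat>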
<p>From the output of the model, a unit was classified as <italic>actor-selective</italic> if the comparison of firing rate between the baseline and video presentation resulted exclusively in a significant interaction effect between one actor and two or more behaviors, or only in a significant main effect for one of the actors; the classification of <italic>behavior-selective</italic> was given to units that responded significantly to a specific behavior, for both pigeons and greebles; lastly, <italic>actor-and-behavior-selective neurons</italic> were the ones that only presented a significant interaction between one behavior and one actor. Furthermore, it was possible to assess whether the presence of sound contributed to the observed firing rate modulations. We counted how many neurons fitted each classification and compared their proportions between the three regions: the comparisons were performed using Chi-Squared tests or, if the expected counts in a group were below 5, Fisher&#x2019;s Exact test, in both cases with Holm&#x2013;Bonferroni correction.</p>
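<p>The between-region comparisons can be sketched as follows; the counts shown are placeholders standing in for the tallies reported in the Results, and the decision rule between tests mirrors the criterion described above.</p>
<preformat preformat-type="code">
# proportion of sound-modulated units, MVL vs Wulst (placeholder counts)
tab = rbind(MVL   = c(sound = 14, other = 51),
            Wulst = c(sound = 4,  other = 87))

if (any(chisq.test(tab)$expected &lt; 5)) {
  fisher.test(tab)$p.value  # small expected counts: Fisher's Exact test
} else {
  chisq.test(tab)$p.value
}
# the pairwise p-values are then adjusted with p.adjust(..., method = "holm")
</preformat>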
</sec>
<sec id="S2.SS9.SSS2">
<title>Population analysis</title>
<p>In the set of neurons that were tuned to stimuli, we were interested in assessing which moments of the video drove the observed neuronal modulation. For this analysis, we fitted a model that explored the influence of the actor and of the moment in the video on firing rate.</p>
<disp-formula id="S2.Ex4">
<mml:math id="M7">
<mml:mrow>
<mml:mi>m</mml:mi>
<mml:mpadded width="+3.3pt">
<mml:mn>2</mml:mn>
</mml:mpadded>
<mml:mo rspace="10.8pt">=</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>C</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>t</mml:mi>
<mml:mpadded width="+5pt">
<mml:mi>s</mml:mi>
</mml:mpadded>
<mml:mo>&#x223C;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>o</mml:mi>
<mml:mpadded width="+3.3pt">
<mml:mi>r</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">&#x002A;</mml:mo>
<mml:mi>B</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>N</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>I</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>T</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>a</mml:mi>
<mml:mpadded width="+3.3pt">
<mml:mi>l</mml:mi>
</mml:mpadded>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mo>(</mml:mo>
<mml:mpadded width="+5pt">
<mml:mn>1</mml:mn>
</mml:mpadded>
<mml:mo>|</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
<disp-formula id="S2.Ex5">
<mml:math id="M9">
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mi>N</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>m</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>r</mml:mi>
<mml:mo>:</mml:mo>
<mml:mi>B</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>N</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>I</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>T</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>i</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
<mml:mo>+</mml:mo>
<mml:mo>(</mml:mo>
<mml:mpadded width="+5pt">
<mml:mn>1</mml:mn>
</mml:mpadded>
<mml:mo rspace="7.5pt">|</mml:mo>
<mml:mi>s</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>b</mml:mi>
<mml:mi>j</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>I</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo>:</mml:mo>
<mml:mi>n</mml:mi>
<mml:mi>e</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>I</mml:mi>
<mml:mi>D</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</disp-formula>
<p>We ran the model <italic>m2</italic>, for every region, on the subset of units labeled actor-selective and tuned to the actor pigeon. As in the previous test, the neural activity recorded in each trial was segmented in bins of 200 ms. We tested the same neuronal subset for each of the 16 videos (four videos for each of the behaviors) and detected where the activity of the subpopulation was significantly different from baseline. Then, multiple comparisons were performed to assess whether the predicted spike counts for the temporal intervals of the pigeon videos in which neural activity was significantly modulated from baseline differed between pigeons and greebles.</p>
<p>It is relevant to emphasize that the random effects component of the model <italic>m2</italic> contains a random effect for neuron identity, which accounts for differences in baseline firing for each unit. Neuron identity is nested within subjects, which accounts for subject-specific effects and prevents us from drawing conclusions based only on one animal (<xref ref-type="bibr" rid="B59">Yu et al., 2022</xref>).</p>
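<p>Written out in glmmTMB syntax, the population model described above takes a form like the following; the factor names follow the formula, while the data frame name is illustrative.</p>
<preformat preformat-type="code">
library(glmmTMB)

# one_video: pigeon-selective subset of one region, one video at a time
m2 = glmmTMB(spikeCounts ~ actor * BinNrInTrial
             + (1 | trialNumber:BinNrInTrial)  # bin-by-trial fluctuations
             + (1 | subjectID:neuronID),       # neurons nested in subjects
             family = nbinom2(link = "log"), data = one_video)
</preformat>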
<p>To further assess how the dynamics of the recorded population change through the progression of each video, a population state-space analysis was conducted per region. Conceptually, a population state-space analysis represents the activity of each recorded neuron as one axis of a coordinate system; together, these axes map the population activity onto an <italic>n</italic>-dimensional space, where <italic>n</italic> corresponds to the total number of recorded neurons, and the activity of the population at every recorded moment corresponds to a single point in this space. If there is information encoded at the population level, the spiking activity of several neurons will covary according to a smaller set of variables than the number of recorded units. Dimensionality reduction was performed using a principal components analysis. The first three dimensions were preserved, and their projection onto the data was used to define trajectories for each condition (16 videos &#x00D7; 2 actors). The trajectories characterize the instantaneous population firing rate through the progression of each video.</p>
<p>The population state-space analysis was performed on all task-engaged neurons (MVL <italic>n</italic> = 49; Wulst <italic>n</italic> = 74; NCL <italic>n</italic> = 49) and done separately for every recorded region. For each neuron, each trial&#x2019;s data was binned in 200 ms bins, with a sliding window of 40 ms. It was necessary to average the activity across trials of the same condition and temporally smooth the data, to minimize fluctuations originating in random spiking (<xref ref-type="bibr" rid="B10">Cunningham and Yu, 2014</xref>). Therefore, the data was averaged per condition (video &#x00D7; actor) and further smoothed with a 200 ms Gaussian kernel. Neural activity was also z-scored: for each neuron, the neuron&#x2019;s average baseline spike count (calculated over a 2 s period in the middle of the 6 s baseline) was subtracted from the averaged trial activity. The difference was subsequently divided by the standard deviation of the baseline spike count. The last data processing step was to reduce the baseline to only 600 ms, to improve the readability of the neural trajectories.</p>
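<p>For a single neuron, this preprocessing chain can be sketched as below; the kernel width and variable names are illustrative stand-ins for the 200 ms Gaussian smoothing and baseline normalization described above (the original analysis was performed in MATLAB).</p>
<preformat preformat-type="code">
# avg_counts: condition-averaged binned counts (200 ms bins, 40 ms steps)
kernel = dnorm(seq(-2, 2, length.out = 9))
kernel = kernel / sum(kernel)                       # unit-area Gaussian
smoothed = stats::filter(avg_counts, kernel, sides = 2)

# z-score against the 2 s baseline window
z = (smoothed - mean(baseline_counts)) / sd(baseline_counts)
</preformat>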
<p>We built an activity matrix that concatenated the time-resolved activity of all neurons in each condition: to guarantee that we compared the same principal components across all conditions, the temporally resolved responses of all neurons, to all combinations of video and actor, were treated as pseudo-simultaneous (<xref ref-type="bibr" rid="B15">Ditz and Nieder, 2020</xref>; <xref ref-type="bibr" rid="B40">Ott and Nieder, 2016</xref>).</p>
<p>From the PCA, we obtained the first three principal components and calculated the trajectories for each condition. The assessment of the trajectories of each video allowed us to observe differences in population dynamics when the same video had pigeons or greebles on screen: these differences were quantified by calculating the Euclidean distances between the trajectories generated by the same video when the actor was a pigeon or a greeble.</p>
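<p>A compact sketch of the trajectory construction and distance computation is given below; the matrix layout, condition labels and variable names are our own assumptions, not the original MATLAB implementation.</p>
<preformat preformat-type="code">
# activity_matrix: rows = time bins stacked over the 32 conditions,
# columns = neurons (z-scored, condition-averaged, smoothed activity)
pc = prcomp(activity_matrix, center = TRUE, scale. = FALSE)
traj = pc$x[, 1:3]  # projection onto the first three principal components

# recover one trajectory per condition (n_bins rows each)
segs = split.data.frame(traj, rep(condition_labels, each = n_bins))

# bin-wise Euclidean distance between pigeon and greeble trajectories
d = sqrt(rowSums((segs[["video01_pigeon"]] - segs[["video01_greeble"]])^2))
</preformat>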
</sec>
</sec>
<sec id="S2.SS10">
<title>Histology and electrode track reconstruction</title>
<p>At the end of the experiment the pigeons were euthanized with CO<sub>2</sub> by placing them in a 450 mm (Length) &#x00D7; 340 mm (Width) &#x00D7; 300 mm (Depth) chamber, with a CO<sub>2</sub> delivery rate of 16 L/min, an amount that equated to displacing 35% of the chamber volume per minute. The CO<sub>2</sub> was left on for 5 min, and for at least 2 min beyond the point where the animal stopped breathing. The animal was then immediately perfused with 10% formalin in physiological saline. The brains were removed from the skull and kept in 10% formalin for at least 5 days, followed by sucrose formalin (10% formalin, 30% sucrose), and allowed to sink twice. The brains were frozen and sectioned at 40 &#x03BC;m, with every 10th section mounted and stained with thionin.</p>
</sec>
</sec>
<sec id="S3" sec-type="results">
<title>Results</title>
<sec id="S3.SS1">
<title>Electrode positions</title>
<p>All electrode tracks were within the borders of the targeted NCL, MVL, and Wulst regions as defined by <xref ref-type="bibr" rid="B26">Karten and Hodos (1967)</xref> and <xref ref-type="bibr" rid="B29">Kr&#x00F6;ner and G&#x00FC;nt&#x00FC;rk&#x00FC;n (1999)</xref>. For NCL, the intended track positions were AP +5.5 and ML &#x00B1; 7.5. The track positions for two NCL birds (K6, X27) were as intended. The track position for K4 was AP +6.0, ML +7.0, differing from the intended AP and ML positions by 0.5 mm.</p>
<p>For MVL, the intended track positions were AP +10.5 and ML &#x00B1; 6.0. The track positions for two MVL birds (JIM, PAM) were as intended. The position for K8 was AP +10.5, ML &#x00B1; 5.5, differing only from the intended ML position by 0.5 mm.</p>
<p>For Wulst, the intended track positions were AP +11.0 and ML &#x00B1; 3.0. The track positions for three Wulst birds (C1, O4, X15) were as intended. The positions for O8 and O9 were AP +10.75, ML &#x00B1; 3.0, differing only from the intended AP position by 0.25 mm (<xref ref-type="fig" rid="F3">Figure 3</xref>).</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption><p>Electrode track records. Visualization of the electrode track reconstructions, for every pigeon (pigeon identifiers are shown on top of the respective track). The targeted brain regions are shaded in yellow, and the target coordinates are displayed to the right of the slices. Abbreviations: apical part of the hyperpallium (HA), densocellular part of the hyperpallium (HD), ventricle (V), nidopallium (N), mesopallium ventrolaterale (MVL), entopallium (Ento), medial striatum (MSt), tectum opticum (TeO), nucleus rotundus (Rt), hippocampus (H), dorsolateral corticoid area (CDL), nidopallium caudolaterale (NCL), dorsal arcopallium (AD), intermediate arcopallium (AI), caudal nidopallium (NC).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnbeh-20-1736261-g003.tif">
<alt-text content-type="machine-generated">Three coronal brain section illustrations labeled WULST, MVL, and NCL display shaded yellow areas indicating specific regions of interest, colored vertical lines representing electrode track positions, and labeled anatomical structures with coordinates.</alt-text>
</graphic>
</fig>
</sec>
<sec id="S3.SS2">
<title>Single-unit analysis</title>
<p>In this experiment, we recorded neural activity in three regions of the pigeon brain (Wulst, MVL and NCL) during the passive viewing of videos depicting conspecifics or control shapes performing the following behaviors: courtship, eating, flying and walking. Half of the presentations, of the pigeon videos as well as of the control-shape videos, included the sound of the pigeon videos.</p>
<p>We recorded and analyzed 103 units in MVL, 123 in NCL and 213 units in Wulst. Data analysis began with the assessment of the data distribution of the recorded neuronal populations. For all regions, the negative binomial distribution had a better fit to the data (lower AIC) than the Poisson distribution (log likelihood ratio test between models for the MVL data: &#x03C7;<sup>2</sup>(1) = 22042, <italic>p</italic> &#x003C; 0.0001; NCL: &#x03C7;<sup>2</sup>(1) = 22853, <italic>p</italic> &#x003C; 0.0001; Wulst: &#x03C7;<sup>2</sup>(1) = 9808.9, <italic>p</italic> &#x003C; 0.0001). Furthermore, the negative binomial null models were not zero-inflated (MVL: ratio = 1.0067, <italic>p</italic> = 0.79; NCL: ratio = 1.0183, <italic>p</italic> = 0.38; Wulst: ratio = 1.0014, <italic>p</italic> = 0.88) and the residuals fitted the assumptions of the distribution.</p>
<p>We first assessed if the spike counts of each neuron were significantly modulated, compared to baseline, for the fixed effects of behavior (courtship, eating, flying and walking, and non-interactive two pigeons), actor (the video depicted a pigeon or a greeble) and the interaction between them. Additionally, we tested the interaction between the presence of sound and the actor on screen (GLMM per neuron: m1, described in Methods).</p>
<p>In MVL, the activity of 65 out of 103 recorded neurons (63%) was task modulated. In this dataset, 15 units (23%) were categorically tuned to the presence of the actor: 7 units (11%) displayed the greatest modulation when a pigeon was on screen; 5 (8%) were selective for the greebles, and 3 units (5%) had an excitatory response to the pigeon videos but an inhibitory one to the greeble videos. Additionally, 2 of the units that coded for the pigeon were sound modulated: firing rates differed significantly between the trials in which the pigeon videos were displayed with sound on, compared to the same videos without sound, and the greeble videos with and without sound (<xref ref-type="fig" rid="F4">Figure 4</xref>).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption><p>Example sound modulated unit in MVL. The output of the model (see <xref ref-type="supplementary-material" rid="DS1">Supplementary material</xref>) indicated that this neuron was pigeon selective and sound modulated. The firing rate of the unit depicted here is modulated differently for the pigeon videos (blue and green traces) than for the greeble videos (orange and red traces). There is a visible separation between presentations of the pigeon videos with sound and presentations of the pigeon videos without sound.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnbeh-20-1736261-g004.tif">
<alt-text content-type="machine-generated">Two-panel scientific figure shows neural responses to stimuli. The top panel is a raster plot with dots indicating spikes for four conditions: pigeon sound on (blue), pigeon sound off (green), greeble sound on (red), and greeble sound off (orange). The bottom panel is a peristimulus time histogram showing firing rate over time for the same four conditions, with a clear increase around video onset at zero seconds, and separation between the pigeon sound on and the greeble conditions. There is a legend identifying the color codes.</alt-text>
</graphic>
</fig>
<p>Moreover, 5 units (8%) were significantly modulated by one specific behavior, for both pigeons and greebles, and 21 units (32%) were modulated by a specific behavior performed only by one actor. We also found one unit that was selective for more than one behavior and 7 units that were predominantly selective for the two-pigeon controls. The remaining units were visually selective but not modulated by the content of the videos. Among the behaviorally modulated units and the exclusively visually selective units, 12 units presented a significant interaction between actor and sound and differentiated between the sound on and sound off presentations of the pigeon or greeble videos.</p>
<p>We applied the same categorical analysis (model m1) to the 213 units recorded in Wulst. We found 91 task modulated units. The activity observed was best explained by the presence of a pigeon on screen in 13 units (14%). Another 11 units (12%) fulfilled the criteria of having a significant interaction between the greeble and at least 2 behaviors, or exclusively having a significant main effect for the actor greeble. Nine units (10%) were classified as behavior selective. Furthermore, 28 units (31%) were selective for a specific behavior performed only by one actor. We also found 7 units (8%) predominantly modulated by the two-pigeon stimulus set and 3 (3%) that responded to two behaviors. The remaining units had a generalized visual response to all stimulus categories. In this region, there were also 4 (4%) sound modulated units, one of them being selective for the actor pigeon.</p>
<p>Lastly, we recorded 123 units in NCL, of which 60 (49%) were task modulated. This dataset had 5 (8%) units in which the presence of a pigeon on screen drove the strongest neural modulation, and another 5 (8%) units that were modulated by the greebles. We also found 9 (15%) units that encoded one specific behavior for both actors, 3 (5%) units that encoded two behaviors, and another 3 (5%) units that encoded two behaviors, each performed by a different actor. Fifteen (25%) units encoded a specific behavior performed only by one actor. The remaining units had a significant main effect for both the pigeon and the greeble and no further differentiation amongst the behavioral conditions; that is, they were modulated by the presence of a stimulus in a way that was not specific to the content of the video, and were therefore interpreted as visually selective. Regarding sound modulation, we found 7 (12%) sound modulated units, but none of them belonged to the units labeled as pigeon selective.</p>
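<p>To make these categorical labels concrete, the following minimal sketch codifies, in R, the decision rules summarized in the caption of <xref ref-type="fig" rid="F5">Figure 5</xref> as a hypothetical classification function. The flags summarizing each unit&#x2019;s significant <italic>m1</italic> terms are illustrative assumptions, not the analysis code used in the study.</p>
<preformat><![CDATA[
## Illustrative sketch only, not the study's analysis code: deriving the
## categorical labels from hypothetical flags summarizing each unit's
## significant terms in model m1.
classify_unit <- function(u) {
  if (!u$task_modulated)                     return("non-selective")
  if (u$two_pigeon_set)                      return("others")
  if (u$n_actors == 1 && u$n_behaviors >= 2) return("actor-selective")
  if (u$n_actors == 2 && u$n_behaviors == 1) return("one behavior, both actors")
  if (u$n_actors == 1 && u$n_behaviors == 1) return("one behavior, one actor")
  if (u$n_behaviors >= 2)                    return("others")
  "visually selective"  # stimulus-driven response with no content effect
}

## Example: a unit exclusively modulated by the pigeon during two behaviors
classify_unit(list(task_modulated = TRUE, two_pigeon_set = FALSE,
                   n_actors = 1, n_behaviors = 2))
#> [1] "actor-selective"
]]></preformat>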
<p>The proportion of units (<xref ref-type="fig" rid="F5">Figure 5</xref>) classified as actor selective did not differ between the three recorded regions [&#x03C7;<sup>2</sup>(2) = 2.34, <italic>p</italic> = 0.3], and the same was the case for the proportion of units classified as behavior selective [&#x03C7;<sup>2</sup>(2) = 1.54, <italic>p</italic> = 0.46]. However, there were differences regarding the proportion of sound modulated units: MVL had a higher proportion of units classified as sound modulated compared to Wulst [&#x03C7;<sup>2</sup>(1) = 17.7, adjusted <italic>p</italic>-value = 0.0001]. No significant differences were found between NCL and MVL [&#x03C7;<sup>2</sup>(1) = 4.15, adjusted <italic>p</italic>-value = 0.08] or between NCL and Wulst (Fisher&#x2019;s exact test: adjusted <italic>p</italic>-value = 0.11).</p>
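<p>For readers who wish to reproduce this style of comparison, the sketch below shows one way to run the pairwise proportion tests in R. The counts and denominators are hypothetical stand-ins, the Holm adjustment is our assumption for the unspecified correction, and Fisher&#x2019;s exact test is applied to every pair for simplicity, whereas the analysis above used chi-squared tests where expected counts allowed.</p>
<preformat><![CDATA[
## Illustrative sketch, not the study's code: pairwise tests on the
## proportion of sound-modulated units per region. Counts and denominators
## are assumed stand-ins, loosely based on the numbers reported above.
sound <- c(MVL = 14, Wulst = 4, NCL = 7)    # hypothetical sound-modulated counts
total <- c(MVL = 65, Wulst = 91, NCL = 60)  # hypothetical task-modulated totals
pairs <- combn(names(sound), 2, simplify = FALSE)
p_raw <- sapply(pairs, function(rg) {
  tab <- cbind(sound[rg], total[rg] - sound[rg])  # 2 x 2 contingency table
  fisher.test(tab)$p.value                        # exact test for each pair
})
## adjust the three pairwise p-values (Holm correction assumed)
setNames(p.adjust(p_raw, method = "holm"),
         sapply(pairs, paste, collapse = " vs "))
]]></preformat>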
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption><p>Number of units with a significant response to the categorical task variables for every region, displayed as percentages (rounded) and absolute counts. After fitting each unit&#x2019;s activity with the <italic>m1</italic> model, neurons were classified as actor-selective (if exclusively selective for one actor during at least two behaviors), behavior-selective (if exclusively selective for one behavior performed by both actors, labeled &#x201C;one behavior, both actors&#x201D; in this figure) or actor-and-behavior-selective (if modulated only by one actor performing one behavior, labeled &#x201C;one behavior, one actor&#x201D; in this figure). Units that were modulated by the presence of a stimulus without any further distinction were labeled visually selective; the ones not task modulated were labeled non-selective; units selective for more than one behavior or for the two-pigeon stimulus set are included under the category &#x201C;others.&#x201D; Lastly, sound modulated neurons were also accounted for.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnbeh-20-1736261-g005.tif">
<alt-text content-type="machine-generated">Sankey diagram showing the distribution of neuron selectivity across three regions labeled MVL, NCL, and Wulst. Labels show classifications such as selective, non-selective, actor, one behavior (one or both actors), visually selective, sound, and others with corresponding percentages and sample sizes for each category.</alt-text>
</graphic>
</fig>
</sec>
<sec id="S3.SS3">
<title>Population analysis</title>
<p>Although only a small percentage of neurons were considered pigeon selective, we were interested in assessing which moments in the video drove the greatest modulation. We fitted the subpopulations of pigeon-responsive neurons, grouped by region, with model <italic>m2</italic> (see Methods). For every video, we identified the 200 ms bins in which the population activity significantly differed from the population baseline. Subsequently, in each of these bins, the activity recorded during the presentation of a pigeon video was compared with the activity recorded during the respective greeble control.</p>
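<p>As a rough illustration of this bin-wise procedure, the sketch below applies the emmeans package (<xref ref-type="bibr" rid="B30">Lenth, 2025</xref>) to a toy Poisson model standing in for the fitted model <italic>m2</italic>; the factor names, simulated counts, and model structure are assumptions, not the study&#x2019;s code.</p>
<preformat><![CDATA[
## Minimal sketch of the bin-wise contrasts on toy data; a Poisson GLM
## stands in for the mixed model m2, and all names are illustrative.
library(emmeans)
set.seed(1)
d <- expand.grid(
  bin   = factor(c("baseline", paste0("b", 1:5)),
                 levels = c("baseline", paste0("b", 1:5))),
  actor = factor(c("pigeon", "greeble")),
  unit  = factor(1:10))
## inflate bin b2 for the pigeon so one contrast comes out significant
d$count <- rpois(nrow(d), exp(1 + 0.8 * (d$bin == "b2" & d$actor == "pigeon")))
m2_toy  <- glm(count ~ bin * actor, family = poisson, data = d)
emm     <- emmeans(m2_toy, ~ bin * actor)   # marginal means per bin and actor
vs_base <- as.data.frame(
  contrast(emm, method = "trt.vs.ctrl",     # every bin against the baseline
           ref = "baseline", by = "actor",  # bin, separately for each actor
           adjust = "holm"))
sig_bins   <- vs_base[!(vs_base$p.value >= 0.05), ]  # bins off baseline
actor_diff <- contrast(emm, method = "pairwise", by = "bin")  # pigeon vs greeble
]]></preformat>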
<p>In Wulst and MVL, the populations diverged from their respective baselines for at least 200 ms in most videos (MVL: 11 out of 16; Wulst: 12 out of 16). In the videos depicting courtship or eating behaviors, every time bin that significantly differed from baseline corresponded to frames in which the animal was performing a bowing motion or walking (<xref ref-type="fig" rid="F6">Figure 6</xref>). During flying videos, the significant moments depicted wing flapping and, in one instance for both MVL and Wulst, small body movements while the animal was stationary. In walking videos, all relevant bins corresponded to the animal moving forward (relative to its original trajectory).</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption><p>Time points in courtship video 4 that modulated the subpopulation of pigeon responsive neurons. For each region, the neurons labeled as pigeon selective were grouped and fitted with model <italic>m2</italic>. We obtained the estimated marginal means for the interaction between every time bin in the video and each actor. Hence, the panels for MVL, Wulst and NCL display the population&#x2019;s response predicted by the model. The time points in which the subpopulation&#x2019;s activity significantly differs from the subpopulation&#x2019;s baseline are highlighted with a box. The asterisk indicates a significant difference in subpopulation response between the pigeon and greeble presentations of the video. Video frames displayed during the highlighted time bins are presented at the top of the figure and matched to the respective time bin with a colored band.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnbeh-20-1736261-g006.tif">
<alt-text content-type="machine-generated">Composite scientific figure showing three rows of line graphs and three sets of paired video frames above. Video frames of the pigeon videos and the corresponding greeble frames, bordered in yellow, green, or purple, match time points on graphs. Line graphs beneath display predicted populational neural responses (MVL, Wulst, NCL) over time to pigeon and greeble stimuli, with blue and red lines and shaded confidence intervals. Black bars, asterisks, and colored rectangles on graphs indicate significant effects at specific time points. A legend identifies plotted variables, and axes are labeled for clarity.</alt-text>
</graphic>
</fig>
<p>In MVL, 2 out of 11 videos had bins in which activity differed significantly between the actor pigeon and the actor greeble. Both instances depicted the animal walking. In Wulst, 9 out of 12 videos contained bins in which activity significantly differed from baseline and differed between the pigeon and the greeble presentations of the video.</p>
<p>In NCL, only 5 out of 16 videos had bins in which the population activity significantly differed from the population baseline. Furthermore, the identified bins did not differ between the pigeon and the greeble presentations.</p>
<p>Lastly, we analyzed the population dynamics of the representations generated by each stimulus with a state-space analysis. The aim of this analysis was to identify patterns of activity that differ between the representation of the pigeons and that of the greebles (see Methods for details). The trajectories (<xref ref-type="fig" rid="F7">Figure 7a</xref>) describe instantaneous activation patterns learned from the data (<xref ref-type="bibr" rid="B42">Pang et al., 2016</xref>): they represent the progression of the population response, during 600 ms of baseline and the entire video presentation, for each condition (video &#x00D7; actor). The trajectories were obtained by projecting the data onto the first three principal components, which contain 17% of the population variance in MVL, 13% in Wulst, and 11% in NCL (<xref ref-type="supplementary-material" rid="DS1">Supplementary Figure 2</xref>).</p>
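<p>The projection itself is standard principal component analysis. The sketch below illustrates the idea in R on toy data, with random numbers standing in for the binned population rates, and shows how the three-dimensional trajectories and their share of population variance could be obtained; it is a schematic, not the study&#x2019;s code.</p>
<preformat><![CDATA[
## Schematic of the state-space projection on toy data: rows are time bins,
## columns are neurons; a real analysis would use the recorded firing rates.
set.seed(1)
rates <- matrix(rnorm(200 * 40), nrow = 200)    # 200 bins x 40 neurons (toy)
pc    <- prcomp(rates, center = TRUE, scale. = TRUE)
traj  <- pc$x[, 1:3]                            # 3-D trajectory across bins
var3  <- summary(pc)$importance["Proportion of Variance", 1:3]
sum(var3)                                       # share of variance in PC1-3
]]></preformat>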
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption><p>State-space analysis of population dynamics during the presentation of each video. <bold>(a)</bold> State-space trajectories for 6 videos. The trajectories were plotted separately for the actor pigeon and actor greeble presentations of the same video. There are brief moments throughout the presentation of videos in which the population represents the actors differently. <bold>(b)</bold> Euclidean distances between the actor pigeon and actor greeble presentations of the same video.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnbeh-20-1736261-g007.tif">
<alt-text content-type="machine-generated">Figure containing six three-dimensional state space trajectory plots at the top and twelve line graphs at the bottom. Top panels depict state space trajectory for examples of flying, walking, courtship, and eating, with lines color-coded by condition. Bottom panels display time series analyses of trajectory distances between the pigeon and the greeble version of each video. Analysis is depicted per brain region and behavior, each with color-coded lines for multiple video conditions.</alt-text>
</graphic>
</fig>
<p>In MVL and Wulst, the population activity visibly departs from baseline for every behavioral category (<xref ref-type="fig" rid="F7">Figure 7</xref>). Within each category, the videos differ in how much separation there is between the trajectories of the actor pigeon and the actor greeble. We quantified these differences by measuring the Euclidean distance between the trajectories at every timepoint (<xref ref-type="fig" rid="F7">Figure 7b</xref>). The moments in which the neuronal populations distinguish between the pigeons and the greebles occur in brief periods rather than in a sustained way: after a visible peak, the distances between trajectories return to values similar to those observed during the baseline.</p>
<p>For each video, we identified the two time points with the largest Euclidean distance between trajectories. During courtship videos, the biggest differences between activity related to pigeons and greebles were found while the pigeon performed bowing motions and walking. During eating videos, the largest distances coincided with bowing motions, pecking, mandibulation, and walking. In the flying videos, the differences corresponded to two distinct types of behavior: on the one hand, moments where the pigeon was flapping its wings and thereby creating a lot of movement on screen; on the other, moments where the animal was stationary and only performing slight body shifts, such as small head movements. Lastly, in the walking videos, the differences between pigeons and greebles corresponded to the coordinated motion of stepping and head-bobbing. All behaviors were represented in the modulation of the population of every region.</p>
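<p>Concretely, the distance measure reduces to the bin-wise Euclidean norm between the two projected trajectories. A minimal sketch follows, with toy matrices standing in for the pigeon and greeble trajectories of one video.</p>
<preformat><![CDATA[
## Toy illustration of the trajectory-distance measure; the two matrices
## stand in for the projected pigeon and greeble trajectories of one video.
set.seed(2)
traj_pigeon  <- matrix(rnorm(60 * 3), ncol = 3)     # 60 bins x 3 PCs (toy)
traj_greeble <- matrix(rnorm(60 * 3), ncol = 3)
d <- sqrt(rowSums((traj_pigeon - traj_greeble)^2))  # distance at every bin
order(d, decreasing = TRUE)[1:2]                    # two bins with largest distance
]]></preformat>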
</sec>
</sec>
<sec id="S4" sec-type="discussion">
<title>Discussion</title>
<p>We recorded single-unit responses in three different regions of the pigeon brain (MVL, Wulst, and NCL) while head-fixed birds watched videos of conspecifics and control shapes performing four different behaviors: courtship, eating, flying, and walking. We aimed to assess whether there is specific neuronal coding for conspecifics at different structures of the pigeon&#x2019;s visual system when exposed to dynamic, naturalistic stimuli.</p>
<p>In each region, we found a small proportion of units that were modulated by the pigeon videos. However, we did not find a difference between the regions regarding the proportions of units that responded to the actors or the behaviors observable in the videos. The response patterns we observed were neither in line with primate face-cell coding in terms of selectivity to a particular body feature like the head (<xref ref-type="bibr" rid="B13">Desimone et al., 1984</xref>), nor did they display an invariant, abstract response to social content (<xref ref-type="bibr" rid="B33">McMahon et al., 2015</xref>). This lack of a simple, categorical &#x201C;conspecific&#x201D; code suggests a different processing strategy in the pigeon brain. Our findings are consistent with a tuning landscape model (<xref ref-type="bibr" rid="B45">Ponce et al., 2019</xref>; <xref ref-type="bibr" rid="B54">Wang and Ponce, 2022</xref>) of visual processing, like that described in the primate ventral stream. In this framework, neurons in the higher centers of the pigeon visual system are not simple category detectors, but are better viewed as signaling distances to specific, preferred combinations of visual attributes within a high-dimensional feature space.</p>
<p>Our population analysis supports this interpretation. We found that population activity in the visual areas (MVL and Wulst) was modulated at discrete moments of the video, corresponding to specific motions like bowing, wing flapping, and head-bobbing. We interpret these momentary differences as the points where the pigeon video trajectory and the greeble video trajectory diverged on this shared, high-dimensional landscape. The greeble, by not emulating the species-specific motion kinematics, may have simply traced a different path through this feature space, resulting in a momentarily separable neural state. We believe that the distinction in encoding that we observed was not a categorical social signal but rather a discrepancy of visual features over time (see <xref ref-type="supplementary-material" rid="DS1">Supplementary Figure 1</xref>). To verify this possibility, future work should explicitly model the kinematics of the videos (e.g., head angular velocity, body displacement) and look for modulation of population activity that aligns with changes in these kinematic variables during the videos.</p>
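<p>One possible shape for such a follow-up, sketched here in R on simulated data: the regressor names (head angular velocity, body displacement) are hypothetical, lme4 stands in for whichever mixed-model package is preferred, and nothing below reflects analyses performed in the present study.</p>
<preformat><![CDATA[
## Speculative sketch of the proposed kinematic analysis on simulated data;
## the regressors are hypothetical and not results from this study.
library(lme4)
set.seed(3)
kin <- data.frame(unit         = factor(rep(1:20, each = 50)),
                  head_ang_vel = runif(1000),   # frame-wise head angular velocity
                  body_disp    = runif(1000))   # frame-wise body displacement
## simulate spike counts with a unit-level offset and a kinematic effect
unit_off  <- rep(rnorm(20, sd = 0.3), each = 50)
kin$count <- rpois(1000, exp(0.2 + 0.5 * kin$head_ang_vel + unit_off))
fit <- glmer(count ~ head_ang_vel + body_disp + (1 | unit),
             family = poisson, data = kin)
fixef(fit)   # do the kinematic variables predict firing?
]]></preformat>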
<p>The absence of a clear difference in the NCL subpopulation reinforces our view that the visual areas employ sparse coding of visual stimuli. NCL, the avian prefrontal equivalent, is known to encode task-relevant visual information for active categorization (<xref ref-type="bibr" rid="B16">Divac et al., 1985</xref>; <xref ref-type="bibr" rid="B24">Johnston et al., 2017</xref>). Its lack of differential engagement during this passive task, while the visual areas (MVL and Wulst) showed feature-driven modulation, is consistent with a functional division between visual feature processing and higher-order cognitive engagement in NCL. Unlike operant chamber tasks (<xref ref-type="bibr" rid="B49">Scarf et al., 2016</xref>; <xref ref-type="bibr" rid="B7">Clark et al., 2019</xref>; <xref ref-type="bibr" rid="B9">Clark et al., 2022a</xref>,<xref ref-type="bibr" rid="B8">b</xref>; <xref ref-type="bibr" rid="B14">Dittrich et al., 1998</xref>), the present experiment did not require an explicit categorization of the stimuli: the birds viewed them during passive fixation.</p>
<p>A separate finding was the presence of sound-modulated visual units, with a greater proportion in MVL compared to Wulst. This points to the tectofugal pathway (which includes MVL) as a site for audio-visual integration, building on earlier reports of auditory responses in the pigeon optic tectum (<xref ref-type="bibr" rid="B31">Lewald and D&#x00F6;rrscheidt, 1998</xref>). This integration may be important for behavior, as other studies have shown that pigeons use vocalizations to aid in visual categorization (<xref ref-type="bibr" rid="B43">Partan et al., 2005</xref>). While the proportion of such neurons in our dataset was small, their presence in MVL suggests that multimodal integration may occur early in the avian hierarchy of sensory processing. Sound modulated neurons that are primarily visually responsive are, in fact, found in the primate anterior fundus face patch (<xref ref-type="bibr" rid="B27">Khandhadia et al., 2021</xref>), where face selective neurons display enhanced responses to faces of conspecifics when the stimuli are accompanied by the conspecific&#x2019;s vocalization. In the present experiment, although we found neurons that modulated their response to videos of pigeons with sound, they constituted a small proportion of the population we sampled, making it difficult to know whether they represent a genuine feature of MVL.</p>
<p>A known problem of experiments that use videos to study the processing of social stimuli in pigeons is the color display (<xref ref-type="bibr" rid="B56">Ware et al., 2015</xref>). Pigeons have tetrachromatic color vision, making it likely that colored videos of conspecifics, displayed on monitors optimized for the human visual system, do not look realistic to them. Nonetheless, studies that assessed pigeons&#x2019; behavioral responses towards videos of conspecifics displayed without the UV component report that the animals maintain natural responses towards them (<xref ref-type="bibr" rid="B50">Shimizu, 1998</xref>; <xref ref-type="bibr" rid="B56">Ware et al., 2015</xref>; <xref ref-type="bibr" rid="B55">Ware et al., 2017</xref>). Pigeons are therefore still able to extract relevant social information from video stimuli displayed with suboptimal colors. At the neuronal level, sensitivity to colors in the UV range has been reported in Wulst (<xref ref-type="bibr" rid="B39">Nimpf et al., 2024</xref>). We expect videos of social scenes with naturalistic color information to be represented in MVL and Wulst similarly to the ones used in the present experiment, with video features encoded at the population level. Nonetheless, we would expect more naturalistic color information to evoke a wider response range from the individual neurons in these regions, reflecting the distance to each neuron&#x2019;s preferred &#x201C;prototype.&#x201D; Taken together, the findings of the experiment suggest that the pigeon visual system, much like the primate visual cortex, does not employ a simple, invariant code for &#x201C;conspecifics.&#x201D; Instead, our findings support a high-dimensional tuning landscape model, where populations in MVL and Wulst encode specific features that change over time in dynamic videos.</p>
</sec>
</body>
<back>
<sec id="S5" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="S6" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The animal study was approved by the University of Otago Animal Ethics Committee. The study was conducted in accordance with the local legislation and institutional requirements.</p>
</sec>
<sec id="S7" sec-type="author-contributions">
<title>Author contributions</title>
<p>SS: Software, Methodology, Visualization, Data curation, Formal analysis, Investigation, Conceptualization, Writing &#x2013; original draft. DB: Investigation, Writing &#x2013; review &#x0026; editing. PH: Writing &#x2013; review &#x0026; editing, Investigation. WC: Writing &#x2013; review &#x0026; editing, Methodology, Software, Conceptualization. JR: Supervision, Project administration, Writing &#x2013; review &#x0026; editing. MC: Methodology, Conceptualization, Supervision, Project administration, Writing &#x2013; review &#x0026; editing, Funding acquisition.</p>
</sec>
<sec id="S9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="S10" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="S11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="S12" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fnbeh.2026.1736261/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fnbeh.2026.1736261/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.docx" id="DS1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document"/>
<supplementary-material xlink:href="Video_1.mp4" id="VS1" mimetype="video/mp4"/>
<supplementary-material xlink:href="Video_2.mp4" id="VS2" mimetype="video/mp4"/>
<supplementary-material xlink:href="Video_3.mp4" id="VS3" mimetype="video/mp4"/>
<supplementary-material xlink:href="Video_4.mp4" id="VS4" mimetype="video/mp4"/>
<supplementary-material xlink:href="Video_5.mp4" id="VS5" mimetype="video/mp4"/>
<supplementary-material xlink:href="Video_6.mp4" id="VS6" mimetype="video/mp4"/>
<supplementary-material xlink:href="Video_7.mp4" id="VS7" mimetype="video/mp4"/>
<supplementary-material xlink:href="Video_8.mp4" id="VS8" mimetype="video/mp4"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1"><mixed-citation publication-type="book"><collab>Adobe Inc.</collab> (<year>2023</year>). <source><italic>Adobe After Effects.</italic></source> <publisher-loc>San Jose CA</publisher-loc>: <publisher-name>Adobe Inc</publisher-name>.</mixed-citation></ref>
<ref id="B2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Aronov</surname> <given-names>D.</given-names></name> <name><surname>Nevers</surname> <given-names>R.</given-names></name> <name><surname>Tank</surname> <given-names>D. W.</given-names></name></person-group> (<year>2017</year>). <article-title>Mapping of a non-spatial dimension by the hippocampal&#x2013;entorhinal circuit.</article-title> <source><italic>Nature</italic></source> <volume>543</volume> <fpage>719</fpage>&#x2013;<lpage>722</lpage>. <pub-id pub-id-type="doi">10.1038/nature21692</pub-id> <pub-id pub-id-type="pmid">28358077</pub-id></mixed-citation></ref>
<ref id="B3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Azizi</surname> <given-names>A. H.</given-names></name> <name><surname>Pusch</surname> <given-names>R.</given-names></name> <name><surname>Koenen</surname> <given-names>C.</given-names></name> <name><surname>Klatt</surname> <given-names>S.</given-names></name> <name><surname>Br&#x00F6;ker</surname> <given-names>F.</given-names></name> <name><surname>Thiele</surname> <given-names>S.</given-names></name><etal/></person-group> (<year>2019</year>). <article-title>Emerging category representation in the visual forebrain hierarchy of pigeons (<italic>Columba livia</italic>).</article-title> <source><italic>Behav. Brain Res.</italic></source> <volume>356</volume> <fpage>423</fpage>&#x2013;<lpage>434</lpage>. <pub-id pub-id-type="doi">10.1016/j.bbr.2018.05.014</pub-id> <pub-id pub-id-type="pmid">29885380</pub-id></mixed-citation></ref>
<ref id="B4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bischof</surname> <given-names>H.-J.</given-names></name> <name><surname>Eckmeier</surname> <given-names>D.</given-names></name> <name><surname>Keary</surname> <given-names>N.</given-names></name> <name><surname>L&#x00F6;wel</surname> <given-names>S.</given-names></name> <name><surname>Mayer</surname> <given-names>U.</given-names></name> <name><surname>Michael</surname> <given-names>N.</given-names></name></person-group> (<year>2016</year>). <article-title>Multiple visual field representations in the visual wulst of a laterally eyed bird, the zebra finch (<italic>Taeniopygia guttata</italic>).</article-title> <source><italic>PLoS One</italic></source> <volume>11</volume>:<fpage>e0154927</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0154927</pub-id> <pub-id pub-id-type="pmid">27139912</pub-id></mixed-citation></ref>
<ref id="B5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bouchard</surname> <given-names>J.</given-names></name> <name><surname>Goodyer</surname> <given-names>W.</given-names></name> <name><surname>Lefebvre</surname> <given-names>L.</given-names></name></person-group> (<year>2007</year>). <article-title>Social learning and innovation are positively correlated in pigeons (<italic>Columba livia</italic>).</article-title> <source><italic>Anim. Cogn.</italic></source> <volume>10</volume> <fpage>259</fpage>&#x2013;<lpage>266</lpage>. <pub-id pub-id-type="doi">10.1007/s10071-006-0064-1</pub-id> <pub-id pub-id-type="pmid">17205290</pub-id></mixed-citation></ref>
<ref id="B6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Brainard</surname> <given-names>D. H.</given-names></name></person-group> (<year>1997</year>). <article-title>The psychophysics toolbox.</article-title> <source><italic>Spatial Vis.</italic></source> <volume>10</volume> <fpage>433</fpage>&#x2013;<lpage>436</lpage>. <pub-id pub-id-type="doi">10.1163/156856897X00357</pub-id></mixed-citation></ref>
<ref id="B7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Clark</surname> <given-names>W. J.</given-names></name> <name><surname>Porter</surname> <given-names>B.</given-names></name> <name><surname>Colombo</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Searching for face-category representation in the avian visual forebrain.</article-title> <source><italic>Front. Physiol.</italic></source> <volume>10</volume>:<fpage>140</fpage>. <pub-id pub-id-type="doi">10.3389/fphys.2019.00140</pub-id> <pub-id pub-id-type="pmid">30873042</pub-id></mixed-citation></ref>
<ref id="B8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Clark</surname> <given-names>W.</given-names></name> <name><surname>Chilcott</surname> <given-names>M.</given-names></name> <name><surname>Colombo</surname> <given-names>M.</given-names></name></person-group> (<year>2022b</year>). <article-title>The effect of progressive image scrambling on neuronal responses at three stations of the pigeon tectofugal pathway.</article-title> <source><italic>Sci. Rep.</italic></source> <volume>12</volume>:<fpage>14190</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-18006-0</pub-id> <pub-id pub-id-type="pmid">35986036</pub-id></mixed-citation></ref>
<ref id="B9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Clark</surname> <given-names>W.</given-names></name> <name><surname>Chilcott</surname> <given-names>M.</given-names></name> <name><surname>Azizi</surname> <given-names>A.</given-names></name> <name><surname>Pusch</surname> <given-names>R.</given-names></name> <name><surname>Perry</surname> <given-names>K.</given-names></name> <name><surname>Colombo</surname> <given-names>M.</given-names></name></person-group> (<year>2022a</year>). <article-title>Neurons in the pigeon visual network discriminate between faces, scrambled faces, and sine grating images.</article-title> <source><italic>Sci. Rep.</italic></source> <volume>12</volume>:<fpage>589</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-021-04559-z</pub-id> <pub-id pub-id-type="pmid">35022466</pub-id></mixed-citation></ref>
<ref id="B10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cunningham</surname> <given-names>J. P.</given-names></name> <name><surname>Yu</surname> <given-names>B. M.</given-names></name></person-group> (<year>2014</year>). <article-title>Dimensionality reduction for large-scale neural recordings.</article-title> <source><italic>Nat. Neurosci.</italic></source> <volume>17</volume> <fpage>1500</fpage>&#x2013;<lpage>1509</lpage>. <pub-id pub-id-type="doi">10.1038/nn.3776</pub-id> <pub-id pub-id-type="pmid">25151264</pub-id></mixed-citation></ref>
<ref id="B11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dal Ben</surname> <given-names>R.</given-names></name></person-group> (<year>2023</year>). <article-title>SHINE_color: Controlling low-level properties of colorful images.</article-title> <source><italic>MethodsX</italic></source> <volume>11</volume>:<fpage>102377</fpage>. <pub-id pub-id-type="doi">10.1016/j.mex.2023.102377</pub-id> <pub-id pub-id-type="pmid">37771500</pub-id></mixed-citation></ref>
<ref id="B12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Delacoux</surname> <given-names>M.</given-names></name> <name><surname>Itahara</surname> <given-names>A.</given-names></name> <name><surname>Kano</surname> <given-names>F.</given-names></name></person-group> (<year>2025</year>). <article-title>Gaze following in pigeons increases with the number of demonstrators.</article-title> <source><italic>iScience</italic></source> <volume>28</volume>:<fpage>112857</fpage>. <pub-id pub-id-type="doi">10.1016/j.isci.2025.112857</pub-id> <pub-id pub-id-type="pmid">40612517</pub-id></mixed-citation></ref>
<ref id="B13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Desimone</surname> <given-names>R.</given-names></name> <name><surname>Albright</surname> <given-names>T.</given-names></name> <name><surname>Gross</surname> <given-names>C.</given-names></name> <name><surname>Bruce</surname> <given-names>C.</given-names></name></person-group> (<year>1984</year>). <article-title>Stimulus-selective properties of inferior temporal neurons in the macaque.</article-title> <source><italic>J. Neurosci.</italic></source> <volume>4</volume> <fpage>2051</fpage>&#x2013;<lpage>2062</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.04-08-02051.1984</pub-id> <pub-id pub-id-type="pmid">6470767</pub-id></mixed-citation></ref>
<ref id="B14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dittrich</surname> <given-names>W. H.</given-names></name> <name><surname>Lea</surname> <given-names>S. E. G.</given-names></name> <name><surname>Barrett</surname> <given-names>J.</given-names></name> <name><surname>Gurr</surname> <given-names>P. R.</given-names></name></person-group> (<year>1998</year>). <article-title>Categorization of natural movements by pigeons: Visual concept discrimination and biological motion.</article-title> <source><italic>J. Exp. Anal. Behav.</italic></source> <volume>70</volume> <fpage>281</fpage>&#x2013;<lpage>299</lpage>. <pub-id pub-id-type="doi">10.1901/jeab.1998.70-281</pub-id> <pub-id pub-id-type="pmid">16812887</pub-id></mixed-citation></ref>
<ref id="B15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ditz</surname> <given-names>H. M.</given-names></name> <name><surname>Nieder</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Format-dependent and format-independent representation of sequential and simultaneous numerosity in the crow endbrain.</article-title> <source><italic>Nat. Commun.</italic></source> <volume>11</volume>:<fpage>686</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-020-14519-2</pub-id> <pub-id pub-id-type="pmid">32019934</pub-id></mixed-citation></ref>
<ref id="B16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Divac</surname> <given-names>I.</given-names></name> <name><surname>Mogensen</surname> <given-names>J.</given-names></name> <name><surname>Bj&#x00F6;rklund</surname> <given-names>A.</given-names></name></person-group> (<year>1985</year>). <article-title>The prefrontal &#x2018;cortex&#x2019; in the pigeon. Biochemical evidence.</article-title> <source><italic>Brain Res.</italic></source> <volume>332</volume> <fpage>365</fpage>&#x2013;<lpage>368</lpage>. <pub-id pub-id-type="doi">10.1016/0006-8993(85)90606-7</pub-id> <pub-id pub-id-type="pmid">3995275</pub-id></mixed-citation></ref>
<ref id="B17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Frost</surname> <given-names>B. J.</given-names></name></person-group> (<year>2010</year>). <article-title>A taxonomy of different forms of visual motion detection and their underlying neural mechanisms.</article-title> <source><italic>Brain Behav. Evol.</italic></source> <volume>75</volume> <fpage>218</fpage>&#x2013;<lpage>235</lpage>. <pub-id pub-id-type="doi">10.1159/000314284</pub-id> <pub-id pub-id-type="pmid">20733297</pub-id></mixed-citation></ref>
<ref id="B18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gauthier</surname> <given-names>I.</given-names></name> <name><surname>Williams</surname> <given-names>P.</given-names></name> <name><surname>Tarr</surname> <given-names>M. J.</given-names></name> <name><surname>Tanaka</surname> <given-names>J.</given-names></name></person-group> (<year>1998</year>). <article-title>Training &#x2018;greeble&#x2019; experts: A framework for studying expert object recognition processes</article-title>. <source><italic>Vis. Res.</italic></source> <volume>38</volume>, <fpage>2401</fpage>&#x2013;<lpage>2428</lpage>. <pub-id pub-id-type="doi">10.1016/S0042-6989(97)00442-2</pub-id> <pub-id pub-id-type="pmid">9798007</pub-id></mixed-citation></ref>
<ref id="B19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Greer</surname> <given-names>D.</given-names></name> <name><surname>Lei</surname> <given-names>T.</given-names></name> <name><surname>Kryshtal</surname> <given-names>A.</given-names></name> <name><surname>Jessen</surname> <given-names>Z. F.</given-names></name> <name><surname>Schwartz</surname> <given-names>G. W.</given-names></name></person-group> (<year>2025</year>). <article-title>Visual identification of conspecifics shapes social behavior in mice.</article-title> <source><italic>Curr. Biol.</italic></source> <volume>35</volume> <fpage>287</fpage>&#x2013;<lpage>299.e4</lpage>. <pub-id pub-id-type="doi">10.1016/j.cub.2024.11.041</pub-id> <pub-id pub-id-type="pmid">39706174</pub-id></mixed-citation></ref>
<ref id="B20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>G&#x00FC;nt&#x00FC;rk&#x00FC;n</surname> <given-names>O.</given-names></name></person-group> (<year>2005</year>). <article-title>The avian &#x2018;prefrontal cortex&#x2019; and cognition.</article-title> <source><italic>Curr. Opin. Neurobiol.</italic></source> <volume>15</volume> <fpage>686</fpage>&#x2013;<lpage>693</lpage>. <pub-id pub-id-type="doi">10.1016/j.conb.2005.10.003</pub-id> <pub-id pub-id-type="pmid">16263260</pub-id></mixed-citation></ref>
<ref id="B21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>G&#x00FC;nt&#x00FC;rk&#x00FC;n</surname> <given-names>O.</given-names></name> <name><surname>Bugnyar</surname> <given-names>T.</given-names></name></person-group> (<year>2016</year>). <article-title>Cognition without cortex.</article-title> <source><italic>Trends Cogn. Sci.</italic></source> <volume>20</volume> <fpage>291</fpage>&#x2013;<lpage>303</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2016.02.001</pub-id> <pub-id pub-id-type="pmid">26944218</pub-id></mixed-citation></ref>
<ref id="B22"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>G&#x00FC;nt&#x00FC;rk&#x00FC;n</surname> <given-names>O.</given-names></name> <name><surname>Hahmann</surname> <given-names>U.</given-names></name></person-group> (<year>1999</year>). <article-title>Functional subdivisions of the ascending visual pathways in the pigeon.</article-title> <source><italic>Behav. Brain Res.</italic></source> <volume>98</volume> <fpage>193</fpage>&#x2013;<lpage>201</lpage>. <pub-id pub-id-type="doi">10.1016/S0166-4328(98)00084-9</pub-id> <pub-id pub-id-type="pmid">10683107</pub-id></mixed-citation></ref>
<ref id="B23"><mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Hartig</surname> <given-names>F.</given-names></name></person-group> (<year>2024</year>). <source>DHARMa: Residual Diagnostics for Hierarchical (Multi-Level / Mixed) Regression Models. R package version 0.4.7</source>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://CRAN.R-project.org/package=DHARMa">https://CRAN.R-project.org/package=DHARMa</ext-link></comment>.</mixed-citation></ref>
<ref id="B24"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Johnston</surname> <given-names>M.</given-names></name> <name><surname>Anderson</surname> <given-names>C.</given-names></name> <name><surname>Colombo</surname> <given-names>M.</given-names></name></person-group> (<year>2017</year>). <article-title>Pigeon NCL and NFL neuronal activity represents neural correlates of the sample.</article-title> <source><italic>Behav. Neurosci.</italic></source> <volume>131</volume> <fpage>213</fpage>&#x2013;<lpage>219</lpage>. <pub-id pub-id-type="doi">10.1037/bne0000198</pub-id> <pub-id pub-id-type="pmid">28471222</pub-id></mixed-citation></ref>
<ref id="B25"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Karten</surname> <given-names>H. J.</given-names></name></person-group> (<year>1969</year>). <article-title>The organization of the avian telencephalon and some speculations on the phylogeny of the amniote telencephalon.</article-title> <source><italic>Ann. N. Y. Acad. Sci.</italic></source> <volume>167</volume> <fpage>164</fpage>&#x2013;<lpage>179</lpage>. <pub-id pub-id-type="doi">10.1111/j.1749-6632.1969.tb20442.x</pub-id></mixed-citation></ref>
<ref id="B26"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Karten</surname> <given-names>H. J.</given-names></name> <name><surname>Hodos</surname> <given-names>W.</given-names></name></person-group> (<year>1967</year>). <source><italic>A Stereotaxic Atlas of the Brain of the Pigeon (Columba livia).</italic></source> <publisher-loc>Baltimore</publisher-loc>: <publisher-name>Johns Hopkins Press</publisher-name>.</mixed-citation></ref>
<ref id="B27"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khandhadia</surname> <given-names>A. P.</given-names></name> <name><surname>Murphy</surname> <given-names>A. P.</given-names></name> <name><surname>Romanski</surname> <given-names>L. M.</given-names></name> <name><surname>Bizley</surname> <given-names>J. K.</given-names></name> <name><surname>Leopold</surname> <given-names>D. A.</given-names></name></person-group> (<year>2021</year>). <article-title>Audiovisual integration in macaque face patch neurons.</article-title> <source><italic>Curr. Biol.</italic></source> <volume>31</volume> <fpage>1826</fpage>&#x2013;<lpage>1835.e3</lpage>. <pub-id pub-id-type="doi">10.1016/j.cub.2021.01.102</pub-id> . <pub-id pub-id-type="pmid">33636119</pub-id></mixed-citation></ref>
<ref id="B28"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kiyokawa</surname> <given-names>Y.</given-names></name> <name><surname>Kuroda</surname> <given-names>N.</given-names></name> <name><surname>Takeuchi</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>The strain of unfamiliar conspecifics affects stress identification in rats.</article-title> <source><italic>Behav. Process.</italic></source> <volume>201</volume>:<fpage>104714</fpage>. <pub-id pub-id-type="doi">10.1016/j.beproc.2022.104714</pub-id> <pub-id pub-id-type="pmid">35901937</pub-id></mixed-citation></ref>
<ref id="B29"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kr&#x00F6;ner</surname> <given-names>S.</given-names></name> <name><surname>G&#x00FC;nt&#x00FC;rk&#x00FC;n</surname> <given-names>O.</given-names></name></person-group> (<year>1999</year>). <article-title>Afferent and efferent connections of the caudolateral neostriatum in the pigeon (<italic>Columba livia</italic>): A retro- and anterograde pathway tracing study.</article-title> <source><italic>J. Comp. Neurol.</italic></source> <volume>407</volume> <fpage>228</fpage>&#x2013;<lpage>260</lpage>. <pub-id pub-id-type="doi">10.1002/(SICI)1096-9861(19990503)407:2%253C228::AID-CNE6%253E3.0.CO;2-2</pub-id></mixed-citation></ref>
<ref id="B30"><mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Lenth</surname> <given-names>R.</given-names></name></person-group> (<year>2025</year>). <source><italic>emmeans: Estimated Marginal Means, aka Least-Squares Means. R package version 1.11.2-8.</italic></source> <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://CRAN.R-project.org/package=emmeans">https://CRAN.R-project.org/package=emmeans</ext-link> (accessed June 3, 2025)</comment>.</mixed-citation></ref>
<ref id="B31"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lewald</surname> <given-names>J.</given-names></name> <name><surname>D&#x00F6;rrscheidt</surname> <given-names>G. J.</given-names></name></person-group> (<year>1998</year>). <article-title>Spatial-tuning properties of auditory neurons in the optic tectum of the pigeon.</article-title> <source><italic>Brain Res.</italic></source> <volume>790</volume> <fpage>339</fpage>&#x2013;<lpage>342</lpage>. <pub-id pub-id-type="doi">10.1016/S0006-8993(98)00177-2</pub-id> <pub-id pub-id-type="pmid">9593982</pub-id></mixed-citation></ref>
<ref id="B32"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>D. P.</given-names></name> <name><surname>Xiao</surname> <given-names>Q.</given-names></name> <name><surname>Wang</surname> <given-names>S. R.</given-names></name></person-group> (<year>2007</year>). <article-title>Feedforward construction of the receptive field and orientation selectivity of visual neurons in the pigeon.</article-title> <source><italic>Cereb. Cortex</italic></source> <volume>17</volume> <fpage>885</fpage>&#x2013;<lpage>893</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhk043</pub-id> <pub-id pub-id-type="pmid">16723406</pub-id></mixed-citation></ref>
<ref id="B33"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>McMahon</surname> <given-names>D. B. T.</given-names></name> <name><surname>Russ</surname> <given-names>B. E.</given-names></name> <name><surname>Elnaiem</surname> <given-names>H. D.</given-names></name> <name><surname>Kurnikova</surname> <given-names>A. I.</given-names></name> <name><surname>Leopold</surname> <given-names>D. A.</given-names></name></person-group> (<year>2015</year>). <article-title>Single-unit activity during natural vision: Diversity, consistency, and spatial sensitivity among AF Face patch neurons.</article-title> <source><italic>J. Neurosci.</italic></source> <volume>35</volume> <fpage>5537</fpage>&#x2013;<lpage>5548</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.3825-14.2015</pub-id> <pub-id pub-id-type="pmid">25855170</pub-id></mixed-citation></ref>
<ref id="B34"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nagy</surname> <given-names>M.</given-names></name> <name><surname>V&#x00E1;s&#x00E1;rhelyi</surname> <given-names>G.</given-names></name> <name><surname>Pettit</surname> <given-names>B.</given-names></name> <name><surname>Roberts-Mariani</surname> <given-names>I.</given-names></name> <name><surname>Vicsek</surname> <given-names>T.</given-names></name> <name><surname>Biro</surname> <given-names>D.</given-names></name></person-group> (<year>2013</year>). <article-title>Context-dependent hierarchies in pigeons.</article-title> <source><italic>Proc. Natl. Acad. Sci. U.S.A.</italic></source> <volume>110</volume> <fpage>13049</fpage>&#x2013;<lpage>13054</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1305552110</pub-id> <pub-id pub-id-type="pmid">23878247</pub-id></mixed-citation></ref>
<ref id="B35"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nakamura</surname> <given-names>T.</given-names></name> <name><surname>Croft</surname> <given-names>D. B.</given-names></name> <name><surname>Westbrook</surname> <given-names>R. F.</given-names></name></person-group> (<year>2003</year>). <article-title>Domestic pigeons (<italic>Columba livia</italic>) discriminate between photographs of individual pigeons.</article-title> <source><italic>Learn. Behav.</italic></source> <volume>31</volume> <fpage>307</fpage>&#x2013;<lpage>317</lpage>. <pub-id pub-id-type="doi">10.3758/BF03195993</pub-id> <pub-id pub-id-type="pmid">14733480</pub-id></mixed-citation></ref>
<ref id="B36"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nalbach</surname> <given-names>H.-O.</given-names></name> <name><surname>Wolf-Oberhollenzer</surname> <given-names>F.</given-names></name> <name><surname>Kirschfeld</surname> <given-names>K.</given-names></name></person-group> (<year>1990</year>). <article-title>The pigeon&#x2019;s eye viewed through an ophthalmoscopic microscope: Orientation of retinal landmarks and significance of eye movements.</article-title> <source><italic>Vis. Res.</italic></source> <volume>30</volume> <fpage>529</fpage>&#x2013;<lpage>540</lpage>. <pub-id pub-id-type="doi">10.1016/0042-6989(90)90065-S</pub-id> <pub-id pub-id-type="pmid">2339507</pub-id></mixed-citation></ref>
<ref id="B37"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ng</surname> <given-names>B. S. W.</given-names></name> <name><surname>Grabska-Barwi&#x0144;ska</surname> <given-names>A.</given-names></name> <name><surname>G&#x00FC;nt&#x00FC;rk&#x00FC;n</surname> <given-names>O.</given-names></name> <name><surname>Jancke</surname> <given-names>D.</given-names></name></person-group> (<year>2010</year>). <article-title>Dominant vertical orientation processing without clustered maps: Early visual brain dynamics imaged with voltage-sensitive dye in the pigeon visual Wulst.</article-title> <source><italic>J. Neurosci.</italic></source> <volume>30</volume> <fpage>6713</fpage>&#x2013;<lpage>6725</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.4078-09.2010</pub-id> <pub-id pub-id-type="pmid">20463233</pub-id></mixed-citation></ref>
<ref id="B38"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nieder</surname> <given-names>A.</given-names></name></person-group> (<year>2017</year>). <article-title>Inside the corvid brain&#x2014;Probing the physiology of cognition in crows.</article-title> <source><italic>Curr. Opin. Behav. Sci.</italic></source> <volume>16</volume> <fpage>8</fpage>&#x2013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1016/j.cobeha.2017.02.005</pub-id></mixed-citation></ref>
<ref id="B39"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nimpf</surname> <given-names>S.</given-names></name> <name><surname>Kaplan</surname> <given-names>H. S.</given-names></name> <name><surname>Nordmann</surname> <given-names>G. C.</given-names></name> <name><surname>Cushion</surname> <given-names>T.</given-names></name> <name><surname>Keays</surname> <given-names>D. A.</given-names></name></person-group> (<year>2024</year>). <article-title>Long-term, high-resolution in vivo calcium imaging in pigeons.</article-title> <source><italic>Cell Reports Methods</italic></source> <volume>4</volume>:<fpage>100711</fpage>. <pub-id pub-id-type="doi">10.1016/j.crmeth.2024.100711</pub-id> <pub-id pub-id-type="pmid">38382523</pub-id></mixed-citation></ref>
<ref id="B40"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ott</surname> <given-names>T.</given-names></name> <name><surname>Nieder</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>Dopamine D2 Receptors Enhance Population Dynamics in Primate Prefrontal Working Memory Circuits.</article-title> <source><italic>Cereb. Cortex</italic></source> <volume>27</volume> <fpage>4423</fpage>&#x2013;<lpage>4435</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhw244</pub-id> <pub-id pub-id-type="pmid">27591146</pub-id></mixed-citation></ref>
<ref id="B41"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Otto</surname> <given-names>T.</given-names></name> <name><surname>Rose</surname> <given-names>J.</given-names></name></person-group> (<year>2023</year>). <article-title>The open toolbox for behavioral research.</article-title> <source><italic>Behav. Res. Methods</italic></source> <volume>56</volume> <fpage>4522</fpage>&#x2013;<lpage>4529</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-023-02199-x</pub-id> <pub-id pub-id-type="pmid">37794209</pub-id></mixed-citation></ref>
<ref id="B42"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pang</surname> <given-names>R.</given-names></name> <name><surname>Lansdell</surname> <given-names>B. J.</given-names></name> <name><surname>Fairhall</surname> <given-names>A. L.</given-names></name></person-group> (<year>2016</year>). <article-title>Dimensionality reduction in neuroscience.</article-title> <source><italic>Curr. Biol.</italic></source> <volume>26</volume> <fpage>R656</fpage>&#x2013;<lpage>R660</lpage>. <pub-id pub-id-type="doi">10.1016/j.cub.2016.05.029</pub-id> <pub-id pub-id-type="pmid">27458907</pub-id></mixed-citation></ref>
<ref id="B43"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Partan</surname> <given-names>S.</given-names></name> <name><surname>Yelda</surname> <given-names>S.</given-names></name> <name><surname>Price</surname> <given-names>V.</given-names></name> <name><surname>Shimizu</surname> <given-names>T.</given-names></name></person-group> (<year>2005</year>). <article-title>Female pigeons, <italic>Columba livia</italic>, respond to multisensory audio/video playbacks of male courtship behaviour.</article-title> <source><italic>Anim. Behav.</italic></source> <volume>70</volume> <fpage>957</fpage>&#x2013;<lpage>966</lpage>. <pub-id pub-id-type="doi">10.1016/j.anbehav.2005.03.002</pub-id></mixed-citation></ref>
<ref id="B44"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Perrett</surname> <given-names>D. I.</given-names></name> <name><surname>Rolls</surname> <given-names>E. T.</given-names></name> <name><surname>Caan</surname> <given-names>W.</given-names></name></person-group> (<year>1982</year>). <article-title>Visual neurones responsive to faces in the monkey temporal cortex.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>47</volume> <fpage>329</fpage>&#x2013;<lpage>342</lpage>. <pub-id pub-id-type="doi">10.1007/bf00239352</pub-id> <pub-id pub-id-type="pmid">7128705</pub-id></mixed-citation></ref>
<ref id="B45"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ponce</surname> <given-names>C. R.</given-names></name> <name><surname>Xiao</surname> <given-names>W.</given-names></name> <name><surname>Schade</surname> <given-names>P. F.</given-names></name> <name><surname>Hartmann</surname> <given-names>T. S.</given-names></name> <name><surname>Kreiman</surname> <given-names>G.</given-names></name> <name><surname>Livingstone</surname> <given-names>M. S.</given-names></name></person-group> (<year>2019</year>). <article-title>Evolving images for visual neurons using a deep generative network reveals coding principles and neuronal preferences.</article-title> <source><italic>Cell</italic></source> <volume>177</volume> <fpage>999</fpage>&#x2013;<lpage>1009</lpage>. <pub-id pub-id-type="doi">10.1016/j.cell.2019.04.005</pub-id> <pub-id pub-id-type="pmid">31051108</pub-id></mixed-citation></ref>
<ref id="B46"><mixed-citation publication-type="book"><collab>R Core Team.</collab> (<year>2024</year>). <source><italic>R: A Language and Environment for Statistical Computing.</italic></source> <publisher-loc>Vienna</publisher-loc>: <publisher-name>R Foundation for Statistical Computing</publisher-name>.</mixed-citation></ref>
<ref id="B47"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rolls</surname> <given-names>E. T.</given-names></name> <name><surname>Tovee</surname> <given-names>M. J.</given-names></name></person-group> (<year>1995</year>). <article-title>Sparseness of the neuronal representation of stimuli in the primate temporal visual cortex.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>73</volume> <fpage>713</fpage>&#x2013;<lpage>726</lpage>. <pub-id pub-id-type="doi">10.1152/jn.1995.73.2.713</pub-id> <pub-id pub-id-type="pmid">7760130</pub-id></mixed-citation></ref>
<ref id="B48"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Salinas Ru&#x00ED;z</surname> <given-names>J.</given-names></name> <name><surname>Montesinos L&#x00F3;pez</surname> <given-names>O. A.</given-names></name> <name><surname>Hern&#x00E1;ndez Ram&#x00ED;rez</surname> <given-names>G.</given-names></name> <name><surname>Crossa Hiriart</surname> <given-names>J.</given-names></name></person-group> (<year>2023</year>). <source><italic>Generalized Linear Mixed Models with Applications in Agriculture and Biology.</italic></source> <publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>. <pub-id pub-id-type="doi">10.1007/978-3-031-32800-8</pub-id></mixed-citation></ref>
<ref id="B49"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Scarf</surname> <given-names>D.</given-names></name> <name><surname>Stuart</surname> <given-names>M.</given-names></name> <name><surname>Johnston</surname> <given-names>M.</given-names></name> <name><surname>Colombo</surname> <given-names>M.</given-names></name></person-group> (<year>2016</year>). <article-title>Visual response properties of neurons in four areas of the avian pallium.</article-title> <source><italic>J. Comp. Physiol. A</italic></source> <volume>202</volume> <fpage>235</fpage>&#x2013;<lpage>245</lpage>. <pub-id pub-id-type="doi">10.1007/s00359-016-1071-6</pub-id> <pub-id pub-id-type="pmid">26868923</pub-id></mixed-citation></ref>
<ref id="B50"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shimizu</surname> <given-names>T.</given-names></name></person-group> (<year>1998</year>). <article-title>Conspecific recognition in pigeons (<italic>Columba livia</italic>) using dynamic video images.</article-title> <source><italic>Behaviour</italic></source> <volume>135</volume> <fpage>43</fpage>&#x2013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.1163/156853998793066429</pub-id></mixed-citation></ref>
<ref id="B51"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shimizu</surname> <given-names>T.</given-names></name> <name><surname>Bowers</surname> <given-names>A. N.</given-names></name></person-group> (<year>1999</year>). <article-title>Visual circuits of the avian telencephalon: Evolutionary implications.</article-title> <source><italic>Behav. Brain Res.</italic></source> <volume>98</volume> <fpage>183</fpage>&#x2013;<lpage>191</lpage>. <pub-id pub-id-type="doi">10.1016/S0166-4328(98)00083-7</pub-id> <pub-id pub-id-type="pmid">10683106</pub-id></mixed-citation></ref>
<ref id="B52"><mixed-citation publication-type="web"><collab>The MathWorks Inc.</collab> (<year>2024</year>). <source><italic>MATLAB.</italic></source> <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.mathworks.com">https://www.mathworks.com</ext-link> (accessed September 12, 2024)</comment>.</mixed-citation></ref>
<ref id="B53"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tsao</surname> <given-names>D. Y.</given-names></name> <name><surname>Freiwald</surname> <given-names>W. A.</given-names></name> <name><surname>Tootell</surname> <given-names>R. B.</given-names></name> <name><surname>Livingstone</surname> <given-names>M. S.</given-names></name></person-group> (<year>2006</year>). <article-title>A cortical region consisting entirely of face-selective cells.</article-title> <source><italic>Science</italic></source> <volume>311</volume> <fpage>670</fpage>&#x2013;<lpage>674</lpage>. <pub-id pub-id-type="doi">10.1126/science.1119983</pub-id> <pub-id pub-id-type="pmid">16456083</pub-id></mixed-citation></ref>
<ref id="B54"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>B.</given-names></name> <name><surname>Ponce</surname> <given-names>C. R.</given-names></name></person-group> (<year>2022</year>). <article-title>Tuning landscapes of the ventral stream.</article-title> <source><italic>Cell Rep.</italic></source> <volume>41</volume>:<fpage>111595</fpage>. <pub-id pub-id-type="doi">10.1016/j.celrep.2022.111595</pub-id> <pub-id pub-id-type="pmid">36351386</pub-id></mixed-citation></ref>
<ref id="B55"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ware</surname> <given-names>E. L. R.</given-names></name> <name><surname>Saunders</surname> <given-names>D. R.</given-names></name> <name><surname>Troje</surname> <given-names>N. F.</given-names></name></person-group> (<year>2017</year>). <article-title>Social interactivity in pigeon courtship behavior.</article-title> <source><italic>Curr. Zool.</italic></source> <volume>63</volume> <fpage>85</fpage>&#x2013;<lpage>95</lpage>. <pub-id pub-id-type="doi">10.1093/cz/zow066</pub-id> <pub-id pub-id-type="pmid">29491966</pub-id></mixed-citation></ref>
<ref id="B56"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ware</surname> <given-names>E.</given-names></name> <name><surname>Saunders</surname> <given-names>D. R.</given-names></name> <name><surname>Troje</surname> <given-names>N. F.</given-names></name></person-group> (<year>2015</year>). <article-title>The influence of motion quality on responses towards video playback stimuli.</article-title> <source><italic>Biol. Open</italic></source> <volume>4</volume> <fpage>803</fpage>&#x2013;<lpage>811</lpage>. <pub-id pub-id-type="doi">10.1242/bio.011270</pub-id> <pub-id pub-id-type="pmid">25964659</pub-id></mixed-citation></ref>
<ref id="B57"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wilkinson</surname> <given-names>A.</given-names></name> <name><surname>Specht</surname> <given-names>H. L.</given-names></name> <name><surname>Huber</surname> <given-names>L.</given-names></name></person-group> (<year>2010</year>). <article-title>Pigeons can discriminate group mates from strangers using the concept of familiarity.</article-title> <source><italic>Anim. Behav.</italic></source> <volume>80</volume> <fpage>109</fpage>&#x2013;<lpage>115</lpage>. <pub-id pub-id-type="doi">10.1016/j.anbehav.2010.04.006</pub-id></mixed-citation></ref>
<ref id="B58"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Willenbockel</surname> <given-names>V.</given-names></name> <name><surname>Sadr</surname> <given-names>J.</given-names></name> <name><surname>Fiset</surname> <given-names>D.</given-names></name> <name><surname>Horne</surname> <given-names>G. O.</given-names></name> <name><surname>Gosselin</surname> <given-names>F.</given-names></name> <name><surname>Tanaka</surname> <given-names>J. W.</given-names></name></person-group> (<year>2010</year>). <article-title>Controlling low-level image properties: The SHINE toolbox.</article-title> <source><italic>Behav. Res. Methods</italic></source> <volume>42</volume> <fpage>671</fpage>&#x2013;<lpage>684</lpage>. <pub-id pub-id-type="doi">10.3758/BRM.42.3.671</pub-id> <pub-id pub-id-type="pmid">20805589</pub-id></mixed-citation></ref>
<ref id="B59"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yu</surname> <given-names>Z.</given-names></name> <name><surname>Guindani</surname> <given-names>M.</given-names></name> <name><surname>Grieco</surname> <given-names>S. F.</given-names></name> <name><surname>Chen</surname> <given-names>L.</given-names></name> <name><surname>Holmes</surname> <given-names>T. C.</given-names></name> <name><surname>Xu</surname> <given-names>X.</given-names></name></person-group> (<year>2022</year>). <article-title>Beyond t test and ANOVA: Applications of mixed-effects models for more rigorous statistical analysis in neuroscience research.</article-title> <source><italic>Neuron</italic></source> <volume>110</volume> <fpage>21</fpage>&#x2013;<lpage>35</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2021.10.030</pub-id> <pub-id pub-id-type="pmid">34784504</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2731429/overview">Mengmeng Li</ext-link>, Zhengzhou University, China</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/918995/overview">Lukas Anneser</ext-link>, Friedrich Miescher Institute for Biomedical Research (FMI), Switzerland</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3311323/overview">Jiangtao Wang</ext-link>, Zhengzhou University, China</p></fn>
</fn-group>
</back>
</article>