<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Artif. Intell.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Artificial Intelligence</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Artif. Intell.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2624-8212</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/frai.2026.1656290</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Quantification of feeding intensity and feeding control of largemouth bass based on water surface vibration characteristics</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Yufei</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2773452"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Liu</surname>
<given-names>Andong</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3251292"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Yulei</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3291438"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Ni</surname>
<given-names>Qi</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Haigen</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Song</surname>
<given-names>Hongqiao</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wang</surname>
<given-names>Yong</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Cheng</surname>
<given-names>Xiaoyan</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Fishery Machinery and Instrument Research Institute</institution>, <city>Shanghai</city>, <country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>School of Navigation and Naval Architecture, Dalian Ocean University</institution>, <city>Dalian</city>, <country country="CN">China</country></aff>
<aff id="aff3"><label>3</label><institution>Laiyang Fishery Technology Extension Station</institution>, <city>Yantai</city>, <state>Shandong</state>, <country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Andong Liu, <email xlink:href="mailto:liuandong@fmiri.ac.cn">liuandong@fmiri.ac.cn</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-13">
<day>13</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>9</volume>
<elocation-id>1656290</elocation-id>
<history>
<date date-type="received">
<day>29</day>
<month>06</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>08</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Zhang, Liu, Zhang, Ni, Zhang, Song, Wang and Cheng.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Zhang, Liu, Zhang, Ni, Zhang, Song, Wang and Cheng</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-13">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>In response to the demand for precise feeding in high-density aquaculture, this study established a dynamic prediction model for fish feeding intensity by integrating vibration signal quantification and deep learning. Through multidimensional experiments (fish size: 50&#x2013;300&#x202F;g; stocking density: 20&#x2013;60 fish/group; feeding speed: 1&#x2013;3&#x202F;g/s; feed particle size: 2#, 4#, 6#), we quantified the three-axis displacement signals of <italic>Micropterus salmoides</italic> during feeding. Results demonstrated significant effects of all parameters on water surface fluctuations (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.05). Vibration displacement exhibited linear relationships with fish size and density. The 300&#x202F;g group showed 109.7% higher peak amplitude than the 50&#x202F;g group, while the 60-fish density group exceeded the 20-fish group by 141.9%. Optimal palatability (4#) reduced fluctuation frequency by 42%. A predictive model for feeding vibration patterns was developed, incorporating fish size (S), density (D), feeding speed (V), feed particle size (<italic>&#x03A6;</italic>), real-time triaxial vibration sum, and time series (t) as inputs to predict the summed vibration displacement at t&#x202F;+&#x202F;5&#x202F;s, which serves as a quantitative proxy for feeding intensity. The Long Short-Term Memory (LSTM) model accurately captured fish feeding dynamics (RMSE&#x202F;=&#x202F;69.43&#x202F;&#x03BC;m, MAE&#x202F;=&#x202F;48.00&#x202F;&#x03BC;m, R<sup>2</sup>&#x202F;=&#x202F;0.883). In comparative analysis, the LSTM outperformed Gated Recurrent Unit (GRU) and Transformer models in forecasting accuracy. Deployed on an embedded system (Orange Pi AiPRO), closed-loop tests demonstrated superior performance: residual feed rates were &#x2264; 0.8% across all trials, outperforming optical flow (2.69% residuals) and graph neural network (6.58% residuals) methods. 
The space complexity of the vibration-LSTM approach was only 6.4&#x2013;31.8% of GCN-based approaches, enabling cost-effective (&#x003C;$200) real-time control.</p>
</abstract>
<kwd-group>
<kwd>aquaculture</kwd>
<kwd>fish appetite quantification</kwd>
<kwd>fish feeding model</kwd>
<kwd>fluctuations in fish feeding</kwd>
<kwd>online intelligent feeding</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. The authors acknowledge funding from the National Key Research and Development Program of China [grant number 2023YFD2400402], Central public-interest Scientific Institution Basal Research Fund, FMIRI of CAFS [grant number 2024YJS013], Central Public-interest Scientific Institution Basal Research Fund, CAFS [grant number 2025XT0104].</funding-statement>
</funding-group>
<counts>
<fig-count count="18"/>
<table-count count="4"/>
<equation-count count="2"/>
<ref-count count="21"/>
<page-count count="15"/>
<word-count count="6911"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>AI in Food, Agriculture and Water</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>In factory-based largemouth bass farming, feed costs account for more than 60% of the total farming costs, and whether feeding is reasonable is directly related to the profit margin of aquatic products (<xref ref-type="bibr" rid="ref20">Zhou et al., 2018</xref>). Traditional feeding methods usually rely on the experience and intuition of skilled workers. However, with the aging of the population, the labor shortage problem is becoming more and more serious, and manual feeding alone can no longer meet the needs of industrial scale (<xref ref-type="bibr" rid="ref19">Zheng and Zheng, 2025</xref>). Although various automatic feeders have appeared on the market, conventional automatic feeder technology cannot achieve autonomous decision-making, and its promotion and application are greatly restricted. Therefore, intelligent feeding technology with autonomous decision-making is the future development direction of fish feeding methods (<xref ref-type="bibr" rid="ref21">Zhu et al., 2022</xref>).</p>
<p>Recent research has focused on estimating fish feeding intensity by analyzing behavioral signals through machine vision, acoustics, and water surface fluctuations (<xref ref-type="bibr" rid="ref15">Wu et al., 2022</xref>; <xref ref-type="bibr" rid="ref1">Biazi and Marques, 2023</xref>; <xref ref-type="bibr" rid="ref6">F&#x00F8;re et al., 2024</xref>). Vision-based methods, such as optical flow and deep learning models, classify feeding activity into discrete states (e.g., none, weak, medium, and strong) with high accuracy (&#x003E;90%) (<xref ref-type="bibr" rid="ref17">Xu et al., 2022</xref>; <xref ref-type="bibr" rid="ref8">Hu et al., 2025</xref>). Advanced techniques like 3D convolutional neural networks and Graph Convolutional Networks (GCNs) have further improved performance by modeling spatiotemporal dynamics of fish schools (<xref ref-type="bibr" rid="ref12">Ubina et al., 2021</xref>).</p>
<p>Acoustic methods analyze sounds generated during feeding&#x2014;such as chewing, swallowing, and bait impacts&#x2014;using underwater hydrophones. By leveraging time-frequency representations (e.g., Mel spectrograms, STFT) and feature fusion strategies, these approaches achieve high classification accuracy (&#x003E;96%) (<xref ref-type="bibr" rid="ref11">Qi et al., 2023</xref>; <xref ref-type="bibr" rid="ref3">Du et al., 2023a</xref>,<xref ref-type="bibr" rid="ref5">b</xref>). Transformer-based models, such as the Audio Spectrum Swin Transformer, support rapid inference and real-time monitoring of feeding intensity (<xref ref-type="bibr" rid="ref18">Zeng et al., 2023</xref>). However, acoustic methods are susceptible to ambient noise from operational equipment (e.g., pumps, aerators), which can mask biologically relevant signals. To address this, multimodal fusion frameworks that combine audio, video, and sonar data have been proposed, achieving robustness in challenging conditions (<xref ref-type="bibr" rid="ref4">Du et al., 2024</xref>).</p>
<p>Vibration-based methods, which capture water surface fluctuations via accelerometers mounted on floating platforms, offer a complementary approach. <xref ref-type="bibr" rid="ref16">Xiang (2022)</xref> developed an adaptive feeding algorithm based on three-axis accelerometer feedback and proposed a quantitative index of feeding intensity. <xref ref-type="bibr" rid="ref10">Pan et al. (2023)</xref> used a six-axis accelerometer to identify distinct feeding stages in crucian carp, leading to an automatic control method that improved feed utilization by 7.66%.</p>
<p>Despite these advances, current appetite assessment often relies on subjective visual evaluation, producing only qualitative labels without quantifiable metrics. Furthermore, systematic relationships between water surface fluctuations and key culture parameters&#x2014;such as fish size, stocking density, feeding rate, and feed particle size&#x2014;remain unestablished for species like largemouth bass. Crucially, existing studies classify appetite into discrete states but do not support the continuous, quantitative prediction of feeding intensity essential for dynamic, proportional feed control.</p>
<p>To bridge this gap, we propose a predictive model based on Long Short-Term Memory (LSTM) networks. LSTMs, a type of recurrent neural network (RNN), excel at capturing long-range temporal dependencies in sequential data (<xref ref-type="bibr" rid="ref7">Hochreiter and Schmidhuber, 1997</xref>). Feeding behavior in largemouth bass is a continuous dynamic process where current surface vibrations are influenced by prior states. LSTM&#x2019;s gating mechanisms mitigate the vanishing gradient problem common in standard RNNs, making the network particularly suitable for modeling such temporal sequences and predicting future fluctuations. Therefore, we adopt an LSTM architecture to model the complex, time-dependent relationships among multi-dimensional inputs (fish size, density, feeding rate, and feed size), real-time triaxial vibration signals, and future feeding intensity. The model&#x2019;s output is the predicted future vibration displacement. We then utilize this displacement value as a continuous, quantitative proxy for feeding intensity, which forms the basis for dynamic feed control.</p>
</sec>
<sec sec-type="materials|methods" id="sec2">
<label>2</label>
<title>Materials and methods</title>
<sec id="sec3">
<label>2.1</label>
<title>Ethical statement</title>
<p>All research on animal participation in this article has passed the welfare and ethical review of experimental animals by FMIRI (FMIRI-AWE-2024-001). It should be noted that our experiments were conducted in an industrial recirculating water system (RAS) given its indispensable role in aquaculture and its rapid growth trend.</p>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>Experimental system</title>
<p>The recirculating aquaculture system (RAS) comprised six 1-m<sup>3</sup> circular rearing tanks (water volume: 600&#x202F;L; radius: 75&#x202F;cm, water depth: 40&#x202F;cm), each equipped with a screw-type feeder (feeding speed: 0&#x2013;5&#x202F;g/s). Water quality was rigorously controlled: temperature 26&#x2013;30&#x202F;&#x00B0;C, dissolved oxygen &#x2265;5&#x202F;mg/L (maintained at 5.5&#x202F;&#x00B1;&#x202F;0.5&#x202F;mg/L during acclimation/experiment), pH 7.2&#x2013;8.5 (maintained at 7.2&#x202F;&#x00B1;&#x202F;0.5 during acclimation/experiment), nitrate &#x2264;0.5&#x202F;mg/L, and total ammonia nitrogen (TAN)&#x202F;&#x2264;&#x202F;0.8&#x202F;mg/L.</p>
<p>Vibration data acquisition employed a WT-VB01-485 triaxial sensor (47&#x202F;&#x00D7;&#x202F;38&#x202F;&#x00D7;&#x202F;15&#x202F;mm) embedded within a 60&#x00B0; hardened EVA float (&#x00D8; 180&#x202F;&#x00D7;&#x202F;30&#x202F;mm), with electronic components waterproofed using conformal coating. This sensor assembly was secured 15&#x2013;20&#x202F;cm downstream of the feeder outlet via an 80&#x202F;g silicone-weighted data line and surrounded by a &#x00D8; 60&#x202F;cm float ring to confine floating feed dispersion. Video monitoring utilized a camera (120 fps, 1920&#x202F;&#x00D7;&#x202F;1080 resolution) mounted 40&#x202F;cm above the water surface on an adjustable bracket. Real-time observational control was facilitated through Wi-Fi connectivity to a mobile device (<xref ref-type="fig" rid="fig1">Figure 1</xref>).</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Experimental system device.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Large conical white hopper mounted above a blue circular water tank inside an industrial facility, with pipes and a red circular structure floating in the tank and other similar tanks visible in the background.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec5">
<label>2.3</label>
<title>Experimental design</title>
<p>Juvenile largemouth bass (<italic>Micropterus salmoides</italic>) were grouped by size: small (50&#x202F;g&#x202F;&#x00B1;&#x202F;4&#x202F;g), medium (150&#x202F;g&#x202F;&#x00B1;&#x202F;7&#x202F;g), and large (300&#x202F;g&#x202F;&#x00B1;&#x202F;11&#x202F;g). Fish were stocked at densities of 20&#x2013;60 per tank and fed once daily (16:00) with Tongwei California bass expanded feed (Specifications 2#, 4#, 6#; Crude protein &#x2265;46.0%, Crude fat &#x2265;6.0%). The feed input rate was set to 1, 2, or 3&#x202F;g/s as an experimental variable. To ensure statistical robustness, each experimental condition (e.g., each fish size or density level) was replicated across two independent rearing tanks, with three repeated feeding trials conducted per tank. The detailed experimental design is summarized in <xref ref-type="table" rid="tab1">Table 1</xref>.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Experimental design and parameters for the feeding trials.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Experiment name</th>
<th align="center" valign="top">Fish weight (g)</th>
<th align="center" valign="top">Number of fish per tank (n)</th>
<th align="center" valign="top">Feed input rate (g/s)</th>
<th align="center" valign="top">Feed particle size</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Size experiment</td>
<td align="center" valign="middle">50, 150, 300</td>
<td align="center" valign="middle">30</td>
<td align="center" valign="middle">1</td>
<td align="center" valign="middle">4#</td>
</tr>
<tr>
<td align="left" valign="middle">Density experiment</td>
<td align="center" valign="middle">50</td>
<td align="center" valign="middle">20, 40, 60</td>
<td align="center" valign="middle">1</td>
<td align="center" valign="middle">4#</td>
</tr>
<tr>
<td align="left" valign="middle">Feed input rate experiment</td>
<td align="center" valign="middle">300</td>
<td align="center" valign="middle">30</td>
<td align="center" valign="middle">1, 2, 3</td>
<td align="center" valign="middle">4#</td>
</tr>
<tr>
<td align="left" valign="middle">Feed particle size experiment</td>
<td align="center" valign="middle">300</td>
<td align="center" valign="middle">30</td>
<td align="center" valign="middle">1</td>
<td align="center" valign="middle">2#, 4#, 6#</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec6">
<label>2.4</label>
<title>Overview of the proposed approach</title>
<p>To address the demand for precise feeding control in high-density aquaculture, this study established a dynamic prediction model for fish feeding intensity by integrating vibration signal quantification and deep learning. The research workflow comprised three key stages (<xref ref-type="fig" rid="fig2">Figure 2</xref>):</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Water surface fluctuation parameter extraction.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph showing vibration displacement in micrometers against time in five-second intervals for X (green), Y (light blue), Z (blue), XYZ (orange), and Background-XYZ (red) axes, highlighting the highest peak in XYZ near time interval 4.</alt-text>
</graphic>
</fig>
<p>Analysis of Feeding Fluctuation Patterns: Through synchronous video verification, we analyzed the vibration characteristics of largemouth bass during feeding under variations in key parameters: fish size (S), stocking density (D), feeding rate (V), and feed particle size (<italic>&#x03A6;</italic>).</p>
<p>Prediction Modeling of Water Surface Fluctuations: A predictive model was developed using the multi-dimensional experimental data. The input variables included time (t), the static parameters (S, D, V, <italic>&#x03A6;</italic>), and the real-time triaxial vibration displacement. The output variable was the summed vibration displacement over the next 5&#x202F;s (prediction horizon <italic>N</italic>&#x202F;=&#x202F;5), representing a quantitative proxy for the predicted feeding intensity. The <italic>Z</italic>-score normalization method was applied to eliminate dimensional differences and enhance model convergence. The modeling architecture utilized a Long Short-Term Memory (LSTM) network. All experimental runs (408 trials in total) were pooled into a unified dataset. Each data instance consisted of a 5-s window (sampled at 10&#x202F;Hz) of the summed triaxial vibration displacement, paired with the corresponding static parameters (S, D, V, &#x03A6;). This structured dataset was then randomly split into training (80%) and validation (20%) sets for model development.</p>
<p>Algorithm Deployment and Feeding Control Testing: The trained model was compressed and deployed on an embedded system (Orange Pi AiPRO) for real-time prediction (with a delay &#x2264; 50&#x202F;ms). The feeding system adjusted the feeding rate (&#x0394;V) based on the real-time feedback of the predicted feeding intensity, enabling anticipatory control, including early feeding cessation. The performance of this algorithm was rigorously compared against optical flow and graph neural network (GCN) methods to evaluate its efficacy.</p>
</sec>
<sec id="sec7">
<label>2.5</label>
<title>Data preprocessing and feature engineering</title>
<p>The triaxial displacement signals (<italic>X</italic>, <italic>Y</italic>, <italic>Z</italic>) were measured using the WT-VB01-485 sensor. The X and Y axes represent horizontal displacements on the water surface plane, capturing lateral wave movements, while the Z axis corresponds to vertical displacement, capturing heave motion. This configuration allows comprehensive quantification of surface agitation dynamics. <xref ref-type="fig" rid="fig2">Figure 2</xref> compares feeding vibrations versus environmental noise (aeration bubbles/water flow), confirming that our filtering method effectively isolates feeding signals. The Python Serial module enabled real-time data capture with amplitude filtering:</p>
<disp-formula id="E1">
<mml:math id="M1">
<mml:mover accent="true">
<mml:mi>D</mml:mi>
<mml:mo stretchy="true">&#x02DC;</mml:mo>
</mml:mover>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>=</mml:mo>
<mml:mo stretchy="true">{</mml:mo>
<mml:mtable displaystyle="true">
<mml:mtr>
<mml:mtd>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi>raw</mml:mi>
</mml:msub>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mspace width="1em"/>
<mml:mspace width="0.25em"/>
<mml:mtext>if</mml:mtext>
<mml:mo>&#x2223;</mml:mo>
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mi>raw</mml:mi>
</mml:msub>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>t</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>&#x2223;</mml:mo>
<mml:mo>&#x2265;</mml:mo>
<mml:msub>
<mml:mi>&#x03B4;</mml:mi>
<mml:mtext>threshold</mml:mtext>
</mml:msub>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd>
<mml:mn>0</mml:mn>
<mml:mspace width="1em"/>
<mml:mspace width="0.25em"/>
<mml:mtext>otherwise</mml:mtext>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(1)</label>
</disp-formula>
<p>The amplitude filtering method is described in <xref ref-type="disp-formula" rid="E1">Equation 1</xref>. Where <italic>D</italic><sub>raw</sub>(t) is raw displacement and &#x03B4;<sub>threshold</sub>&#x202F;=&#x202F;0.5&#x202F;mm (empirically calibrated to bubble noise). Triaxial displacements (<italic>X</italic><sub>i</sub>, <italic>Y</italic><sub>i</sub>, <italic>Z</italic><sub>i</sub>) were summed per 5&#x202F;s analysis window:</p>
<disp-formula id="E2">
<mml:math id="M2">
<mml:msub>
<mml:mi>D</mml:mi>
<mml:mo>&#x2211;</mml:mo>
</mml:msub>
<mml:mo stretchy="true">[</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo stretchy="true">]</mml:mo>
<mml:mo>=</mml:mo>
<mml:munderover>
<mml:mo movablelimits="false">&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>5</mml:mn>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>5</mml:mn>
<mml:mi>k</mml:mi>
<mml:mo>+</mml:mo>
<mml:mn>299</mml:mn>
</mml:mrow>
</mml:munderover>
<mml:mo stretchy="true">(</mml:mo>
<mml:mo>&#x2223;</mml:mo>
<mml:msub>
<mml:mi>X</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2223;</mml:mo>
<mml:mo>+</mml:mo>
<mml:mo>&#x2223;</mml:mo>
<mml:msub>
<mml:mi>Y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2223;</mml:mo>
<mml:mo>+</mml:mo>
<mml:mo>&#x2223;</mml:mo>
<mml:msub>
<mml:mi>Z</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2223;</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mn>10</mml:mn>
<mml:mi>Hz</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
<label>(2)</label>
</disp-formula>
<p>The triaxial displacement summation is calculated using <xref ref-type="disp-formula" rid="E2">Equation 2</xref>. Maximum wave height per window derived as <italic>H</italic><sub>max</sub>[k]&#x202F;=&#x202F;max(D<sub>&#x2211;</sub>[t<sub>5k</sub>:t<sub>5k&#x202F;+&#x202F;5</sub>]). Strong inter-axis correlation (&#x03C1;<sub>XY</sub>&#x202F;=&#x202F;0.93, &#x03C1;<sub>XZ</sub>&#x202F;=&#x202F;0.88, &#x03C1;<sub>YZ</sub>&#x202F;=&#x202F;0.91) validated signal summation for amplifying feeding signatures.</p>
</sec>
<sec id="sec8">
<label>2.6</label>
<title>Prediction modeling of water surface fluctuations for largemouth bass feeding</title>
<p>The core of our predictive framework was a Long Short-Term Memory (LSTM) network, designed to forecast the sum of triaxial vibration displacement 5&#x202F;s into the future. This predicted displacement sum serves as a quantitative proxy for assessing feeding intensity at the water surface. The modeling procedure was structured as follows.</p>
<p>As shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>, the model integrated both static experimental parameters and dynamic time-series data. The static input features included fish size (S, g), stocking density (D, fish/tank), feed input rate (V, g/s), and feed particle size (&#x03A6;, mm). The latter was numerically encoded using the approximate diameters of the commercial feed types (3&#x202F;mm, 5&#x202F;mm, and 7&#x202F;mm for 2#, 4#, and 6#, respectively). The primary dynamic temporal input was the real-time summed absolute triaxial vibration displacement (D&#x03A3;). The model was designed to output the predicted future value of this vibration displacement sum (D&#x03A3;) at a horizon of <italic>t</italic>&#x202F;+&#x202F;5 seconds.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Model input and output structure.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart diagram showing raw input data branching to five variables: Time t, Fish Size S, Stocking Density D, Feeding Rate V, and Triaxial Vibration displacement XYZ. Output variable leads to vibration displacement of next five seconds.</alt-text>
</graphic>
</fig>
<p>As shown in <xref ref-type="fig" rid="fig4">Figure 4</xref>, prior to model training, all input features were normalized using <italic>Z</italic>-score normalization to mitigate scale differences and accelerate convergence. The complete dataset, comprising 408 experimental runs, was partitioned into training and validation subsets with an 8:2 ratio.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Data partitioning and preprocessing configuration.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart showing 408 datasets split into two groups with arrows: eighty percent allocated to training and twenty percent allocated to validation. Diagram visually represents data partitioning for machine learning.</alt-text>
</graphic>
</fig>
<p>As shown in <xref ref-type="fig" rid="fig5">Figure 5</xref>, the architecture of the LSTM network was configured to capture complex temporal dependencies. The input layer receives the normalized feature vector. This is followed by two stacked LSTM layers, each containing 128&#x202F;units with tanh activation functions, to learn hierarchical temporal patterns from the sequential data. To prevent overfitting, a dropout layer with a rate of 0.2 was applied after the LSTM layers. The extracted features were then passed through a fully connected (Dense) layer with 64&#x202F;units and a ReLU activation function for non-linear transformation. Finally, the output layer employs a linear activation unit to produce the final continuous prediction of the displacement value.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Architectural and training parameters of the LSTM model.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart illustrating a neural network architecture for predicting feeding intensity, beginning with z-score normalization, followed by input, two hidden layers with one hundred twenty-eight units and tanh activation each, a dropout layer with rate zero point two, a fully connected layer with sixty-four ReLU units, and an output layer.</alt-text>
</graphic>
</fig>
<p>For the training configuration, a historical window of 5&#x202F;s (corresponding to a time step of 50 at a 10&#x202F;Hz sampling rate) was used as the input context for each prediction. The model was trained using the Adam optimizer with a learning rate of 0.001 and a batch size of 32. The loss function was defined as Mean Squared Error (MSE). To further guard against overfitting, an early stopping callback was implemented to halt training if the validation loss failed to improve for 20 consecutive epochs.</p>
<p>To benchmark the performance of the proposed LSTM model, we also implemented and trained two other prominent sequential models on the same dataset:</p>
<p>Gated Recurrent Unit (GRU): A variant of RNNs similar to LSTM but with a simplified gating mechanism, often offering comparable performance with fewer parameters. Our implemented GRU network comprised two stacked layers with 128&#x202F;units each, using tanh activations, followed by a dropout layer (0.2) and a dense output layer with a linear activation. It was trained with the same configuration (Adam optimizer, lr&#x202F;=&#x202F;0.001, MSE loss) as the LSTM model to ensure a fair comparison (<xref ref-type="bibr" rid="ref2">Cho et al., 2014</xref>).</p>
<p>Transformer: A model architecture based on self-attention mechanisms, which has shown remarkable success in various sequence processing tasks by weighing the importance of different time steps. The Transformer encoder implemented here utilized a single-head self-attention mechanism with positional encoding for the 50-step input sequence. It consisted of two encoder layers (hidden dimension 128), a feed-forward network (dimension 256), and a final linear projection layer for regression. Training hyperparameters matched those used for LSTM and GRU (<xref ref-type="bibr" rid="ref13">Vaswani et al., 2017</xref>).</p>
<p>All models were trained to perform the same regression task: predicting the summed vibration displacement at t&#x202F;+&#x202F;5&#x202F;s. Their performance was evaluated and compared using Root Mean Square Error (RMSE), Mean Absolute Error (MAE), and the coefficient of determination (<italic>R</italic><sup>2</sup>).</p>
</sec>
<sec id="sec9">
<label>2.7</label>
<title>Algorithm deployment and feeding control testing</title>
<p>To enable engineering application, an embedded intelligent feeding system prototype was developed for closed-loop control testing. The hardware adopts a three-layer architecture:</p>
<p>Perception layer: Comprising a WT-VB01M triaxial vibration sensor (10&#x202F;Hz sampling) and Arduino Mega 2560 MCU for signal acquisition/preprocessing (amplitude filtering and triaxial displacement summation).</p>
<p>Decision layer: Centered on an Orange Pi AiPRO embedded platform (quad-core Cortex-A55 CPU, 4&#x202F;GB RAM) executing the trained LSTM model to analyze current/predicted vibration trends.</p>
<p>Execution layer: Utilizing a custom screw-type feeder (0&#x2013;5&#x202F;g/s speed control) receiving stop/speed-adjust commands via serial communication.</p>
<p>Software control logic: The system continuously acquires vibration signals, processes them (amplitude filtering and triaxial summation), and inputs them into the LSTM model to predict values for the next 5&#x202F;s (<xref ref-type="table" rid="tab2">Table 2</xref>). If a declining trend is detected over two consecutive analysis cycles (5&#x202F;s/cycle), a stop-feeding command is issued; otherwise, the current speed is maintained. Performance was validated against optical flow and graph convolutional network (GCN) methods in repeated feeding scenarios. For the GCN-based method, we replicated the architecture and training protocol described by <xref ref-type="bibr" rid="ref14">Wei et al. (2022)</xref>, which constructs a graph from time-series features and applies graph convolutional layers for appetite state classification. This implemented GCN model was then applied to our experimental setup for closed-loop control comparison. Evaluation metrics included actual feed input, residual feed [for residual feed rate calculation: RFR (Residual feed rate %)&#x202F;=&#x202F;(Residual feed mass/Total feed mass)&#x202F;&#x00D7;&#x202F;100%], and algorithm control signals (<xref ref-type="fig" rid="fig6">Figure 6</xref>).</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Control simplified pseudo code.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="bottom">Intelligent feeding control simplified pseudo code</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">1 INPUT: Vibration data (10&#x202F;Hz), Fish params (S, D, <italic>&#x03A6;</italic>), LSTM model</td>
</tr>
<tr>
<td align="left" valign="middle">2 OUTPUT: Feeder control signals</td>
</tr>
<tr>
<td align="left" valign="middle">3 PROCEDURE Main Control:</td>
</tr>
<tr>
<td align="left" valign="middle">4 feeder_speed &#x2190; 2.0&#x202F;g/s</td>
</tr>
<tr>
<td align="left" valign="middle">5 buffer &#x2190; [0]&#x002A;50, decline_count &#x2190; 0</td>
</tr>
<tr>
<td align="left" valign="middle">6 SET_FEEDER_SPEED (feeder_speed)</td>
</tr>
<tr>
<td align="left" valign="middle">7 WHILE TRUE:</td>
</tr>
<tr>
<td align="left" valign="middle">8 // Data acquisition and processing</td>
</tr>
<tr>
<td align="left" valign="middle">9 D_sum &#x2190; SUM_ABS(FILTER_NOISE(READ_VIBRATION()))</td>
</tr>
<tr>
<td align="left" valign="middle">10 buffer.APPEND(D_sum).POP_FRONT()</td>
</tr>
<tr>
<td align="left" valign="middle">11 // Periodic prediction (5&#x202F;s intervals)</td>
</tr>
<tr>
<td align="left" valign="middle">12 IF SYSTEM_TIME % 5&#x202F;s == 0:</td>
</tr>
<tr>
<td align="left" valign="middle">13 prediction &#x2190; LSTM_MODEL.PREDICT(NORMALIZE([S, D, feeder_speed, &#x03A6;]&#x202F;+&#x202F;buffer))</td>
</tr>
<tr>
<td align="left" valign="middle">14 // Trend analysis</td>
</tr>
<tr>
<td align="left" valign="middle">15 first&#x202F;=&#x202F;MEAN(prediction[0:25]), second&#x202F;=&#x202F;MEAN(prediction[25:50])</td>
</tr>
<tr>
<td align="left" valign="middle">16 decline_count &#x2190; (second &#x003C; 0.9&#x002A;first)? decline_count+1: 0</td>
</tr>
<tr>
<td align="left" valign="middle">17 // Control decision</td>
</tr>
<tr>
<td align="left" valign="middle">18 IF decline_count &#x2265; 2: STOP_FEEDER(); BREAK</td>
</tr>
<tr>
<td align="left" valign="middle">19 ELSE: MAINTAIN_SPEED()</td>
</tr>
<tr>
<td align="left" valign="middle">20 END IF</td>
</tr>
<tr>
<td align="left" valign="middle">21 DELAY(100&#x202F;ms)</td>
</tr>
<tr>
<td align="left" valign="middle">22 END</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption>
<p>Configuration of the embedded intelligent feeding system.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g006.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart diagram showing a three-layer architecture for automated feeding control: Perception Layer includes a triaxial vibration sensor and Arduino Mega 2560; Decision Layer features data filtering, Orange Pi AiPRO, LSTM model execution, vibration trend analysis, and decision logic; Execution Layer outputs either stop feeding or maintain current speed to a screw feeder that controls feed rate to a feeding area.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec sec-type="results" id="sec10">
<label>3</label>
<title>Results and discussion</title>
<sec id="sec11">
<label>3.1</label>
<title>Parametric influences on water surface fluctuations</title>
<p>Multidimensional experiments revealed distinct displacement patterns governed by biological and operational parameters, providing a foundation for the predictive model.</p>
<p>Size effect: Larger bass generated significantly greater water surface displacements due to their higher mass and energy output during feeding. Analysis of all replicate observations (<italic>n</italic>&#x202F;=&#x202F;6 per size group, from 2 tanks &#x00D7; 3 trials) showed that the 300&#x202F;g cohort generated 109.7% higher peak displacement than the 50&#x202F;g group (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.05, <xref ref-type="fig" rid="fig7">Figure 7</xref>). The relationship between fish size (S) and peak displacement (Y) across all individual trials was characterized by a linear model (<italic>Y</italic>&#x202F;=&#x202F;1.7316S&#x202F;+&#x202F;236.41, <italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.9262, <xref ref-type="fig" rid="fig8">Figure 8</xref>). While the linear fit is strong, visual inspection of the scatter plot suggests a potential saturation trend between the 150&#x202F;g and 300&#x202F;g groups, which may indicate a non-linear scaling of feeding energy transfer with body mass in larger individuals.</p>
<fig position="float" id="fig7">
<label>Figure 7</label>
<caption>
<p>Water surface fluctuations of feeding largemouth bass of different sizes.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g007.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph compares vibration displacement over time for three different masses: 50 grams, 150 grams, and 300 grams. Higher masses result in greater and more prolonged vibration displacement.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig8">
<label>Figure 8</label>
<caption>
<p>Peak fluctuation patterns of water surface ingestion for largemouth bass of different sizes.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g008.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Scatter plot showing a positive linear relationship between fish size in grams and peak vibration displacement in micrometers, with a regression line equation of y equals 1.7316x plus 236.41 and R squared value 0.9262. Data points are colored by fish size groups: blue for fifty grams, dark blue for one hundred fifty grams, and red for three hundred grams. The red shaded area represents the ninety-five percent confidence interval. A text box highlights that peak vibration displacement increased by one hundred nine point seven percent between three hundred and fifty gram fish, with statistical significance at P less than zero point zero five.</alt-text>
</graphic>
</fig>
<p>Density effect: Increased stocking density led to more intense collective feeding activity. Based on all replicate data (<italic>n</italic>&#x202F;=&#x202F;6 per density level), the 60-fish groups exhibited 141.9% greater fluctuation amplitude than the 20-fish groups (<xref ref-type="fig" rid="fig9">Figure 9</xref>). The density-dependent effect was also highly linear across individual trials (<italic>Y</italic>&#x202F;=&#x202F;5.4250D&#x202F;+&#x202F;47.11, <italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.9828, <xref ref-type="fig" rid="fig10">Figure 10</xref>), where <italic>Y</italic> represents peak displacement and D represents density.</p>
<fig position="float" id="fig9">
<label>Figure 9</label>
<caption>
<p>Fluctuation of water surface for feeding of largemouth bass at different densities.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g009.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph comparing vibration displacement over time for groups of twenty, forty, and sixty fish. All groups peak between sixty and one hundred seconds, with higher fish counts showing greater displacement peaks.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig10">
<label>Figure 10</label>
<caption>
<p>Peak fluctuation patterns of water surface ingestion of largemouth bass at different densities.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g010.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Scatter plot visualizing peak vibration displacement in micrometers versus fish density, with regression line, ninety-five percent confidence interval, and groups for twenty, forty, and sixty fish. Data show a strong positive linear correlation, R squared 0.9828, and a regression equation y equals 5.4250x plus 47.11. Annotation notes a one hundred forty-one point nine percent increase from twenty to sixty fish.</alt-text>
</graphic>
</fig>
<p>Feeding rate effect: The rate of feed delivery influenced the temporal dynamics of feeding. Higher feeding speeds (3&#x202F;g/s) accelerated consumption, producing earlier vibration peaks (at approximately 20&#x202F;s) compared to slower rates (1&#x202F;g/s, peak at ~35&#x202F;s) (<xref ref-type="fig" rid="fig11">Figure 11</xref>). This suggests that fish respond to higher feed availability by intensifying their feeding effort over a shorter duration.</p>
<fig position="float" id="fig11">
<label>Figure 11</label>
<caption>
<p>Water surface fluctuations of feeding of largemouth bass at different feeding rates.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g011.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph showing vibration displacement in micrometers on the vertical axis, versus time in five-second increments on the horizontal axis, comparing three rates: 1g/s, 2g/s, and 3g/s. The 1g/s line peaks highest and fluctuates, 2g/s peaks slightly lower, and 3g/s maintains the lowest values, all declining over time. Data source is not specified.</alt-text>
</graphic>
</fig>
<p>Feed particle size effect: The size of feed pellets significantly affected the initial feeding behavior. Smaller #2 pellets (3&#x202F;mm) induced chaotic, high-frequency initial vibrations, likely due to competitive scrambling for numerous small particles. In contrast, the optimally palatable #4 feed (5&#x202F;mm) resulted in more stable, low-frequency oscillations (<xref ref-type="fig" rid="fig12">Figure 12</xref>), indicating efficient capture and consumption with reduced energy expenditure. This confirms that feed palatability can modulate the characteristics of surface fluctuations.</p>
<fig position="float" id="fig12">
<label>Figure 12</label>
<caption>
<p>Water surface fluctuations of feeding of largemouth bass under different feed particle sizes.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g012.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph comparing vibration displacement over time for three samples labeled NO.2, NO.4, and NO.6. NO.2 shows the highest and most variable displacement, peaking above seven hundred micrometers before declining. NO.4 and NO.6 display lower, more consistent peaks under four hundred micrometers. Time is measured in five-second intervals up to one hundred twenty seconds.</alt-text>
</graphic>
</fig>
<p>From a biomechanical perspective, the feeding behavior of fish generates kinetic energy, which is transferred to the water body through body movements, thereby causing water surface fluctuations. The positive correlations observed in the size and density experiments align with findings from acoustic studies on largemouth bass (<xref ref-type="bibr" rid="ref14">Wei et al., 2022</xref>). The feeding speed and feed particle size experiments demonstrate that operational parameters directly influence feeding kinetics. Faster feeding rates and larger pellets can shorten feeding time, a phenomenon also observed in catfish (<xref ref-type="bibr" rid="ref9">Khater et al., 2021</xref>). However, it is crucial to note that exceeding the fish&#x2019;s acceptable threshold for these parameters does not linearly improve efficiency and may instead increase energy waste. The quantification of these relationships provides a critical theoretical basis for precise feeding control, an aspect often overlooked in existing studies that focus solely on intensity classification without parametric context.</p>
</sec>
<sec id="sec12">
<label>3.2</label>
<title>Predictive model performance</title>
<p>The developed LSTM model achieved robust forecasting performance, effectively integrating static biometric parameters (S, D) with dynamic, real-time vibration sequences. The model&#x2019;s primary evaluation metrics, calculated on the validation set, were RMSE&#x202F;=&#x202F;69.43&#x202F;&#x03BC;m, MAE&#x202F;=&#x202F;48.00&#x202F;&#x03BC;m, and <italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.883, indicating high accuracy in predicting the future 5-s vibration displacement. Training converged stably within 200 epochs (<xref ref-type="fig" rid="fig13">Figure 13</xref>), demonstrating the architectural efficiency and appropriateness of the training configuration.</p>
<fig position="float" id="fig13">
<label>Figure 13</label>
<caption>
<p>Training and validation loss convergence of the LSTM prediction model.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g013.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line chart comparing training loss for LSTM, GRU, and Transformer models over 200 epochs, showing that LSTM and GRU losses decrease more quickly and stabilize lower than Transformer loss throughout training.</alt-text>
</graphic>
</fig>
<p>A critical strength of the model was its temporal precision. As shown in <xref ref-type="fig" rid="fig14">Figure 14</xref>, the predicted waveform closely tracked the actual measured displacement, capturing key inflection points (rise, peak, and decline) with a temporal deviation of less than 50&#x202F;ms. This capability is fundamental for anticipatory control, allowing the system to initiate feeding cessation before satiation is fully reached, thereby minimizing waste.</p>
<fig position="float" id="fig14">
<label>Figure 14</label>
<caption>
<p>Example prediction of the LSTM model against actual data.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g014.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line chart comparing DXYZ value predictions over thirty-one time steps using actual data (black line), LSTM (red), GRU (blue), and Transformer (green) models. Title reads &#x201C;DXYZ Prediction Comparison.&#x201D; All predicted values closely follow the actual trend, with variations increasing after the peak around time step seventeen. Axis labels indicate &#x201C;Time Step&#x201D; on the x-axis and &#x201C;DXYZ Value&#x201D; on the y-axis. Legend at top right distinguishes line colors for each method.</alt-text>
</graphic>
</fig>
<p>The model&#x2019;s regression capability was further validated through an internal architecture comparison (<xref ref-type="table" rid="tab3">Table 3</xref>). The LSTM achieved the best overall forecasting metrics, outperforming both the GRU (RMSE&#x202F;=&#x202F;85.15&#x202F;&#x03BC;m, <italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.828) and the Transformer (RMSE&#x202F;=&#x202F;80.98&#x202F;&#x03BC;m, <italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.618) models. The superior performance of LSTM can be attributed to its ability to capture long-range temporal dependencies in the sequential feeding data more effectively than the GRU, while the Transformer model may have been hampered by its relatively higher data requirements for optimal performance on this specific task. The reliability of the LSTM&#x2019;s continuous predictions is further corroborated by the narrow confidence intervals obtained through uncertainty quantification (<xref ref-type="fig" rid="fig15">Figure 15</xref>), which show high model certainty around the predicted values.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Performance comparison of sequential models for displacement prediction.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">INDEX</th>
<th align="center" valign="top">RMSE</th>
<th align="center" valign="top">MAE</th>
<th align="center" valign="top">R2</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">LSTM</td>
<td align="char" valign="middle" char=".">69.43</td>
<td align="char" valign="middle" char=".">48.00</td>
<td align="char" valign="middle" char=".">0.883</td>
</tr>
<tr>
<td align="left" valign="middle">GRU</td>
<td align="char" valign="middle" char=".">85.15</td>
<td align="char" valign="middle" char=".">67.43</td>
<td align="char" valign="middle" char=".">0.828</td>
</tr>
<tr>
<td align="left" valign="middle">Transformer</td>
<td align="char" valign="middle" char=".">80.98</td>
<td align="char" valign="middle" char=".">59.66</td>
<td align="char" valign="middle" char=".">0.618</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig position="float" id="fig15">
<label>Figure 15</label>
<caption>
<p>LSTM model prediction with confidence intervals for uncertainty quantification.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g015.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph titled &#x201C;Uncertainty Quantification (MC Dropout)&#x201D; compares actual DXYZ values and LSTM predicted values across thirty-one time steps, with a shaded red area illustrating the ninety-five percent confidence interval for predictions.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec13">
<label>3.3</label>
<title>Feasibility demonstration and comparative analysis</title>
<p>Our vibration-LSTM approach demonstrates distinct and contextually complementary advantages compared to alternative methods. A comparative analysis with Wei et al.&#x2019;s graph convolutional network (GCN) highlights a fundamental difference in objective. The GCN approach is architected for high-accuracy classification of discrete appetite states (e.g., weak, medium, and strong). In contrast, our framework is designed for continuous regression, delivering quantitative predictions of feeding intensity that are a prerequisite for dynamic, proportional feed modulation.</p>
<p>Computationally, the LSTM implementation requires only 1.2&#x202F;MB of memory and achieves 5.7&#x202F;&#x00D7;&#x202F;faster inference (48&#x202F;ms vs. 275&#x202F;ms) on equivalent embedded hardware, making it more suitable for cost-effective, real-time deployment.</p>
<p>In closed-loop feeding tests, this advantage translated into superior practical performance. The vibration-based control system consistently minimized waste, achieving residual feed rates (RFR) of &#x2264;0.8% across all trials (<xref ref-type="table" rid="tab4">Table 4</xref>). This system significantly outperformed both optical flow (2.69% RFR) and GCN-based (6.58% RFR) methods. The feeding protocol was as follows: if no residual feed was detected at the end of a meal, the system would immediately initiate the next feeding session for up to three consecutive meals. The appearance of residual feed indicated fish satiety, at which point feeding was terminated.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Feeding performance and residual feed rates (RFR) of different control algorithms.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Method</th>
<th align="center" valign="top">Meal 1 (300&#x202F;g)</th>
<th align="center" valign="top">Meal 2 (300&#x202F;g)</th>
<th align="center" valign="top">Meal 3 (300&#x202F;g)</th>
<th align="center" valign="top">Meal 1 (150&#x202F;g)</th>
<th align="center" valign="top">Meal 2 (150&#x202F;g)</th>
<th align="center" valign="top">Meal 3 (150&#x202F;g)</th>
<th align="center" valign="top">Meal 1 (40 fish density)</th>
<th align="center" valign="top">Meal 2 (40 fish density)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Float method</td>
<td align="center" valign="top">32.1&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">2&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">0&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">26&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">1.8&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">0&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">60&#x202F;g (RFR: 0.8%)</td>
<td align="center" valign="top">&#x2013;</td>
</tr>
<tr>
<td align="left" valign="top">Optical flow</td>
<td align="center" valign="top">27.3&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">6.5&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">6.8&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">11.2&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">4.6&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">6.3&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">35.6&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">22.3&#x202F;g (RFR: 2.69%)</td>
</tr>
<tr>
<td align="left" valign="top">Graph convolutional network</td>
<td align="center" valign="top">36&#x202F;g (RFR: 1.64%)</td>
<td align="center" valign="top">&#x2013;</td>
<td align="center" valign="top">&#x2013;</td>
<td align="center" valign="top">20.9&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">6.9&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">0&#x202F;g (RFR: 0%)</td>
<td align="center" valign="top">64.6&#x202F;g (RFR: 6.58%)</td>
<td align="center" valign="top">&#x2013;</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Values in parentheses indicate RFR for that meal [A dash (&#x2212;) indicates the trial was not conducted for that specific meal].</p>
</table-wrap-foot>
</table-wrap>
<p>Vibration-based control (<xref ref-type="fig" rid="fig16">Figure 16</xref>) provided continuous, high-resolution control signals (full-scale output range 0&#x2013;255). This enabled precise modulation and, crucially, anticipatory cessation. For instance, with 300&#x202F;g fish, the system commanded a sharp reduction in feed upon predicting a decline, achieving 0% residual feed across meals.</p>
<fig position="float" id="fig16">
<label>Figure 16</label>
<caption>
<p>Feeding control profile using the vibration-LSTM method.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g016.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph comparing vibration displacement per turn for DXYZ (red line) and OUT (green line) over time in seconds, with both showing a peak at the start and decreasing fluctuations over fifty-three seconds.</alt-text>
</graphic>
</fig>
<p>The curve shows the real-time summed vibration displacement (&#x201C;Vib_sum,&#x201D; red line) and the corresponding normalized control signal (&#x201C;Out,&#x201D; green line, range 0&#x2013;255) sent to the feeder.</p>
<p>Optical flow control (<xref ref-type="fig" rid="fig17">Figure 17</xref>) also offered a full output range but exhibited reduced sensitivity to subtle appetite changes. This resulted in less precise control and higher residual feed (e.g., 6.5&#x202F;g/6.8&#x202F;g for 300&#x202F;g fish), as it reacted to rather than predicted feeding dynamics.</p>
<fig position="float" id="fig17">
<label>Figure 17</label>
<caption>
<p>Feeding control profile using the optical flow method.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g017.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar and line chart comparing two variables, EK and OUT, over a time interval from 1 to 21 seconds. EK, shown as blue bars, displays intermittent peaks at seconds 4, 7, 11, and 15, reaching values near 450. OUT, represented by an orange line, forms plateaus just over 400 at the same time intervals, then drops sharply and remains low elsewhere. Both y-axes are labeled, and a legend distinguishes the series.</alt-text>
</graphic>
</fig>
<p>The curve shows the optical flow magnitude (&#x201C;EK,&#x201D; in arbitrary units, blue line) computed from video frames, and the resulting normalized control signal (&#x201C;Out,&#x201D; orange line, range 0&#x2013;255).</p>
<p>GCN-based control (<xref ref-type="fig" rid="fig18">Figure 18</xref>) operates on a classification paradigm. This approach adjusts the feeding rate to predetermined levels (e.g., 30 for &#x2018;weak&#x2019;, 120 for &#x2018;strong&#x2019; appetite). While effective for state differentiation, this step-wise control is less suited for the rapid, continuous adjustment needed to minimize waste, resulting in higher residual feed rates compared to the regression-based vibration method.</p>
<fig position="float" id="fig18">
<label>Figure 18</label>
<caption>
<p>Feeding control profile using the GCN-based method.</p>
</caption>
<graphic xlink:href="frai-09-1656290-g018.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar and line chart comparing appetite level and output over 27 seconds; blue bars show appetite fluctuations, and an orange line labeled OUT follows a step pattern with periodic rises and falls, both axes labeled and legend at the top.</alt-text>
</graphic>
</fig>
<p>The curve shows the classified appetite level (&#x201C;Appetite_State,&#x201D; discrete levels, blue line) output by the GCN model and the corresponding step-wise control signal (&#x201C;Out,&#x201D; orange line).</p>
<p>Embedded deployment confirmed real-time efficacy with an inference latency of 48.2&#x202F;&#x00B1;&#x202F;1.7&#x202F;ms, fulfilling industrial constraints (&#x003C;50&#x202F;ms). The control logic, triggering cessation after two consecutive declining trends, prevented 89.7% of overfeeding events with a low false positive rate (3.2%). Hardware optimization reduced power consumption to 8.3&#x202F;W&#x2014;47% lower than GCN implementations typically requiring GPU acceleration. The combination of markedly reduced feed waste and low hardware cost (approximately $200) positions the vibration-LSTM system as an economically attractive solution for precision feeding in RAS facilities.</p>
</sec>
</sec>
<sec sec-type="conclusions" id="sec14">
<label>4</label>
<title>Conclusions</title>
<p>This study establishes a robust, data-driven framework for the real-time quantification and predictive control of feeding intensity in largemouth bass aquaculture. By integrating water surface vibration analysis with an LSTM (vibration-LSTM) deep learning model, we have transitioned beyond qualitative appetite assessment to a continuous, quantitative, and anticipatory feeding strategy. The core achievements of this work are threefold:</p>
<list list-type="order">
<list-item>
<p>Parametric Quantification: We demonstrated and quantified significant linear relationships between vibration amplitude and key biological parameters (fish size: <italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.926; stocking density: <italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.983), providing a foundational model for understanding feeding energy expenditure.</p>
</list-item>
<list-item>
<p>Dynamic Prediction Model: We developed a vibration-LSTM architecture capable of accurately forecasting feeding intensity 5&#x202F;s into the future (<italic>R</italic><sup>2</sup>&#x202F;=&#x202F;0.883), enabling proactive control rather than reactive response.</p>
</list-item>
<list-item>
<p>Embedded Implementation and Validation: We successfully deployed the model on a low-cost, embedded platform (Orange Pi AiPRO), achieving real-time operation (latency &#x2264;50&#x202F;ms) and demonstrating superior performance in closed-loop tests, reducing residual feed rates to &#x2264;0.8%&#x2014;significantly outperforming optical flow (2.69%) and GCN-based (6.58%) methods.</p>
</list-item>
</list>
<p>This vibration-driven, predictively controlled strategy, characterized by its low hardware cost and demonstrated high feed utilization, presents a cost-effective and industrially viable solution for precision aquaculture.</p>
</sec>
<sec id="sec15">
<label>5</label>
<title>Limitations and future work</title>
<p>The proposed vibration-based intelligent feeding system, while promising, has several limitations that offer pathways for future research. The primary challenge lies in system scalability and feed type dependency.</p>
<sec id="sec16">
<label>5.1</label>
<title>Feasibility demonstration and comparative analysis</title>
<p>Validation trials in larger, commercial-scale RAS (20&#x202F;m<sup>3</sup> tanks) revealed a performance decline attributable to wave damping effects over longer distances and increased spatial heterogeneity in fish distribution and feeding activity. A single-point surface sensor may not adequately capture the feeding dynamics of a large, heterogeneous tank. To transition from experimental to industrial applicability, two strategies are recommended:</p>
<p>Tank-Specific Transfer Learning: Pre-trained models should be fine-tuned using a small amount of data collected from the specific commercial tank, allowing the system to adapt to its unique hydrodynamic and behavioral characteristics.</p>
<p>Coupled Multi-Sensor Arrays: Deploying a network of vibration sensors at strategic locations across the water surface can provide a comprehensive view of feeding activity, mitigating the limitations of a single sensor and improving signal fidelity in large volumes.</p>
</sec>
<sec id="sec17">
<label>5.2</label>
<title>Detection efficacy for sinking feeds</title>
<p>A significant limitation was identified with the use of sinking feeds, where the model&#x2019;s detection efficacy dropped to 76.4%, compared to 98.3% for floating pellets. This performance gap arises because sinking feeds are consumed below the surface, generating considerably weaker surface vibrations. To develop a universally applicable system, future work should focus on:</p>
<p>Strategic Sub-Surface Sensor Placement: Deploying vibration sensors at multiple depths to directly capture feeding activity in the water column.</p>
<p>Hybrid Hydrophone Integration: Fusing vibration data with underwater acoustic signals, as the crunching and chewing sounds of fish consuming sinking pellets provide a strong, complementary signal.</p>
<p>Sinking-Feed-Specific Model Retraining: Collecting dedicated datasets of sinking feed consumption and retraining the model to recognize the associated, albeit subtler, vibration signatures.</p>
</sec>
<sec id="sec18">
<label>5.3</label>
<title>Broader research directions</title>
<p>Beyond addressing these immediate limitations, this work opens several avenues for future research. These include extending the framework to multi-species applications, developing adaptive noise cancellation algorithms to enhance robustness in noisy environments, and exploring reinforcement learning paradigms to optimize long-term feeding strategies that maximize growth and fish welfare.</p>
</sec>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec19">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="ethics-statement" id="sec20">
<title>Ethics statement</title>
<p>The animal study was approved by Fishery Machinery and Instrument Research Institute (FMIRI-AWE-2024-001). The study was conducted in accordance with the local legislation and institutional requirements.</p>
</sec>
<sec sec-type="author-contributions" id="sec21">
<title>Author contributions</title>
<p>YfZ: Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing, Software. AL: Writing &#x2013; review &#x0026; editing. YlZ: Conceptualization, Writing &#x2013; review &#x0026; editing. QN: Writing &#x2013; original draft, Project administration. HZ: Writing &#x2013; review &#x0026; editing, Conceptualization. HS: Investigation, Writing &#x2013; review &#x0026; editing. YW: Data curation, Writing &#x2013; review &#x0026; editing. XC: Resources, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>The authors would like to thank He Zhang for their assistance in aquaculture management and technology.</p>
</ack>
<sec sec-type="COI-statement" id="sec22">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec23">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec24">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec25">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/frai.2026.1656290/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/frai.2026.1656290/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Supplementary_file_1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="Table_1.xlsx" id="SM2" mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Biazi</surname><given-names>V.</given-names></name> <name><surname>Marques</surname><given-names>C.</given-names></name></person-group> (<year>2023</year>). <article-title>Industry 4.0-based smart systems in aquaculture: a comprehensive review</article-title>. <source>Aquac. Eng.</source> <volume>103</volume>:<fpage>102360</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.aquaeng.2023.102360</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Cho</surname><given-names>K.</given-names></name> <name><surname>Van Merri&#x00EB;nboer</surname><given-names>B.</given-names></name> <name><surname>Gulcehre</surname><given-names>C.</given-names></name> <name><surname>Bahdanau</surname><given-names>D.</given-names></name> <name><surname>Bougares</surname><given-names>F.</given-names></name> <name><surname>Schwenk</surname><given-names>H.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Learning phrase representations using RNN encoder-decoder for statistical machine translation</article-title>. <conf-name>Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)</conf-name>, <fpage>1724</fpage>&#x2013;<lpage>1734</lpage>.</mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Du</surname><given-names>Z.</given-names></name> <name><surname>Cui</surname><given-names>M.</given-names></name> <name><surname>Wang</surname><given-names>Q.</given-names></name> <name><surname>Liu</surname><given-names>X.</given-names></name> <name><surname>Xu</surname><given-names>Y.</given-names></name> <name><surname>Bai</surname><given-names>Z.</given-names></name> <etal/></person-group>. (<year>2023a</year>). <article-title>Feeding intensity assessment of aquaculture fish using mel spectrogram and deep learning algorithms</article-title>. <source>Aquac. Eng.</source> <volume>102</volume>:<fpage>102345</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.aquaeng.2023.102345</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Du</surname><given-names>Z.</given-names></name> <name><surname>Cui</surname><given-names>M.</given-names></name> <name><surname>Xu</surname><given-names>X.</given-names></name> <name><surname>Bai</surname><given-names>Z.</given-names></name> <name><surname>Han</surname><given-names>J.</given-names></name> <name><surname>Li</surname><given-names>W.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Harnessing multimodal data fusion to advance accurate identification of fish feeding intensity</article-title>. <source>Biosyst. Eng.</source> <volume>246</volume>, <fpage>135</fpage>&#x2013;<lpage>146</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.biosystemseng.2024.08.001</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Du</surname><given-names>Z.</given-names></name> <name><surname>Xu</surname><given-names>X.</given-names></name> <name><surname>Bai</surname><given-names>Z.</given-names></name> <name><surname>Liu</surname><given-names>X.</given-names></name> <name><surname>Hu</surname><given-names>Y.</given-names></name> <name><surname>Li</surname><given-names>W.</given-names></name> <etal/></person-group>. (<year>2023b</year>). <article-title>Feature fusion strategy and improved ghostnet for accurate recognition of fish feeding behavior</article-title>. <source>Comput. Electron. Agric.</source> <volume>214</volume>:<fpage>108310</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2023.108310</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>F&#x00F8;re</surname><given-names>M.</given-names></name> <name><surname>Alver</surname><given-names>M. O.</given-names></name> <name><surname>Alfredsen</surname><given-names>J. A.</given-names></name> <name><surname>Rasheed</surname><given-names>A.</given-names></name> <name><surname>Hukkel&#x00E5;s</surname><given-names>T.</given-names></name> <name><surname>Bjelland</surname><given-names>H. V.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Digital twins in intensive aquaculture &#x2014; challenges, opportunities and future prospects</article-title>. <source>Comput. Electron. Agric.</source> <volume>218</volume>:<fpage>108676</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2024.108676</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hochreiter</surname><given-names>S.</given-names></name> <name><surname>Schmidhuber</surname><given-names>J.</given-names></name></person-group> (<year>1997</year>). <article-title>Long short-term memory</article-title>. <source>Neural Comput.</source> <volume>9</volume>, <fpage>1735</fpage>&#x2013;<lpage>1780</lpage>. doi: <pub-id pub-id-type="doi">10.1162/neco.1997.9.8.1735</pub-id></mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname><given-names>W.</given-names></name> <name><surname>Yang</surname><given-names>X.</given-names></name> <name><surname>Ma</surname><given-names>P.</given-names></name> <name><surname>Fu</surname><given-names>T.</given-names></name> <name><surname>Zhou</surname><given-names>C.</given-names></name></person-group> (<year>2025</year>). <article-title>DCA-MVIT: fused DSGated convolution and CA attention for fish feeding behavior recognition in recirculating aquaculture systems</article-title>. <source>Aquaculture</source> <volume>598</volume>:<fpage>742008</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.aquaculture.2024.742008</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khater</surname><given-names>E.-S.</given-names></name> <name><surname>Bahnasawy</surname><given-names>A.</given-names></name> <name><surname>Morsy</surname><given-names>O.</given-names></name></person-group> (<year>2021</year>). <article-title>Evaluation of fish feeder manufactured from local raw materials</article-title>. <source>Sci. Rep.</source> <volume>11</volume>:<fpage>18799</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-021-98383-0</pub-id>, <pub-id pub-id-type="pmid">34552167</pub-id></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pan</surname><given-names>S.</given-names></name> <name><surname>Mao</surname><given-names>H.</given-names></name> <name><surname>Wang</surname><given-names>H.</given-names></name> <name><surname>Cao</surname><given-names>H.</given-names></name> <name><surname>Zhu</surname><given-names>S.</given-names></name> <name><surname>Ye</surname><given-names>Y.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Study on feeding rule and baiting method of fish based on six axis sensor</article-title>. <source>Fish. Moderniz.</source> <volume>50</volume>, <fpage>56</fpage>&#x2013;<lpage>63</lpage>. doi: <pub-id pub-id-type="doi">10.3969/j.issn.1007-9580.2023.03.007</pub-id></mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Qi</surname><given-names>R.</given-names></name> <name><surname>Liu</surname><given-names>H.</given-names></name> <name><surname>Cao</surname><given-names>X.</given-names></name></person-group> (<year>2023</year>). <source>Feature extraction and classification of swallowing and chewing sounds of single largemouth bass (<italic>Micropterus salmoides</italic>)</source>. <publisher-loc>Rochester, NY, USA</publisher-loc>: <publisher-name>SSRN (Social Science Research Network)</publisher-name>. doi: <pub-id pub-id-type="doi">10.2139/ssrn.4535837</pub-id>.</mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ubina</surname><given-names>N.</given-names></name> <name><surname>Cheng</surname><given-names>S.-C.</given-names></name> <name><surname>Chang</surname><given-names>C.-C.</given-names></name> <name><surname>Chen</surname><given-names>H.-Y.</given-names></name></person-group> (<year>2021</year>). <article-title>Evaluating fish feeding intensity in aquaculture with convolutional neural networks</article-title>. <source>Aquac. Eng.</source> <volume>94</volume>:<fpage>102178</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.aquaeng.2021.102178</pub-id></mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Vaswani</surname><given-names>A.</given-names></name> <name><surname>Shazeer</surname><given-names>N.</given-names></name> <name><surname>Parmar</surname><given-names>N.</given-names></name> <name><surname>Uszkoreit</surname><given-names>J.</given-names></name> <name><surname>Jones</surname><given-names>L.</given-names></name> <name><surname>Gomez</surname><given-names>A. N.</given-names></name> <etal/></person-group>. (<year>2017</year>). <source>Attention is all you need. Advances in neural information processing systems, 30 (NeurIPS 2017)</source>, <publisher-loc>Red Hook, NY, USA</publisher-loc>: <publisher-name>Curran Associates, Inc</publisher-name>. <fpage>5998</fpage>&#x2013;<lpage>6008</lpage>.</mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wei</surname><given-names>D.</given-names></name> <name><surname>Ji</surname><given-names>B.</given-names></name> <name><surname>Li</surname><given-names>H.</given-names></name> <name><surname>Zhu</surname><given-names>S.</given-names></name> <name><surname>Ye</surname><given-names>Z.</given-names></name> <name><surname>Zhao</surname><given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>Modified kinetic energy feature-based graph convolutional network for fish appetite grading using time-limited data in aquaculture</article-title>. <source>Front. Mar. Sci.</source> <volume>9</volume>:<fpage>1021688</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fmars.2022.1021688</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname><given-names>Y.</given-names></name> <name><surname>Duan</surname><given-names>Y.</given-names></name> <name><surname>Wei</surname><given-names>Y.</given-names></name> <name><surname>An</surname><given-names>D.</given-names></name> <name><surname>Liu</surname><given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>Application of intelligent and unmanned equipment in aquaculture: a review</article-title>. <source>Comput. Electron. Agric.</source> <volume>199</volume>:<fpage>107201</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2022.107201</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Xiang</surname><given-names>K.</given-names></name></person-group> (<year>2022</year>) <source>Swimming fish adaptive feeding device and method based on water surface fluctuation information</source>. <publisher-loc>Beijing, China</publisher-loc>: <publisher-name>China National Intellectual Property Administration (CNIPA)</publisher-name>.</mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname><given-names>L.</given-names></name> <name><surname>Huang</surname><given-names>X.</given-names></name> <name><surname>Liu</surname><given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>Recognition of fish feeding intensity based on improved LRCN</article-title>. <source>Trans. Chin. Soc. Agric. Mach.</source> <volume>53</volume>, <fpage>236</fpage>&#x2013;<lpage>241</lpage>. doi: <pub-id pub-id-type="doi">10.6041/j.issn.1000-1298.2022.10.025</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zeng</surname><given-names>Y.</given-names></name> <name><surname>Yang</surname><given-names>X.</given-names></name> <name><surname>Pan</surname><given-names>L.</given-names></name> <name><surname>Zhu</surname><given-names>W.</given-names></name> <name><surname>Wang</surname><given-names>D.</given-names></name> <name><surname>Zhao</surname><given-names>Z.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Fish school feeding behavior quantification using acoustic signal and improved swin transformer</article-title>. <source>Comput. Electron. Agric.</source> <volume>204</volume>:<fpage>107580</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compag.2022.107580</pub-id></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zheng</surname><given-names>S.</given-names></name> <name><surname>Zheng</surname><given-names>N.</given-names></name></person-group> (<year>2025</year>). <article-title>Research on the international competitiveness of fishery industry in China and its influential factors</article-title>. <source>J. China Agric. Univ.</source> <volume>30</volume>, <fpage>294</fpage>&#x2013;<lpage>306</lpage>. doi: <pub-id pub-id-type="doi">10.11841/j.issn.1007-4333.2025.02.25</pub-id></mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname><given-names>C.</given-names></name> <name><surname>Xu</surname><given-names>D.</given-names></name> <name><surname>Lin</surname><given-names>K.</given-names></name> <name><surname>Sun</surname><given-names>C.</given-names></name> <name><surname>Yang</surname><given-names>X.</given-names></name></person-group> (<year>2018</year>). <article-title>Intelligent feeding control methods in aquaculture with an emphasis on fish: a review</article-title>. <source>Rev. Aquac.</source> <volume>10</volume>, <fpage>975</fpage>&#x2013;<lpage>993</lpage>. doi: <pub-id pub-id-type="doi">10.1111/raq.12218</pub-id></mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhu</surname><given-names>M.</given-names></name> <name><surname>Zhang</surname><given-names>Z.</given-names></name> <name><surname>Huang</surname><given-names>H.</given-names></name></person-group> (<year>2022</year>). <article-title>Research progress on intelligent feeding methods in fish farming</article-title>. <source>Trans. Chin. Soc. Agric. Engineer.</source> <volume>38</volume>, <fpage>38</fpage>&#x2013;<lpage>47</lpage>. doi: <pub-id pub-id-type="doi">10.11975/j.issn.1002-6819.2022.07.005</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1311963/overview">Yang Jin</ext-link>, Johns Hopkins University, United States</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3123540/overview">Meng Cui</ext-link>, University of Surrey, United Kingdom</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3141674/overview">Zhuangzhuang Du</ext-link>, Henan University, China</p>
</fn>
</fn-group>
</back>
</article>