<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<?covid-19-tdm?>
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Artif. Intell.</journal-id>
<journal-title>Frontiers in Artificial Intelligence</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Artif. Intell.</abbrev-journal-title>
<issn pub-type="epub">2624-8212</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/frai.2023.1100112</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Artificial Intelligence</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Coronavirus diagnosis using cough sounds: Artificial intelligence approaches</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Askari Nasab</surname> <given-names>Kazem</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2101502/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Mirzaei</surname> <given-names>Jamal</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2151712/overview"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zali</surname> <given-names>Alireza</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2191690/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Gholizadeh</surname> <given-names>Sarfenaz</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2191073/overview"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Akhlaghdoust</surname> <given-names>Meisam</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1879124/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Materials Science and Engineering Department, Sharif University of Technology</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<aff id="aff2"><sup>2</sup><institution>Infectious Disease Research Center, Department of Infectious Diseases, Aja University of Medical Sciences</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<aff id="aff3"><sup>3</sup><institution>Infectious Disease Research Center, Shahid Beheshti University of Medical Sciences</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<aff id="aff4"><sup>4</sup><institution>Functional Neurosurgery Research Center, Shohada Tajrish Comprehensive Neurosurgical Center of Excellence, Shahid Beheshti University of Medical Sciences</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<aff id="aff5"><sup>5</sup><institution>USERN Office, Functional Neurosurgery Research Center, Shahid Beheshti University of Medical Sciences</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<aff id="aff6"><sup>6</sup><institution>Civil Engineering Department, Tehran University of Technology</institution>, <addr-line>Tehran</addr-line>, <country>Iran</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Francesco Napolitano, University of Sannio, Italy</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Umut &#x000D6;zkaya, Konya Technical University, T&#x000FC;rkiye; Lal Hussain, University of Azad Jammu and Kashmir, Pakistan</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Alireza Zali &#x02709; <email>info_fnrc&#x00040;sbmu.ac.ir</email></corresp>
<fn fn-type="other" id="fn001"><p>This article was submitted to Medicine and Public Health, a section of the journal Frontiers in Artificial Intelligence</p></fn></author-notes>
<pub-date pub-type="epub">
<day>15</day>
<month>02</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>6</volume>
<elocation-id>1100112</elocation-id>
<history>
<date date-type="received">
<day>16</day>
<month>11</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>24</day>
<month>01</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2023 Askari Nasab, Mirzaei, Zali, Gholizadeh and Akhlaghdoust.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Askari Nasab, Mirzaei, Zali, Gholizadeh and Akhlaghdoust</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license> </permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>The Coronavirus disease 2019 (COVID-19) pandemic has caused irreparable damage to the world. In order to prevent the spread of the pathogen, it is necessary to identify infected people for quarantine and treatment. The use of artificial intelligence and data mining approaches can lead to prevention and reduction of treatment costs. The purpose of this study is to create data mining models in order to diagnose people with the disease of COVID-19 through the sound of coughing.</p>
</sec>
<sec>
<title>Method</title>
<p>In this research, Supervised Learning classification algorithms have been used, which include Support Vector Machine (SVM), random forest, and Artificial Neural Networks, that based on the standard &#x0201C;Fully Connected&#x0201D; neural network, Convolutional Neural Networks (CNN) and Long Short-Term Memory (LSTM) recurrent neural networks have been established. The data used in this research was from the online site <ext-link ext-link-type="uri" xlink:href="https://sorfeh.com/sendcough/en">sorfeh.com/sendcough/en</ext-link>, which has data collected during the spread of COVID-19.</p>
</sec>
<sec>
<title>Result</title>
<p>With the data we have collected (about 40,000 people) in different networks, we have reached acceptable accuracies.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>These findings show the reliability of this method for using and developing a tool as a screening and early diagnosis of people with COVID-19. This method can also be used with simple artificial intelligence networks so that acceptable results can be expected. Based on the findings, the average accuracy was 83% and the best model was 95%.</p>
</sec></abstract>
<kwd-group>
<kwd>coronavirus</kwd>
<kwd>cough</kwd>
<kwd>artificial intelligence</kwd>
<kwd>machine learning</kwd>
<kwd>respiratory sounds</kwd>
<kwd>deep learning</kwd>
</kwd-group>
<counts>
<fig-count count="4"/>
<table-count count="3"/>
<equation-count count="2"/>
<ref-count count="42"/>
<page-count count="9"/>
<word-count count="6523"/>
</counts>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1. Introduction</title>
<p>The COVID-19 pandemic is caused by the SARS-CoV-2 virus (Laguarta et al., <xref ref-type="bibr" rid="B16">2020</xref>; Jamshidi et al., <xref ref-type="bibr" rid="B14">2021</xref>; Moazzami et al., <xref ref-type="bibr" rid="B21">2021</xref>; Haritaoglu et al., <xref ref-type="bibr" rid="B13">2022</xref>; Samieefar et al., <xref ref-type="bibr" rid="B29">2022</xref>). This disease was detected for the first time in December 2019 in Wuhan, Hubei Province, China (Laguarta et al., <xref ref-type="bibr" rid="B16">2020</xref>; Jamshidi et al., <xref ref-type="bibr" rid="B14">2021</xref>; Liu et al., <xref ref-type="bibr" rid="B19">2021</xref>; Moazzami et al., <xref ref-type="bibr" rid="B21">2021</xref>; Samieefar et al., <xref ref-type="bibr" rid="B29">2022</xref>). On March 11, the World Health Organization (WHO) declared the COVID-19 pandemic (Laguarta et al., <xref ref-type="bibr" rid="B16">2020</xref>; Liu et al., <xref ref-type="bibr" rid="B19">2021</xref>; Chadaga et al., <xref ref-type="bibr" rid="B10">2022</xref>). COVID-19 disease spreads to other people through tiny respiratory droplets (Wang and Wong, <xref ref-type="bibr" rid="B37">2020</xref>; Liu et al., <xref ref-type="bibr" rid="B19">2021</xref>; Chadaga et al., <xref ref-type="bibr" rid="B10">2022</xref>). Studies show that one of the most common symptoms in COVID-19 is dry cough with a prevalence of about 68% (Bai et al., <xref ref-type="bibr" rid="B6">2020</xref>; Wang and Wong, <xref ref-type="bibr" rid="B37">2020</xref>; Mohammed et al., <xref ref-type="bibr" rid="B22">2021</xref>).</p>
<p>During the peak of the spread of COVID-19, medical diagnostic laboratories and health centers face many problems with current common methods such as clinical examinations, computerized tomography (CT) scans, real-time polymerase chain reaction (PCR) and serology techniques (Wan et al., <xref ref-type="bibr" rid="B36">2016</xref>; Ai et al., <xref ref-type="bibr" rid="B1">2020</xref>; Guan et al., <xref ref-type="bibr" rid="B12">2020</xref>).</p>
<p>Despite the fact that real-time PCR is an accurate method for detecting COVID-19, it has some drawbacks. Its major drawbacks include the high cost of testing and the time-consuming testing process, which make it inaccessible to all members of society (Cabitza et al., <xref ref-type="bibr" rid="B9">2020</xref>; Xiao et al., <xref ref-type="bibr" rid="B41">2020</xref>). Due to the similar effects of influenza and COVID-19 on the lung, diagnosis based on CT scan images is difficult: the virus may not infect the lung, or, if imaging is performed before the third day of infection with COVID-19, the lung infection may not be detectable in the CT scan images (Bleier and Welch, <xref ref-type="bibr" rid="B7">2020</xref>; Khorramdelazad et al., <xref ref-type="bibr" rid="B15">2021</xref>). It should be noted that CT scan is prohibited in infants and pregnant women (Li and Xia, <xref ref-type="bibr" rid="B18">2020</xref>), therefore CT scan imaging during the period of the spread of COVID-19 cannot be an accurate diagnostic method and its use has limitations. Serological tests such as C-Reactive Protein (CRP) indicate any viral infection in the body and are not specific for the COVID-19 virus (Wang, <xref ref-type="bibr" rid="B38">2020</xref>). Also, clinical symptoms differ between people, and due to the similarity of the symptoms of COVID-19 with other diseases such as cold and flu, the doctor may make a mistake in diagnosis and this mistake can have adverse consequences for the patient (Struyf et al., <xref ref-type="bibr" rid="B34">2020</xref>).</p>
<p>Other problems include limited medical and health staff, the lack of raw materials and diagnostic devices in different cities and countries, and the burnout of personnel due to the high number of samples and the time-consuming nature of the testing process (Pooladi et al., <xref ref-type="bibr" rid="B27">2020</xref>). All these problems reduce accuracy and increase errors in diagnosis, which can have very adverse effects on patients and the country&#x00027;s health system. Therefore, one way to solve the problem is to use artificial intelligence, which can significantly reduce the error caused by the accuracy and duration of the detection process (Naud&#x000E9; W., <xref ref-type="bibr" rid="B23">2020</xref>; Soltani et al., <xref ref-type="bibr" rid="B33">2022</xref>).</p>
<p>By using the anatomy of the respiratory system, it is possible to measure the amount of changed respiratory infections based on the cough sound (Bai et al., <xref ref-type="bibr" rid="B6">2020</xref>; Alqudaihi et al., <xref ref-type="bibr" rid="B2">2021</xref>; Mohammed et al., <xref ref-type="bibr" rid="B22">2021</xref>). In the past years, studies have been conducted to identify whooping cough (pertussis), chronic obstructive pulmonary disease (COPD), tuberculosis and asthma using an algorithm of audio signals by analyzing cough sounds (Bai et al., <xref ref-type="bibr" rid="B6">2020</xref>; Alqudaihi et al., <xref ref-type="bibr" rid="B2">2021</xref>; Mohammed et al., <xref ref-type="bibr" rid="B22">2021</xref>; Sadhana et al., <xref ref-type="bibr" rid="B28">2021</xref>; Santosh et al., <xref ref-type="bibr" rid="B30">2022</xref>). Nowadays, some universities in the world, including MIT University in the United States, Cambridge University in England, EPFL University in Switzerland, and Carnegie Mellon University in United States are studying the diagnosis of COVID-19 through cough with the help of artificial intelligence methods (Bai et al., <xref ref-type="bibr" rid="B6">2020</xref>; Wang and Wong, <xref ref-type="bibr" rid="B37">2020</xref>; Alqudaihi et al., <xref ref-type="bibr" rid="B2">2021</xref>; Mohammed et al., <xref ref-type="bibr" rid="B22">2021</xref>). It has been reported that simple machine learning tools, such as binary classifiers, can distinguish COVID-19 breath sounds from healthy counterparts with an area under the ROC curve (AUC) &#x0003E;0.80 (Brown et al., <xref ref-type="bibr" rid="B8">2020</xref>). There appeared to be unique patterns in the COVID-19 coughs that allowed the pre-trained Resnet18 classifier to identify the COVID-19 coughs with an AUC of 0.72. In this case, cough samples were collected by telephone from 3,621 people with confirmed COVID-19 (Bagad et al., <xref ref-type="bibr" rid="B5">2020</xref>).</p>
<p>A high AUC of more than 0.98 was also reported when distinguishing COVID-19-positive from COVID-19-negative coughs in a clinically validated dataset of 2,339 COVID-19-positive and 6,041 COVID-19-negative cases using classifiers based on DNN was obtained (Andreu-Perez et al., <xref ref-type="bibr" rid="B4">2021</xref>).</p>
<p>Comparison of chest CT scans of people with pneumonia not related to COVID-19 and pneumonia related to COVID-19 shows that the likelihood of peripheral distribution, ground-glass opacities and thickening of vessels is higher in pneumonia related to COVID-19. These findings show that cough sounds with pneumonia caused by COVID-19 probably have some specific characteristics that result from the fundamental pathomorphological changes (Wang and Wong, <xref ref-type="bibr" rid="B37">2020</xref>). Coughs of respiratory syndromes such as COVID-19 have hidden and specific characteristics, even if they are not spontaneous (Bai et al., <xref ref-type="bibr" rid="B6">2020</xref>). Therefore, cough can be used as a pre-screening method. However, because cough is a symptom of more than 30 different diseases, it is difficult to diagnose COVID-19 through the analysis of cough sounds using artificial intelligence (Bai et al., <xref ref-type="bibr" rid="B6">2020</xref>; Mohammed et al., <xref ref-type="bibr" rid="B22">2021</xref>).</p>
<p>In our research, we focused on people&#x00027;s cough sounds for the following reasons: The sound of people&#x00027;s cough is less diverse than the accents and dialects present in speech. Two factors, namely that the sound of coughing is more involuntary than talking and that coughing is part of the body&#x00027;s natural mechanism, reduce the role of accents, dialects, culture and geographical location in coughing. Furthermore, cough data is easier to collect than other types of data, and it is easier to train people to use this method. Our work differs from these works in data collection, as we use an entirely crowdsourced dataset; the following points are significant and important. Even though some datasets are publicly available, those datasets are naturally limited in COVID-positive samples compared to the negative samples. The sounds were collected through the website and online, and there are no sounds recorded directly by offline recorders. The recorded sounds do not come from a specific type of microphone, but from different types of smartphones, tablets and laptops, which are of different brands and use different browsers. This can have a positive effect by not being limited to one type of microphone, avoiding bias toward a single device. We must further overcome the challenges of data coming from different phones and microphones, possibly in very different environments.</p>
<p>In all the above cases, due to the fact that we have been able to obtain a high amount of data, we were able to eliminate the effects of the lack of data for training our models and achieve acceptable results.</p>
<p>The purpose of this study is to create data mining models in order to diagnose people with the disease of COVID-19 through the sound of coughing. This study was designed for the first time in Iran, and its most important advantage is the rapid and non-invasive diagnosis of COVID-19.</p>
</sec>
<sec id="s2">
<title>2. Methods</title>
<p>Since the deficiency of data can cause overfitting, we increased the data (data augmentation) to be learned by artificial intelligence with a standard method. According to the data augmentation standard, the data should be changed in such a way that the quality and nature of the data do not change, because it can cause a fundamental change in the data. In the data used in model training, the speed of the audio signal was changed with different rates (0.8&#x02013;1.10 times) and the pitch of the audio signal was changed with different steps (&#x02212;2.5 to 2.5 times). There was no change in the test data.</p>
<sec>
<title>2.1. Data collection</title>
<p>From January 2021, data related to the cough of people with COVID-19 was recorded and collected using the online site &#x0201C;<ext-link ext-link-type="uri" xlink:href="https://sorfeh.com/sendcough/en">sorfeh.com/sendcough/en</ext-link>.&#x0201D; In order to increase the amount of data, the contact information of patients with COVID-19 was received through laboratories, hospitals and infectious disease specialists. During this process, the data was collected after obtaining the patient&#x00027;s consent and confirming the required conditions. The conditions for the patient to enter the study include a positive PCR test, or diagnosis of lung involvement by CT scan, or a definite diagnosis of coronavirus by a physician based on clinical examinations, and no more than 8 days have passed since the definite diagnosis of COVID-19. People admitted to the hospital were excluded from the study because usually more than 8 days have passed since the onset of their illness and their coughs could be pulmonary complications after COVID-19 and not related to COVID-19.</p>
<p>Inclusion criteria for healthy people into the study: (1) lack of symptoms of the COVID-19, (2) if the symptoms are present, the physician&#x00027;s diagnosis should not be COVID-19, and (3) None of the close people should have COVID-19 (Alqudaihi et al., <xref ref-type="bibr" rid="B2">2021</xref>; Sadhana et al., <xref ref-type="bibr" rid="B28">2021</xref>; Santosh et al., <xref ref-type="bibr" rid="B30">2022</xref>). During data collection and recording of coughing, healthy people and patients were asked to cough in a safe environment without the presence of other people.</p>
</sec>
<sec>
<title>2.2. Data collection app</title>
<p>The link of the online site was provided to the patients. Patients first chose their symptoms on the online site and recorded their cough for 7 s in a quiet environment and without the presence of people. The presence of sounds other than coughing can cause misdiagnosis (<xref ref-type="fig" rid="F1">Figure 1</xref>). Then the recorded sound will be played and if the quality is confirmed, it will go to the next stage. In the next step, the user must select the symptoms he has and select the time of occurrence. Then he will be asked additional questions, such as the current status, age, gender, previous history of infection, PCR test, CT scan of the lung, whether there is a conflict or not. The recorded sounds are not from a specific type of microphone and have been recorded by different types of smartphones, tablets and laptops. This case can have a positive effect on the study and avoid bias. All the data collected are from Tehran city, which are from different minorities and ethnicities with various accents. All audio files used in our study are in uncompressed Pulse-code modulation (PCM) 16-bit format with a sampling rate of 48 kHz and a fixed 7-s length (Pahar et al., <xref ref-type="bibr" rid="B24">2022</xref>).</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>How to collect data and its steps on the website.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="frai-06-1100112-g0001.tif"/>
</fig>
</sec>
<sec>
<title>2.3. Crowdsourced dataset</title>
<p>The total number of collected data is almost 40,353. Almost 17,690 data were manually removed due to incomplete information, low sound quality, silent audio content, presence of surrounding noise when recording cough, absence of cough sound in audio content. Among the remaining 22,663 cough sounds, there were 14,521 negative coughs and 8,142 positive coughs. On the website, in addition to recording the sound of coughing, patients also recorded information such as clinical symptoms, days of onset of symptoms, the status of PCR tests and CT scans, the presence of previous infections, age, gender, and the person&#x00027;s disease status. The information about the symptoms of the patients is shown in <xref ref-type="table" rid="T1">Table 1</xref>. According to <xref ref-type="table" rid="T1">Table 1</xref>, the most common symptom among people with COVID-19 is fever (51.75%). Dry cough and productive cough are among the most common symptoms of the patients participating in the study with prevalence of 25.01 and 21.22%. Also, 29.2% of patients had no clinical symptoms. The symptoms of healthy people were negligible, so it was ignored.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Distribution of symptoms of COVID-19 patients.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919497; color:#ffffff">
<th valign="top" align="left"><bold>Symptom</bold></th>
<th valign="top" align="left"><bold>Percentage</bold></th>
<th valign="top" align="left"><bold>Number</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Fever</td>
<td valign="top" align="left">51.75</td>
<td valign="top" align="left">4,214</td>
</tr> <tr>
<td valign="top" align="left">Dry cough</td>
<td valign="top" align="left">25.01</td>
<td valign="top" align="left">2,037</td>
</tr> <tr>
<td valign="top" align="left">Productive cough</td>
<td valign="top" align="left">21.22</td>
<td valign="top" align="left">1,728</td>
</tr> <tr>
<td valign="top" align="left">Fatigue</td>
<td valign="top" align="left">19.83</td>
<td valign="top" align="left">1,615</td>
</tr> <tr>
<td valign="top" align="left">Body pain</td>
<td valign="top" align="left">17.31</td>
<td valign="top" align="left">1,410</td>
</tr> <tr>
<td valign="top" align="left">Sore throat</td>
<td valign="top" align="left">15.99</td>
<td valign="top" align="left">1,302</td>
</tr> <tr>
<td valign="top" align="left">Loose stools</td>
<td valign="top" align="left">12.29</td>
<td valign="top" align="left">1,001</td>
</tr> <tr>
<td valign="top" align="left">Runny nose</td>
<td valign="top" align="left">11.05</td>
<td valign="top" align="left">900</td>
</tr> <tr>
<td valign="top" align="left">Vertigo</td>
<td valign="top" align="left">3.16</td>
<td valign="top" align="left">258</td>
</tr> <tr>
<td valign="top" align="left">Shivering</td>
<td valign="top" align="left">9.83</td>
<td valign="top" align="left">801</td>
</tr> <tr>
<td valign="top" align="left">Sweating</td>
<td valign="top" align="left">9.03</td>
<td valign="top" align="left">736</td>
</tr> <tr>
<td valign="top" align="left">Dyspnea</td>
<td valign="top" align="left">8.64</td>
<td valign="top" align="left">704</td>
</tr> <tr>
<td valign="top" align="left">Abdominal pain</td>
<td valign="top" align="left">4.27</td>
<td valign="top" align="left">348</td>
</tr> <tr>
<td valign="top" align="left">Nasal obstruction</td>
<td valign="top" align="left">3.13</td>
<td valign="top" align="left">255</td>
</tr> <tr>
<td valign="top" align="left">Taste disorder</td>
<td valign="top" align="left">3.09</td>
<td valign="top" align="left">252</td>
</tr> <tr>
<td valign="top" align="left">Chest pain</td>
<td valign="top" align="left">2.03</td>
<td valign="top" align="left">166</td>
</tr> <tr>
<td valign="top" align="left">Olfactory disorder</td>
<td valign="top" align="left">2.01</td>
<td valign="top" align="left">164</td>
</tr> <tr>
<td valign="top" align="left">Anorexia</td>
<td valign="top" align="left">1.98</td>
<td valign="top" align="left">162</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The age range and gender of patients and healthy people are also mentioned in <xref ref-type="table" rid="T2">Table 2</xref>. The age range of the participants was 5&#x02013;90 years, most of the participants were between 15 and 30 years old and most of them were women. All participants were residents of Tehran city, which are from different minorities and ethnicities with various accents.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Age and gender distribution of participants.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919497; color:#ffffff">
<th valign="top" align="left" colspan="2"><bold>Characteristics</bold></th>
<th valign="top" align="left"><bold>Number of patients</bold></th>
<th valign="top" align="left"><bold>Number of healthy people</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" colspan="2">Number of participants</td>
<td valign="top" align="left">8,142</td>
<td valign="top" align="left">14,521</td>
</tr> <tr>
<td valign="top" align="left">Gender</td>
<td valign="top" align="left">Male</td>
<td valign="top" align="left">2,301</td>
<td valign="top" align="left">4,649</td>
</tr> <tr>
<td/>
<td valign="top" align="left">Female</td>
<td valign="top" align="left">5,841</td>
<td valign="top" align="left">9,872</td>
</tr> <tr>
<td valign="top" align="left">Age range</td>
<td valign="top" align="left">5&#x02013;15</td>
<td valign="top" align="left">754</td>
<td valign="top" align="left">1,240</td>
</tr> <tr>
<td/>
<td valign="top" align="left">15&#x02013;30</td>
<td valign="top" align="left">3,836</td>
<td valign="top" align="left">4,151</td>
</tr> <tr>
<td/>
<td valign="top" align="left">30&#x02013;45</td>
<td valign="top" align="left">2,241</td>
<td valign="top" align="left">1,985</td>
</tr> <tr>
<td/>
<td valign="top" align="left">45&#x02013;60</td>
<td valign="top" align="left">946</td>
<td valign="top" align="left">725</td>
</tr> <tr>
<td/>
<td valign="top" align="left">60&#x02013;75</td>
<td valign="top" align="left">326</td>
<td valign="top" align="left">578</td>
</tr> <tr>
<td/>
<td valign="top" align="left">75&#x02013;90</td>
<td valign="top" align="left">39</td>
<td valign="top" align="left">47</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec>
<title>2.4. Dataset used for this analysis</title>
<p>The analysis will only be done on the classification and diagnosis of the infected and healthy person based on the sound of the cough, so no classification was done on each of the files of each category of coughs. We used all the positive samples of COVID-19 in our data set and randomly used the same number of negative samples of COVID-19 for a balanced distribution of samples (8,142 people in each group). The patients in the case group were selected with the conditions of (1) a positive PCR test, or (2) diagnosis of lung involvement by CT scan or (3) definite diagnosis of coronavirus by a physician based on clinical examinations, and (4) no more than 8 days have passed since the definite diagnosis of COVID-19. Healthy people were selected in the control group with the conditions of (1) lack of symptoms of the COVID-19, (2) if the symptoms are present, the physician&#x00027;s diagnosis should not be COVID-19, and (3) none of the close people should have COVID-19.</p>
</sec>
<sec>
<title>2.5. Feature extraction</title>
<p>We used Handcrafted Features to extract audio features. The frequency of the recorded raw audio was 48 kHz and was stored without compression. We used the librosa library to extract features. Extracting more features does not always lead to beneficial results, so first we extracted the available features according to their previous applications in medical diagnosis. If no result is obtained, more features are extracted.</p>
<p>The most common and useful features for speech and audio recognition are Mel-Frequency Cepstral Coefficients (MFCC). This feature can create higher resolutions at lower frequencies. The main idea in extracting MFCC coefficients is the property of the human ear in receiving and understanding speech, and this issue has made these coefficients a powerful tool in all areas of audio processing and recognition. The number of coefficients used in voice recognition usually varies between 9 and 13. The coefficient of 0 indicates energy, which is referred to as the characteristic of Shimmer. In the first step of extracting these coefficients, the signal is converted to the frequency domain using the Fourier transform. Then the magnitude of the obtained spectrum is expressed on the mel scale and the logarithm is taken of the magnitude at each mel frequency. In the last step, the logarithmic mel spectrum is returned to the time domain. The result of these transformations is the cepstral representation of the signal spectrum, which shows the spectral characteristics of a frame of the audio signal (<xref ref-type="fig" rid="F2">Figure 2</xref>).</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Steps of cepstral conversions of the signal spectrum. Steps to obtain mel-scale cepstral coefficients from an audio file.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="frai-06-1100112-g0002.tif"/>
</fig>
<p>We apply the Discrete Fourier Transform (DFT) on each cough audio (Picone, <xref ref-type="bibr" rid="B26">1993</xref>).</p>
<disp-formula id="E1"><label>(1)</label><mml:math id="M1"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>Y</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mi>w</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo class="qopname">exp</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>-</mml:mo><mml:mn>2</mml:mn><mml:mi>&#x003C0;</mml:mi><mml:mi>i</mml:mi><mml:mi>k</mml:mi><mml:mi>t</mml:mi><mml:mo>/</mml:mo><mml:mi>N</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>.</mml:mo><mml:mo>,</mml:mo><mml:mi>N</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Where <italic>N</italic> denotes the number of samples in the frame, <italic>y</italic><sub><italic>i</italic></sub><italic>[t]</italic> is the discrete time domain cough signal, <italic>w(t)</italic> is the window function in the time domain and <italic>Y(k)</italic> is the kth harmonic, corresponding to the frequency <italic>f</italic>(k).</p>
<p><italic>f</italic> (k) = k<italic>F</italic>s/<italic>N</italic> where Fs is the sampling frequency. MFCCs use Mel filter bank or triangular bandpass filter on each cough audio DFT output, equally spaced on the Mel-scale. At last, we apply the Discrete Cosine Transform (DCT) on the output of the log filter bank in order to get the MFCCs:</p>
<disp-formula id="E2"><label>(2)</label><mml:math id="M2"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>c</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mfrac><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mi>M</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:msqrt><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>M</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:mo class="qopname">log</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>E</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo class="qopname">cos</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>&#x003C0;</mml:mi><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>M</mml:mi></mml:mrow></mml:mfrac><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>m</mml:mi><mml:mo>-</mml:mo><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>5</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Where <italic>i</italic> = 1, 2, &#x02026;, l; l denotes the cepstrum order, <italic>E(m)</italic> and M are the filter bank energies and total number of mel-filters, respectively. An audio signal is constantly changing, so for ease of understanding, it is assumed that on short time scales the audio signal does not change statistically. For this reason, we frame the signal in 10&#x02013;100 ms frames. If the frame is too short, we won&#x00027;t have enough samples to get a reliable spectral estimate, and if it&#x00027;s too long, the signal will change too much over the entire frame. Mel&#x00027;s scale was used to more accurately determine which frequency is present in the frame. The Mel scale relates the perceived frequency or pitch of a pure sound to its actual measured frequency. Using this scale makes the features more consistent with what humans hear.</p>
<p>In extracting features, we first set pre-emphasis equal to 0.97 and normalize audio for each audio file, and in extracting features, in addition to MFCC, we considered delta and delta2 MFCC. Where delta MFCC is the temporal differential (delta) of the MFCC and delta2 MFCC is the differential of the delta of the MFCC (acceleration coefficients). The number of coefficients considered was 13, and a 23 ms step was used to generate the MFCC coefficients; 302 frames were obtained, and average values were used in each frame.</p>
<p>Also, over the 7 s considered, we extracted chroma stft, rmse, spectral centroid, spectral bandwidth, rolloff, zero crossing rate, tonnetz and melspectro features. Finally, 914 features were obtained (3 <sup>&#x0002A;</sup> 302 &#x0002B; 8 = 914).</p>
<p>Where (chroma stft) is chromagram from a waveform or power spectrogram and (rmse) is the root-mean-square of the magnitude of a short-time Fourier transform which provides the power of the signal. (Spectral centroid) is the mean (centroid) extracted per frame of the magnitude spectrogram. (Spectral bandwidth) is the band width of light at one-half the peak maximum. (Rolloff) is the center frequency for a spectrogram bin so that at least 85% of the energy of the spectrum in this frame is contained in this bin and the bins below. (Zero crossing rate) is the rate of sign-changes of the signal. (Tonnetz) is tonal centroid features and (melspectro) is mel-scaled spectrogram.</p>
</sec>
<sec>
<title>2.6. Data exploration</title>
<p>From each of the control and case groups, one sample was selected such that the two samples were similar in terms of the number of coughs and phonetics, and their extracted characteristics are shown in <xref ref-type="fig" rid="F3">Figure 3</xref>.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Schematic of audio characteristics for two audio file samples of a healthy and an infected person that were randomly selected.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="frai-06-1100112-g0003.tif"/>
</fig>
<p>A 23 ms step was used to generate the MFCC coefficients; 302 frames were obtained, and average values were used in each frame. After generating the MFCC coefficients, they can be plotted on a spectrogram to visualize the sound. Heat maps can easily distinguish between two groups. In the healthy and sick group, the minimum, maximum and average values of the features that had a significant difference after extraction are shown in <xref ref-type="fig" rid="F4">Figure 4</xref>.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Average, minimum and maximum values of features extracted from healthy and diseased groups.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="frai-06-1100112-g0004.tif"/>
</fig>
</sec>
</sec>
<sec id="s3">
<title>3. Evaluation</title>
<sec>
<title>3.1. Experimental setup</title>
<p>In this study, after the data was collected and the initial pre-processing was done, the dataset of the samples was prepared in two train and test groups. Then, data augmentation was done using standard methods, and finally common and useful features were extracted from them.</p>
<p>We trained our models in two modes. The first case is the use of primary data without augmenting the data in the two categories of positive and negative COVID-19, and the second case is the use of augmented data that has been increased for both the positive and negative COVID-19 categories.</p>
<p>We have trained and evaluated five machine learning classifiers in total. Supervised Learning classification algorithms have been used, which include Support Vector Machine (SVM) and random forest; SVM classifiers have performed well in classifying cough events (Sharan et al., <xref ref-type="bibr" rid="B31">2017</xref>). Also, artificial neural networks based on the standard &#x0201C;Fully Connected&#x0201D; neural network, Convolutional Neural Networks (CNN) and Long Short-Term Memory (LSTM) recurrent neural networks have been established. CNN is a popular deep neural network architecture primarily used in image classification. It has also performed well in the classification of breathing and speech of COVID-19 (Pahar and Niesler, <xref ref-type="bibr" rid="B25">2021</xref>). An LSTM model is a type of recurrent neural network whose architecture allows it to remember previously-seen inputs when making its classification decision. It has been successfully used in automatic cough detection (Miranda et al., <xref ref-type="bibr" rid="B20">2019</xref>).</p>
<p>We tested different techniques for creating classification models, first to examine how effective these methods were for classification ability, and finally to examine the success rate in each mode to see which type of classification more efficiency can be achieved.</p>
<p>The independent term in the kernel functions is chosen as a hyperparameter during the optimization of the SVM classifier. The networks were optimized using the <italic>Cross-Entropy Loss</italic> and <italic>Adam optimizer</italic> with default parameters.</p>
<p>We selected several standard evaluation measures such as F1-Score, Sensitivity/Recall, Specificity, Precision and Accuracy for each model to evaluate the best models and their results. Several iterations were performed and the results reported. <xref ref-type="table" rid="T3">Table 3</xref> shows the results of each model with the following observations: It was observed that most of the models have accuracy, sensitivity and specificity higher than 70% and this shows that cough provides the necessary data about the respiratory system and the pathogens involved. The signal processing features enable the model to capture latent cough sounds and detect COVID-19 with sufficient sensitivity and specificity.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Results of each model before and after augmentation data.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919497; color:#ffffff">
<th valign="top" align="left"><bold>Model base</bold></th>
<th valign="top" align="center" colspan="2"><bold>F1-Score (%)</bold></th>
<th valign="top" align="center" colspan="2"><bold>Sensitivity (%)</bold></th>
<th valign="top" align="center" colspan="2"><bold>Specificity (%)</bold></th>
<th valign="top" align="center" colspan="2"><bold>Precision (%)</bold></th>
<th valign="top" align="center" colspan="2"><bold>Accuracy (%)</bold></th>
</tr>
<tr style="background-color:#919497; color:#ffffff">
<th/>
<th valign="top" align="center"><bold>Before</bold></th>
<th valign="top" align="center"><bold>After</bold></th>
<th valign="top" align="center"><bold>Before</bold></th>
<th valign="top" align="center"><bold>After</bold></th>
<th valign="top" align="center"><bold>Before</bold></th>
<th valign="top" align="left"><bold>After</bold></th>
<th valign="top" align="left"><bold>Before</bold></th>
<th valign="top" align="left"><bold>After</bold></th>
<th valign="top" align="left"><bold>Before</bold></th>
<th valign="top" align="left"><bold>After</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">SVM</td>
<td valign="top" align="center">81.98</td>
<td valign="top" align="center">83.56</td>
<td valign="top" align="center">82.96</td>
<td valign="top" align="center">85.57</td>
<td valign="top" align="center">80.63</td>
<td valign="top" align="left">80.83</td>
<td valign="top" align="left">81.01</td>
<td valign="top" align="left">81.64</td>
<td valign="top" align="left">81.8</td>
<td valign="top" align="left">83.2</td>
</tr> <tr>
<td valign="top" align="left">Random-forest</td>
<td valign="top" align="center">62.27</td>
<td valign="top" align="center">74.19</td>
<td valign="top" align="center">64.83</td>
<td valign="top" align="center">75.75</td>
<td valign="top" align="center">59.22</td>
<td valign="top" align="left">71.65</td>
<td valign="top" align="left">59.91</td>
<td valign="top" align="left">72.69</td>
<td valign="top" align="left">61.94</td>
<td valign="top" align="left">73.7</td>
</tr> <tr>
<td valign="top" align="left">Fully connected</td>
<td valign="top" align="center">81.23</td>
<td valign="top" align="center">86.16</td>
<td valign="top" align="center">81.56</td>
<td valign="top" align="center">87.97</td>
<td valign="top" align="center">80.8</td>
<td valign="top" align="left">83.83</td>
<td valign="top" align="left">80.91</td>
<td valign="top" align="left">84.42</td>
<td valign="top" align="left">81.2</td>
<td valign="top" align="left">85.9</td>
</tr> <tr>
<td valign="top" align="left">CNN</td>
<td valign="top" align="center">76.76</td>
<td valign="top" align="center">82.17</td>
<td valign="top" align="center">77.15</td>
<td valign="top" align="center">83.16</td>
<td valign="top" align="center">76.24</td>
<td valign="top" align="left">80.83</td>
<td valign="top" align="left">76.38</td>
<td valign="top" align="left">81.21</td>
<td valign="top" align="left">76.7</td>
<td valign="top" align="left">82</td>
</tr> <tr>
<td valign="top" align="left">LSTM</td>
<td valign="top" align="center">89.06</td>
<td valign="top" align="center">95.01</td>
<td valign="top" align="center">90.65</td>
<td valign="top" align="center">95.50</td>
<td valign="top" align="center">87</td>
<td valign="top" align="left">94.71</td>
<td valign="top" align="left">87.52</td>
<td valign="top" align="left">94.53</td>
<td valign="top" align="left">88.83</td>
<td valign="top" align="left">95.1</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Although all the models were able to achieve the minimum required evaluations, not all of them are necessarily acceptable; under the existing conditions, each of them shows a different performance, and the best performance should be considered for the final use of the model and method. It can also be seen that by using the data augmentation method, most of the parameters related to the evaluation of the models have been improved.</p>
</sec>
</sec>
<sec id="s4">
<title>4. Discussion and conclusions</title>
<p>The results of the studies by Zhao et al. and Li et al. show that the three symptoms of fever, dry cough and fatigue are the most common symptoms of COVID-19 patients (Li et al., <xref ref-type="bibr" rid="B17">2020</xref>; Zhao et al., <xref ref-type="bibr" rid="B42">2020</xref>). Based on the results of this study, fever (51.75%), dry cough (25.01%) and fatigue (19.83%) were among the 3 most common symptoms of patients (<xref ref-type="table" rid="T1">Table 1</xref>). The report of the World Health Organization also confirms the results of the studies (World Health Organization., <xref ref-type="bibr" rid="B40">2020</xref>).</p>
<p>After collecting about 40,000 data samples and initial processing, the data were divided into test and training groups. Then, in order to strengthen the data, data augmentation was done using a standard method. In this study, Supervised Learning classification algorithms have been used, including Support Vector Machine (SVM), random forest, and artificial neural networks based on the standard &#x0201C;Fully Connected&#x0201D; neural network, Convolutional Neural Networks (CNN) and Long Short-Term Memory (LSTM) recurrent neural networks, and we selected several standard evaluation measures such as F1-Score, Sensitivity, Specificity, Precision and Accuracy for each model to evaluate the best models and their results. Accuracy is the percentage of correct identifications and diagnoses of the system, and sensitivity is the percentage of correct diagnoses of infected people; the higher the sensitivity, the more infected people will be identified (Sharma et al., <xref ref-type="bibr" rid="B32">2021</xref>). In this study, after increasing the data, the accuracy of all models increased. Before increasing the data, the average accuracy of the models was 78%, and after increasing the data, the average accuracy reached 83% and the best model was 95% (<xref ref-type="table" rid="T3">Table 3</xref>).</p>
<p>In the present study, a favorable sensitivity was considered for the participants, and with the average accuracy obtained, we hope to be able to identify more patients in the society with this screening method in order to reduce the rate of COVID-19 infection and prevent its increase. Companies and organizations that are more sensitive to the disease of COVID-19, can consider a higher sensitivity to identify more patients using this screening method. If the sensitivity increases in the fixed accuracy level, healthy people may also be mistakenly included in the group of patients, but because the purpose of this method is to reduce the rate of infection, this method will be of higher quality. We propose a triaging tool that could be used by both individuals and health care officials.</p>
<p>The possibility of unwanted entry of some patients into the control group and vice versa is one of the most important limitations of this study (Alsharif et al., <xref ref-type="bibr" rid="B3">2020</xref>; Ghose et al., <xref ref-type="bibr" rid="B11">2020</xref>; Wong et al., <xref ref-type="bibr" rid="B39">2020</xref>; van Ginneken, <xref ref-type="bibr" rid="B35">2021</xref>). A person may be in a stage of COVID-19 disease that has no clinical symptoms or is not infected with COVID-19 but the lungs are affected due to other respiratory complications or smoking. It is also possible that the person has coughed several times while recording the cough sound, which causes inflammation of the person&#x00027;s larynx (Ghose et al., <xref ref-type="bibr" rid="B11">2020</xref>). However, increasing the number of data can lead to better discrimination and higher accuracy.</p>
<p>The implementation of this screening and diagnostic method at the community level can lead to useful results. The importance and benefits of conducting this study include reducing the workload of medical and health staff, especially during the peak of the outbreak of COVID-19, identifying more patients and reducing diagnostic and treatment costs, especially in less developed countries. Diagnosis and screening in this method is very simple and people will not have any worries about doing it, so more patients can be identified and prevent the spread of the disease. Also, according to the diagnostic method of this study, the workload of health and treatment staff is reduced and the staff can devote their time and energy to treating patients, which can also have a positive effect on increasing the number of recoveries of COVID-19 patients.</p>
<p>In order to use this method more effectively in medicine, it seems necessary to conduct more studies to distinguish between the cough sounds of COVID-19 patients and other people with lung problems and complications.</p>
</sec>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="author-contributions" id="s6">
<title>Author contributions</title>
<p>Conceptualization of this study by SG, and took the lead in writing the manuscript in consultation with MA. JM and AZ assisted with the calculations, technical details, and drafting the manuscript. SG, JM, and AZ helped to write the manuscript. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<ack><p>Thanks to the participants who accompanied us in this study and this study with ethics code IR.SBMU.RETECH.REC.1400.235 has been approved by Research Ethics Committees of Vice-Chancellor in Research Affairs - Shahid Beheshti University of Medical Sciences.</p>
</ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s7">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ai</surname> <given-names>T.</given-names></name> <name><surname>Yang</surname> <given-names>Z.</given-names></name> <name><surname>Hou</surname> <given-names>H.</given-names></name> <name><surname>Zhan</surname> <given-names>C.</given-names></name> <name><surname>Chen</surname> <given-names>C.</given-names></name> <name><surname>Lv</surname> <given-names>W.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Correlation of chest CT and RT-PCR testing in coronavirus disease (2019). (COVID-19) in China: a report of 1014 cases</article-title>. <source>Radiology</source> <volume>296</volume>, <fpage>32</fpage>&#x02013;<lpage>40</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2020200642</pub-id><pub-id pub-id-type="pmid">32101510</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alqudaihi</surname> <given-names>K. S.</given-names></name> <name><surname>Aslam</surname> <given-names>N.</given-names></name> <name><surname>Khan</surname> <given-names>I. U.</given-names></name> <name><surname>Almuhaideb</surname> <given-names>A. M.</given-names></name> <name><surname>Alsunaidi</surname> <given-names>S. J.</given-names></name> <name><surname>Ibrahim</surname> <given-names>N. M. A. R.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Cough sound detection and diagnosis using artificial intelligence techniques: challenges and opportunities</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>102327</fpage>&#x02013;<lpage>102344</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2021.3097559</pub-id><pub-id pub-id-type="pmid">34786317</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alsharif</surname> <given-names>M. H.</given-names></name> <name><surname>Alsharif</surname> <given-names>Y. H.</given-names></name> <name><surname>Chaudhry</surname> <given-names>S. A.</given-names></name> <name><surname>Albreem</surname> <given-names>M. A.</given-names></name> <name><surname>Jahid</surname> <given-names>A.</given-names></name> <name><surname>Hwang</surname> <given-names>E.</given-names></name></person-group> (<year>2020</year>). <article-title>Artificial intelligence technology for diagnosing COVID-19 cases: a review of substantial issues</article-title>. <source>Eur. Rev. Med. Pharmacol. Sci</source>. <volume>24</volume>, <fpage>9226</fpage>&#x02013;<lpage>9233</lpage>. <pub-id pub-id-type="doi">10.26355/eurrev_202009_22875</pub-id><pub-id pub-id-type="pmid">32965018</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Andreu-Perez</surname> <given-names>J.</given-names></name> <name><surname>P&#x00027;erez-Espinosa</surname> <given-names>H.</given-names></name> <name><surname>Timonet</surname> <given-names>E.</given-names></name> <name><surname>Kiani</surname> <given-names>M.</given-names></name> <name><surname>Giron-Perez</surname> <given-names>M. I.</given-names></name> <name><surname>Benitez-Trinidad</surname> <given-names>A. B.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>A generic deep learning based cough analysis system from clinically validated samples for point-of- need Covid-19 test and severity levels</article-title>. <source>IEEE Trans. Serv. Comput.</source> <volume>15</volume>:<fpage>1220</fpage>&#x02013;<lpage>32</lpage> <pub-id pub-id-type="doi">10.31219/osf.io/tm2f7</pub-id><pub-id pub-id-type="pmid">35936760</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bagad</surname> <given-names>P.</given-names></name> <name><surname>Dalmia</surname> <given-names>A.</given-names></name> <name><surname>Doshi</surname> <given-names>J.</given-names></name> <name><surname>Nagrani</surname> <given-names>A.</given-names></name> <name><surname>Bhamare</surname> <given-names>P.</given-names></name> <name><surname>Mahale</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Cough against COVID: evidence of COVID-19 signature in cough sounds</article-title>. <source>arXiv preprint arXi</source>v:<italic>2009.08790</italic>. <pub-id pub-id-type="doi">10.48550/arXiv.2009.08790</pub-id></citation>
</ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bai</surname> <given-names>H. X.</given-names></name> <name><surname>Hsieh</surname> <given-names>B.</given-names></name> <name><surname>Xiong</surname> <given-names>Z.</given-names></name> <name><surname>Halsey</surname> <given-names>K.</given-names></name> <name><surname>Choi</surname> <given-names>J. W.</given-names></name> <name><surname>Tran</surname> <given-names>T. M. L.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Performance of radiologists in differentiating COVID-19 from viral pneumonia on chest CT23</article-title>. <source>Radiology</source> <volume>296</volume>, <fpage>E46</fpage>&#x02013;<lpage>E54</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2020200823</pub-id><pub-id pub-id-type="pmid">32155105</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bleier</surname> <given-names>B. S.</given-names></name> <name><surname>Welch</surname> <given-names>K. C.</given-names></name></person-group> (<year>2020</year>). <article-title>Preprocedural COVID-19 screening: do rhinologic patients carry a unique risk burden for false-negative results?</article-title> <source>Int. Forum Allergy Rhinol</source>. <volume>10</volume>, <fpage>1186</fpage>&#x02013;<lpage>1188</lpage>. <pub-id pub-id-type="doi">10.1002/alr.22645</pub-id><pub-id pub-id-type="pmid">32558288</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brown</surname> <given-names>C.</given-names></name> <name><surname>Chauhan</surname> <given-names>J.</given-names></name> <name><surname>Grammenos</surname> <given-names>A.</given-names></name> <name><surname>Han</surname> <given-names>j.</given-names></name> <name><surname>Hasthanasombat</surname> <given-names>A.</given-names></name> <name><surname>Spathis</surname> <given-names>D.</given-names></name> <name><surname>Xia</surname> <given-names>T.</given-names></name> <name><surname>Cicuta</surname> <given-names>P.</given-names></name> <name><surname>Mascolo</surname> <given-names>C.</given-names></name></person-group> (<year>2020</year>). <article-title>Exploring automatic diagnosis of COVID-19 from crowdsourced repiratory sound data</article-title>. <source>arXiv preprint arXiv:2006.05919</source>.<pub-id pub-id-type="doi">10.1145/3394486.3412865</pub-id></citation>
</ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cabitza</surname> <given-names>F.</given-names></name> <name><surname>Campagner</surname> <given-names>A.</given-names></name> <name><surname>Ferrari</surname> <given-names>D.</given-names></name> <name><surname>Di Resta</surname> <given-names>C.</given-names></name> <name><surname>Ceriotti</surname> <given-names>D.</given-names></name> <name><surname>Sabetta</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Development, evaluation, and validation of machine learning models for COVID-19 detection based on routine blood tests</article-title>. <source>Clin. Chem. Lab. Med</source>. <volume>59</volume>, <fpage>421</fpage>&#x02013;<lpage>431</lpage>. <pub-id pub-id-type="doi">10.1515/cclm-2020-1294</pub-id><pub-id pub-id-type="pmid">33079698</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chadaga</surname> <given-names>K.</given-names></name> <name><surname>Chakraborty</surname> <given-names>C.</given-names></name> <name><surname>Prabhu</surname> <given-names>S.</given-names></name> <name><surname>Umakanth</surname> <given-names>S.</given-names></name> <name><surname>Bhat</surname> <given-names>V.</given-names></name> <name><surname>Sampathila</surname> <given-names>N.</given-names></name></person-group> (<year>2022</year>). <article-title>Clinical and laboratory approach to diagnose COVID-19 Using machine learning</article-title>. <source>Interdiscip. Sci.</source> <volume>14</volume>, <fpage>452</fpage>&#x02013;<lpage>470</lpage>. <pub-id pub-id-type="doi">10.1007/s12539-021-00499-4</pub-id><pub-id pub-id-type="pmid">35133633</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ghose</surname> <given-names>A.</given-names></name> <name><surname>Roy</surname> <given-names>S.</given-names></name> <name><surname>Vasdev</surname> <given-names>N.</given-names></name> <name><surname>Olsburgh</surname> <given-names>J.</given-names></name> <name><surname>Dasgupta</surname> <given-names>P.</given-names></name></person-group> (<year>2020</year>). <article-title>The emerging role of artificial intelligence in the fight against COVID-19</article-title>. <source>Eur. Urol.</source> <volume>78</volume>, <fpage>775</fpage>&#x02013;<lpage>776</lpage>. <pub-id pub-id-type="doi">10.1016/j.eururo.2020.09.031</pub-id><pub-id pub-id-type="pmid">32994064</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guan</surname> <given-names>W. J.</given-names></name> <name><surname>Ni</surname> <given-names>Z. Y.</given-names></name> <name><surname>Hu</surname> <given-names>Y.</given-names></name> <name><surname>Liang</surname> <given-names>W. H.</given-names></name> <name><surname>Ou</surname> <given-names>C. Q.</given-names></name> <name><surname>He</surname> <given-names>J. X.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Clinical characteristics of coronavirus disease 2019 in China</article-title>. <source>N. Engl. J. Med</source>. <volume>382</volume>, <fpage>1708</fpage>&#x02013;<lpage>1720</lpage>. <pub-id pub-id-type="doi">10.1056/NEJMoa2002032</pub-id><pub-id pub-id-type="pmid">32109013</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Haritaoglu</surname> <given-names>E. D.</given-names></name> <name><surname>Rasmussen</surname> <given-names>N.</given-names></name> <name><surname>Tan</surname> <given-names>D. C. H.</given-names></name> <name><surname>Jennifer Ranjani</surname> <given-names>J.</given-names></name> <name><surname>Xiao</surname> <given-names>J.</given-names></name> <name><surname>Chaudhari</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Using deep learning with large aggregated datasets for COVID-19 classification from cough</article-title>. <source>ArXiv ab</source>s/<italic>2201.01669</italic>.</citation>
</ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jamshidi</surname> <given-names>E.</given-names></name> <name><surname>Asgary</surname> <given-names>A.</given-names></name> <name><surname>Tavakoli</surname> <given-names>N.</given-names></name> <name><surname>Zali</surname> <given-names>A.</given-names></name> <name><surname>Dastan</surname> <given-names>F.</given-names></name> <name><surname>Daaee</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Symptom prediction and mortality risk calculation for COVID-19 using machine learning</article-title>. <source>Front Artif Intell</source>. <volume>4</volume>, 673527. <pub-id pub-id-type="doi">10.3389/frai.2021.673527</pub-id><pub-id pub-id-type="pmid">34250465</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khorramdelazad</surname> <given-names>H.</given-names></name> <name><surname>Kazemi</surname> <given-names>M. H.</given-names></name> <name><surname>Najafi</surname> <given-names>A.</given-names></name> <name><surname>Keykhaee</surname> <given-names>M.</given-names></name> <name><surname>Zolfaghari Emameh</surname> <given-names>R.</given-names></name> <name><surname>Falak</surname> <given-names>R.</given-names></name></person-group> (<year>2021</year>). <article-title>Immunopathological similarities between COVID-19 and influenza: investigating the consequences of Co-infection</article-title>. <source>Microb. Pathog</source>. <volume>152</volume>, 104554. <pub-id pub-id-type="doi">10.1016/j.micpath.2020.104554</pub-id><pub-id pub-id-type="pmid">33157216</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Laguarta</surname> <given-names>J.</given-names></name> <name><surname>Hueto</surname> <given-names>F.</given-names></name> <name><surname>Subirana</surname> <given-names>B.</given-names></name></person-group> (<year>2020</year>). <article-title>COVID-19 artificial intelligence diagnosis using only cough recordings</article-title>. <source>IEEE Open J. Eng. Med. Biol.</source> <volume>1</volume>, <fpage>275</fpage>&#x02013;<lpage>281</lpage>. <pub-id pub-id-type="doi">10.1109/OJEMB.2020.3026928</pub-id><pub-id pub-id-type="pmid">34812418</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>K.</given-names></name> <name><surname>Wu</surname> <given-names>J.</given-names></name> <name><surname>Wu</surname> <given-names>F.</given-names></name> <name><surname>Guo</surname> <given-names>D.</given-names></name> <name><surname>Chen</surname> <given-names>L.</given-names></name> <name><surname>Fang</surname> <given-names>Z.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>The clinical and chest CT features associated with severe and critical COVID-19 pneumonia</article-title>. <source>Invest. Radiol.</source> <volume>55</volume>, <fpage>327</fpage>&#x02013;<lpage>331</lpage>. <pub-id pub-id-type="doi">10.1097/RLI.0000000000000672</pub-id><pub-id pub-id-type="pmid">32118615</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Xia</surname> <given-names>L.</given-names></name></person-group> (<year>2020</year>). <article-title>Coronavirus disease 2019 (COVID-19): role of chest CT in diagnosis and management</article-title>. <source>AJR Am. J. Roentgenol</source>. <volume>214</volume>, <fpage>1280</fpage>&#x02013;<lpage>1286</lpage>. <pub-id pub-id-type="doi">10.2214/AJR.20.22954</pub-id><pub-id pub-id-type="pmid">32130038</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>S.</given-names></name> <name><surname>Mallol-Ragolta</surname> <given-names>A.</given-names></name> <name><surname>Schuller</surname> <given-names>B. W.</given-names></name></person-group> (<year>2021</year>). <article-title>COVID-19 detection with a novel multi-type deep fusion method using breathing and coughing information</article-title>. <source>Annu. Int. Conf. IEEE Eng. Med. Biol. Soc</source>. <volume>2021</volume>, <fpage>1840</fpage>&#x02013;<lpage>1843</lpage>. <pub-id pub-id-type="doi">10.1109/EMBC46164.2021.9630050</pub-id><pub-id pub-id-type="pmid">34891645</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Miranda</surname> <given-names>D.</given-names></name> <name><surname>Diacon</surname> <given-names>A. H.</given-names></name> <name><surname>Niesler</surname> <given-names>T. R.</given-names></name></person-group> (<year>2019</year>). <article-title>&#x0201C;A comparative study of features for acoustic cough detection using deep architectures,&#x0201D;</article-title> in <source>2019 41st Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</source> (<publisher-loc>Istanbul</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>2601</fpage>&#x02013;<lpage>2605</lpage>. <pub-id pub-id-type="doi">10.1109/EMBC.2019.8856412</pub-id><pub-id pub-id-type="pmid">31946429</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Moazzami</surname> <given-names>B.</given-names></name> <name><surname>Chaichian</surname> <given-names>S.</given-names></name> <name><surname>Samie</surname> <given-names>S.</given-names></name> <name><surname>Zolbin</surname> <given-names>M. M.</given-names></name> <name><surname>Jesmi</surname> <given-names>F.</given-names></name> <name><surname>Akhlaghdoust</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Does endometriosis increase susceptibility to COVID-19 infections? A case-control study in women of reproductive age</article-title>. <source>BMC Womens Health</source> <volume>21</volume>, <fpage>119</fpage>. <pub-id pub-id-type="doi">10.1186/s12905-021-01270-z</pub-id><pub-id pub-id-type="pmid">33752656</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mohammed</surname> <given-names>E. A.</given-names></name> <name><surname>Keyhani</surname> <given-names>M.</given-names></name> <name><surname>Sanati-Nezhad</surname> <given-names>A.</given-names></name> <name><surname>Hejazi</surname> <given-names>S. H.</given-names></name> <name><surname>Far</surname> <given-names>B. H.</given-names></name></person-group> (<year>2021</year>). <article-title>An ensemble learning approach to digital corona virus preliminary screening from cough sounds</article-title>. <source>Sci. Rep</source>. <volume>11</volume>, <fpage>15404</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-021-95042-2</pub-id><pub-id pub-id-type="pmid">34321592</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Naud&#x000E9;</surname> <given-names>W</given-names></name></person-group>. (<year>2020</year>). <article-title>Artificial intelligence vs COVID-19: limitations, constraints and pitfalls</article-title>. <source>AI Soc</source>. <volume>35</volume>, <fpage>761</fpage>&#x02013;<lpage>765</lpage>. <pub-id pub-id-type="doi">10.1007/s00146-020-00978-0</pub-id><pub-id pub-id-type="pmid">32346223</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pahar</surname> <given-names>M.</given-names></name> <name><surname>Klopper</surname> <given-names>M.</given-names></name> <name><surname>Warren</surname> <given-names>R.</given-names></name> <name><surname>Niesler</surname> <given-names>T.</given-names></name></person-group> (<year>2022</year>). <article-title>COVID-19 detection in cough, breath and speech using deep transfer learning and bottleneck features</article-title>. <source>Comput. Biol. Med</source>. <volume>141</volume>, <fpage>105153</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2021.105153</pub-id><pub-id pub-id-type="pmid">34954610</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pahar</surname> <given-names>M.</given-names></name> <name><surname>Niesler</surname> <given-names>T.</given-names></name></person-group> (<year>2021</year>). <article-title>Machine learning based COVID-19 detection from smartphone recordings: cough, breath and speech</article-title>. <source>arXiv preprint arXiv</source><italic>:2104.02477</italic>.</citation>
</ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Picone</surname> <given-names>J. W.</given-names></name></person-group> (<year>1993</year>). <article-title>Signal modeling techniques in speech recognition</article-title>. <source>Proc. IEEE</source> <volume>81</volume>, <fpage>1215</fpage>&#x02013;<lpage>1247</lpage>. <pub-id pub-id-type="doi">10.1109/5.237532</pub-id></citation>
</ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pooladi</surname> <given-names>M.</given-names></name> <name><surname>Entezari</surname> <given-names>M.</given-names></name> <name><surname>Hashemi</surname> <given-names>M.</given-names></name> <name><surname>Bahonar</surname> <given-names>A.</given-names></name> <name><surname>Hushmandi</surname> <given-names>K.</given-names></name> <name><surname>Raei</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>Investigating the efficient management of different countries in the COVID-19 pandemic</article-title>. <source>J Mar Med</source>. <volume>2</volume>, <fpage>18</fpage>&#x02013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.30491/1.1.3</pub-id></citation>
</ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sadhana</surname> <given-names>S.</given-names></name> <name><surname>Pandiarajan</surname> <given-names>S.</given-names></name> <name><surname>Sivaraman</surname> <given-names>E.</given-names></name> <name><surname>Daniel</surname> <given-names>D.</given-names></name></person-group> (<year>2021</year>). <article-title>AI-based power screening solution for SARS-CoV2 infection: a sociodemographic survey and COVID-19 cough detector</article-title>. <source>Proc. Comput. Sci</source>. <volume>194</volume>, <fpage>255</fpage>&#x02013;<lpage>271</lpage>. <pub-id pub-id-type="doi">10.1016/j.procs.2021.10.081</pub-id><pub-id pub-id-type="pmid">34876935</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Samieefar</surname> <given-names>N.</given-names></name> <name><surname>Rashedi</surname> <given-names>R.</given-names></name> <name><surname>Akhlaghdoust</surname> <given-names>M.</given-names></name> <name><surname>Mashhadi</surname> <given-names>M.</given-names></name> <name><surname>Darzi</surname> <given-names>P.</given-names></name> <name><surname>Rezaei</surname> <given-names>N.</given-names></name></person-group> (<year>2022</year>). <article-title>Delta variant: the new challenge of COVID-19 pandemic, an overview of epidemiological, clinical, and immune characteristics</article-title>. <source>Acta Biomed</source>. <volume>93</volume>, e2022179. <pub-id pub-id-type="doi">10.23750/abm.v93i1.12210</pub-id><pub-id pub-id-type="pmid">35315394</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Santosh</surname> <given-names>K. C.</given-names></name> <name><surname>Rasmussen</surname> <given-names>N.</given-names></name> <name><surname>Mamun</surname> <given-names>M.</given-names></name> <name><surname>Aryal</surname> <given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>A systematic review on cough sound analysis for COVID-19 diagnosis and screening: is my cough sound COVID-19?</article-title> <source>Peer J. Comput. Sci</source>. <volume>8</volume>, e958. <pub-id pub-id-type="doi">10.7717/peerj-cs.958</pub-id><pub-id pub-id-type="pmid">35634112</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Sharan</surname> <given-names>R. V.</given-names></name> <name><surname>Abeyratne</surname> <given-names>U. R.</given-names></name> <name><surname>Swarnkar</surname> <given-names>V. R.</given-names></name> <name><surname>Porter</surname> <given-names>P.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;Cough sound analysis for diagnosing croup in pediatric patients using biologically inspired features,&#x0201D;</article-title> in <source>2017 39<sup>th</sup> Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</source> (<publisher-name>IEEE</publisher-name>). <fpage>4578</fpage>&#x02013;<lpage>4581</lpage>. <pub-id pub-id-type="doi">10.1109/EMBC.2017.8037875</pub-id><pub-id pub-id-type="pmid">29060916</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sharma</surname> <given-names>A.</given-names></name> <name><surname>Baldi</surname> <given-names>A.</given-names></name> <name><surname>Kumar Sharma</surname> <given-names>D.</given-names></name></person-group> (<year>2021</year>). <article-title>How to spot COVID-19 patients: speech &#x00026; sound audio analysis for preliminary diagnosis of SARS-CoV-2 corona patients</article-title>. <source>Int. J. Clin. Pract</source>. <volume>75</volume>, e14134. <pub-id pub-id-type="doi">10.1111/ijcp.14134</pub-id><pub-id pub-id-type="pmid">33683774</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Soltani</surname> <given-names>A.</given-names></name> <name><surname>Samieefar</surname> <given-names>N.</given-names></name> <name><surname>Akhlaghdoust</surname> <given-names>M.</given-names></name></person-group> (<year>2022</year>). <article-title>Change in lifestyle behaviour dietary patterns among Iranian medical students during COVID-19 lockdown</article-title>. <source>East Mediterr Health J</source>. <volume>28</volume>, <fpage>896</fpage>&#x02013;<lpage>903</lpage>. <pub-id pub-id-type="doi">10.26719/emhj.22.094</pub-id><pub-id pub-id-type="pmid">36573570</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Struyf</surname> <given-names>T.</given-names></name> <name><surname>Deeks</surname> <given-names>J. J.</given-names></name> <name><surname>Dinnes</surname> <given-names>J.</given-names></name> <name><surname>Takwoingi</surname> <given-names>Y.</given-names></name> <name><surname>Davenport</surname> <given-names>C.</given-names></name> <name><surname>Leeflang</surname> <given-names>M. M.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Signs and symptoms to determine if a patient presenting in primary care or hospital outpatient settings has COVID-19 disease</article-title>. <source>Cochrane Database Syst. Rev</source>. <volume>7</volume>, CD013665. <pub-id pub-id-type="doi">10.1002/14651858.CD013665</pub-id><pub-id pub-id-type="pmid">32633856</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>van Ginneken</surname> <given-names>B.</given-names></name></person-group> (<year>2021</year>). <article-title>The potential of artificial intelligence to analyze chest radiographs for signs of COVID-19 pneumonia</article-title>. <source>Radiology</source> <volume>299</volume>, <fpage>E214</fpage>&#x02013;<lpage>E215</lpage>. <pub-id pub-id-type="doi">10.1148/radiol.2020204238</pub-id><pub-id pub-id-type="pmid">33236962</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wan</surname> <given-names>Z.</given-names></name> <name><surname>Zhang</surname> <given-names>Y. N.</given-names></name> <name><surname>He</surname> <given-names>Z.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Lan</surname> <given-names>K.</given-names></name> <name><surname>Hu</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>A melting curve-based multiplex RT-qPCR assay for simultaneous detection of four human coronaviruses</article-title>. <source>Int. J. Mol. Sci</source>. <volume>17</volume>, <fpage>1880</fpage>. <pub-id pub-id-type="doi">10.3390/ijms17111880</pub-id><pub-id pub-id-type="pmid">27886052</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Lin</surname> <given-names>Z. Q.</given-names></name> <name><surname>Wong</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>COVID-net: a tailored deep convolutional neural network design for detection of COVID-19 cases from chest X-ray images</article-title>. <source>Sci. Rep</source>. <volume>10</volume>, <fpage>1</fpage>&#x02013;<lpage>2</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-020-76550-z</pub-id><pub-id pub-id-type="pmid">33177550</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>L.</given-names></name></person-group> (<year>2020</year>). <article-title>C-reactive protein levels in the early stage of COVID-19</article-title>. <source>Med. Mal. Infect</source>. <volume>50</volume>, <fpage>332</fpage>&#x02013;<lpage>334</lpage>. <pub-id pub-id-type="doi">10.1016/j.medmal.2020.03.007</pub-id><pub-id pub-id-type="pmid">32243911</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wong</surname> <given-names>C. K.</given-names></name> <name><surname>Ho</surname> <given-names>D. T. Y.</given-names></name> <name><surname>Tam</surname> <given-names>A. R.</given-names></name> <name><surname>Zhou</surname> <given-names>M.</given-names></name> <name><surname>Lau</surname> <given-names>Y. M.</given-names></name> <name><surname>Tang</surname> <given-names>M. O. Y.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Artificial intelligence mobile health platform for early detection of COVID-19 in quarantine subjects using a wearable biosensor: protocol for a randomised controlled trial</article-title>. <source>BMJ Open</source> <volume>10</volume>, <fpage>e038555</fpage>. <pub-id pub-id-type="doi">10.1136/bmjopen-2020-038555</pub-id><pub-id pub-id-type="pmid">32699167</pub-id></citation></ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><collab>World Health Organization</collab></person-group>. (<year>2020</year>). <source>Coronavirus Disease 2019 (COVID-19): Situation Report, 73</source>.</citation>
</ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xiao</surname> <given-names>A. T.</given-names></name> <name><surname>Tong</surname> <given-names>Y. X.</given-names></name> <name><surname>Zhang</surname> <given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>False negative of RT-PCR and prolonged nucleic acid conversion in COVID-19: rather than recurrence</article-title>. <source>J. Med. Virol</source>. <volume>92</volume>, <fpage>1755</fpage>&#x02013;<lpage>1756</lpage>. <pub-id pub-id-type="doi">10.1002/jmv.25855</pub-id><pub-id pub-id-type="pmid">32270882</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>D.</given-names></name> <name><surname>Yao</surname> <given-names>F.</given-names></name> <name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Zheng</surname> <given-names>L.</given-names></name> <name><surname>Gao</surname> <given-names>Y.</given-names></name> <name><surname>Ye</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>A comparative study on the clinical features of coronavirus 2019(COVID-19) pneumonia with other pneumonias</article-title>. <source>Clin. Infect. Dis</source>. <volume>71</volume>, <fpage>756</fpage>&#x02013;<lpage>761</lpage>. <pub-id pub-id-type="doi">10.1093/cid/ciaa247</pub-id><pub-id pub-id-type="pmid">32161968</pub-id></citation></ref>
</ref-list> 
</back>
</article> 