<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">38915</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2023.038915</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Comparative Analysis of COVID-19 Detection Methods Based on Neural Network</article-title>
<alt-title alt-title-type="left-running-head">Comparative Analysis of COVID-19 Detection Methods Based on Neural Network</alt-title>
<alt-title alt-title-type="right-running-head">Comparative Analysis of COVID-19 Detection Methods Based on Neural Network</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Hilali-Jaghdam</surname><given-names>In&#x00E8;s</given-names></name><xref ref-type="aff" rid="aff-1">1</xref><email>Imalihilali@pnu.edu.sa</email></contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Elhag</surname><given-names>Azhari A</given-names></name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Ishak</surname><given-names>Anis Ben</given-names></name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Elamin Elnaim</surname><given-names>Bushra M.</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Mohammed Elhag</surname><given-names>Omer Eltag</given-names></name><xref ref-type="aff" rid="aff-5">5</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Abuhaimed</surname><given-names>Feda Muhammed</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western"><surname>Abdel-Khalek</surname><given-names>S.</given-names></name><xref ref-type="aff" rid="aff-2">2</xref><xref ref-type="aff" rid="aff-6">6</xref></contrib>
<aff id="aff-1"><label>1</label><institution>Computer Sciences and Information Technology Programs, Applied College, Princess Nourah bint Abdulrahman University</institution>, <addr-line>Riyadh</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Mathematics and Statistics, College of Science, Taif University</institution>, <addr-line>P. O. Box 11099, Taif, 21944</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Quantitative Methods, Higher Institute of Management, University of Tunis</institution>, <addr-line>1073</addr-line>, <country>Tunisia</country></aff>
<aff id="aff-4"><label>4</label><institution>Department of Computer Science, College of Science and Humanities in Al-Sulail, Prince Sattam Bin Abdulaziz University</institution>, <addr-line>Al Kharj</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Computer Science, Faculty of Science and Arts, King Khalid University</institution>, <addr-line>Abha, 61421</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-6"><label>6</label><institution>Department of Mathematics, Faculty of Science, Sohag University</institution>, <addr-line>Sohag</addr-line>, <country>Egypt</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: In&#x00E8;s Hilali-Jaghdam. Email: <email>Imalihilali@pnu.edu.sa</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic">
<year>2023</year></pub-date>
<pub-date date-type="pub" publication-format="electronic"><day>09</day>
<month>6</month>
<year>2023</year></pub-date>
<volume>76</volume>
<issue>1</issue>
<fpage>1127</fpage>
<lpage>1150</lpage>
<history>
<date date-type="received"><day>03</day><month>1</month><year>2023</year></date>
<date date-type="accepted"><day>16</day><month>3</month><year>2023</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Hilali-Jaghdam et al.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Hilali-Jaghdam et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_38915.pdf"></self-uri>
<abstract>
<p>In 2019, the novel coronavirus disease 2019 (COVID-19) ravaged the world. As of July 2021, there are about 192 million infected people worldwide and 4.1365 million deaths. At present, the new coronavirus is still spreading and circulating in many places around the world, especially since the emergence of Delta variant strains has increased the risk of the COVID-19 pandemic again. The symptoms of COVID-19 are diverse, and most patients have mild symptoms, with fever, dry cough, and fatigue as the main manifestations, and about 15.7&#x0025; to 32.0&#x0025; of patients will develop severe symptoms. Patients are screened in hospitals or primary care clinics as the initial step in the therapy for COVID-19. Although transcription-polymerase chain reaction (PCR) tests are still the primary method for making the final diagnosis, in hospitals today, the selection protocol is based on medical imaging because it is quick and easy to use, which enables doctors to diagnose illnesses and their effects more quickly. According to this approach, individuals who are thought to have COVID-19 first undergo an X-ray session and then, if further information is required, a CT-scan session. This methodology has led to a significant increase in the use of computed tomography scans (CT scans) and X-ray pictures in the clinic as substitute diagnostic methods for identifying COVID-19. To provide a significant collection of various datasets and methods used to diagnose COVID-19, this paper provides a comparative study of various state-of-the-art methods. The impact of medical imaging techniques on COVID-19 is also discussed.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Neural networks</kwd>
<kwd>frameworks</kwd>
<kwd>intelligent systems</kwd>
<kwd>COVID-19</kwd>
<kwd>CT</kwd>
</kwd-group>
<funding-group>
<award-group id="awg1">
<funding-source>Deanship of Scientific Research, Princess Nourah bint Abdulrahman University</funding-source>
<award-id>43-PRFA-P-42</award-id>
</award-group>
</funding-group>
</article-meta>
</front>
<body>
<sec id="s1"><label>1</label><title>Introduction</title>
<p>Since December 2019, a new form of coronavirus illness has spread internationally from Wuhan, China. SARS-CoV-2 is the new coronavirus, and COVID-19 is the illness it produces. The diagnosis of COVID-19 is based on a positive SARS-CoV-2 nucleic acid test. However, due to nucleic acid testing constraints such as long detection times, false negatives, and stringent biosafety standards, it cannot fully satisfy clinical needs [<xref ref-type="bibr" rid="ref-1">1</xref>&#x2013;<xref ref-type="bibr" rid="ref-3">3</xref>]. A radiological imaging test, particularly computed tomography (CT), is a rapid and simple technique to screen for a lung infection. It can not only assess the presence or absence of infection, but it can also serve as a reference for pathogen infection and has unique diagnostic benefits. COVID-19 lung CT symptoms are mostly ground glass [<xref ref-type="bibr" rid="ref-4">4</xref>].</p>
<p>The new coronavirus (SARS-CoV-2) is an enveloped positive-sense single-stranded RNA virus that is spreading globally, posing a significant danger to human health and the global economy [<xref ref-type="bibr" rid="ref-5">5</xref>]. More than 539 million confirmed cases and 6.32 million fatalities had been recorded globally as of June 24, 2022. Because present therapeutic options are limited, the development and administration of vaccines remain the most significant strategy for controlling the pandemic of new coronavirus pneumonia [<xref ref-type="bibr" rid="ref-6">6</xref>,<xref ref-type="bibr" rid="ref-7">7</xref>]. Reference [<xref ref-type="bibr" rid="ref-8">8</xref>] proposed a new approach for predicting COVID-19 using a machine learning algorithm. This method has achieved 93&#x0025; accuracy and has a limited time span for detecting datasets. Its complexity is also worth noting.</p>
<p>As a result of the expansion of linked research initiatives and the collection of medical picture data, several datasets have become available. This paper gathers multiple dispersed open-source datasets that have been quoted in various works of literature and research, as well as relevant descriptions and download links; discusses the picture&#x2019;s properties; and evaluates and summarizes the prevalent algorithm models.</p>
<p>This paper provides an in-depth review of various state-of-the-art methods for diagnosing COVID-19. The limitations and advantages of each method are also discussed and tabularized. A detailed graphical imaging approach is used to further clarify the role of each method on the concerned dataset.</p>
</sec>
<sec id="s2"><label>2</label><title>COVID-19 Imaging Performance</title>
<p>Chest medical imaging data such as CT and chest X-ray (CXR) images are often used and crucial. The statistical and texture aspects of lesion pictures serve as a crucial foundation for image identification and recognition in medical image analysis and are frequently employed to quantitatively define the properties of lesion images [<xref ref-type="bibr" rid="ref-9">9</xref>].</p>
<sec id="s2_1"><label>2.1</label><title>CT Image Performance</title>
<p>In individuals with COVID-19, consolidation (CL) and ground-glass opacities (GGO) are the most frequent lung CT abnormalities [<xref ref-type="bibr" rid="ref-10">10</xref>&#x2013;<xref ref-type="bibr" rid="ref-12">12</xref>]. They are primarily located in the lung margin. The lesions eventually disappear to generate fibrotic streaks when the condition becomes better [<xref ref-type="bibr" rid="ref-13">13</xref>&#x2013;<xref ref-type="bibr" rid="ref-19">19</xref>]. The majority of patients also exhibited imaging characteristics such as thickened bronchial vessels and interlobular septa [<xref ref-type="bibr" rid="ref-20">20</xref>,<xref ref-type="bibr" rid="ref-21">21</xref>]. The patient&#x2019;s lungs&#x2019; CT imaging results are shown in <xref ref-type="fig" rid="fig-1">Fig. 1</xref>.</p>
<fig id="fig-1"><label>Figure 1</label><caption><title>COVID-19 patient&#x2019;s CT of lungs. (a) GGO (in red box) (b) Consolidation (in red box)</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_38915-fig-1.tif"/></fig>
<p>The results are shown in <xref ref-type="table" rid="table-1">Table 1</xref>. Even though it is now a segmented dataset with relatively clear data images and enhanced segmentation labels, the CC-CCII dataset will be explained in detail in Section 3.1.</p>
<table-wrap id="table-1"><label>Table 1</label><caption><title>Texture feature analysis of CT images</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Parameter</th>
<th align="center" colspan="3">Group</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left"/>
<td align="left">1</td>
<td align="left">2</td>
<td align="left">3</td>
</tr>
<tr>
<td align="left">Standard deviation</td>
<td align="left">58 (Patient)<break/>43.3 (Normal)</td>
<td align="left">50.2 (Patient)<break/>45.1 (Normal)</td>
<td align="left">49.9 (Patient)<break/>53.6 (Normal)</td>
</tr>
<tr>
<td align="left">Entropy</td>
<td align="left">6.86 (Patient)<break/>6.84 (Normal)</td>
<td align="left">7.31 (Patient)<break/>6.84 (Normal)</td>
<td align="left">7.47 (Patient)<break/>7.06 (Normal)</td>
</tr>
<tr>
<td align="left">Skewness</td>
<td align="left">0.026 (Patient)<break/>0.950 (Normal)</td>
<td align="left">0.250 (Patient)<break/>1.340 (Normal)</td>
<td align="left">0.170 (Patient)<break/>0.840 (Normal)</td>
</tr>
<tr>
<td align="left">Energy</td>
<td align="left">0.019 (Patient)<break/>0.012 (Normal)</td>
<td align="left">0.008 (Patient)<break/>0.012 (Normal)</td>
<td align="left">0.007 (Patient)<break/>0.010 (Normal)</td>
</tr>
<tr>
<td align="left">Average gray</td>
<td align="left">179.3 (Patient)<break/>94.3 (Normal)</td>
<td align="left">166.5 (Patient)<break/>86.4 (Normal)</td>
<td align="left">160.1 (Patient)<break/>87.3 (Normal)</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2_2"><label>2.2</label><title>X-Ray Image Appearance</title>
<p>X-ray CXR images are more common in chest image detection than CT scanning tomography because they are simpler to collect. The primary barrier to using CXR in the imaging diagnosis of COVID-19 is the absence of information that can be verified visually. As illustrated in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>, CXR pictures reveal airspace turbidity, which is mostly dispersed in the lung margins [<xref ref-type="bibr" rid="ref-22">22</xref>]. In practice, CXR and CT are frequently combined to provide a more accurate diagnostic evaluation [<xref ref-type="bibr" rid="ref-23">23</xref>].</p>
<fig id="fig-2"><label>Figure 2</label><caption><title>Lungs images of X-ray. (a) Normal (b) with COVID-19
</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_38915-fig-2.tif"/></fig>
<p><xref ref-type="table" rid="table-2">Table 2</xref> displays the outcomes of the examination of texture features. The textural properties of the CXR images of healthy lungs vary from those of COVID-19-infected lungs. However, some variations are less visible than others. The contrast of infected photos is two to three times greater than that of healthy lung images.</p>
<table-wrap id="table-2"><label>Table 2</label><caption><title>Texture feature analysis of normal and patient chest CXR images</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Group</th>
<th align="left">Types of</th>
<th align="left">Contrast</th>
<th align="left">Dissimilarity</th>
<th align="left">Inverse gap</th>
<th align="left">Energy</th>
<th align="left">Relevant</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">First group</td>
<td align="left">Normal<break/>Patient</td>
<td align="left">23.58<break/>69.36</td>
<td align="left">3.01<break/>4.53</td>
<td align="left">0.34<break/>0.33</td>
<td align="left">0.008<break/>0.004</td>
<td align="left">0.99<break/>0.99</td>
</tr>
<tr>
<td align="left">Second group</td>
<td align="left">Normal<break/>Patient</td>
<td align="left">25.04<break/>35.80</td>
<td align="left">2.80<break/>3.38</td>
<td align="left">0.39<break/>0.33</td>
<td align="left">0.129<break/>0.028</td>
<td align="left">1.00<break/>0.99</td>
</tr>
<tr>
<td align="left">The third group</td>
<td align="left">Normal<break/>Patient</td>
<td align="left">33.67<break/>79.61</td>
<td align="left">3.48<break/>5.22</td>
<td align="left">0.32<break/>0.25</td>
<td align="left">0.103<break/>0.020</td>
<td align="left">0.99<break/>0.98</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s3"><label>3</label><title>Related Open Source Datasets</title>
<p>Datasets are an important basis for building deep learning-based COVID-19 diagnosis and segmentation models, especially datasets that can be downloaded as open source [<xref ref-type="bibr" rid="ref-24">24</xref>,<xref ref-type="bibr" rid="ref-25">25</xref>].</p>
<p><xref ref-type="table" rid="table-3">Table 3</xref> lists the data type, quantity, and data source of each dataset, and describes its use. Since lung CT images carry more detailed information, CT datasets are widely used in the detection and segmentation of COVID-19, while CXR datasets are mostly used in the detection of COVID-19. Images in these datasets are stored in various formats including .nii.gz, JPG, PNG, and DICOM. Table A1 in the Appendix gives legends for all datasets.</p>
<table-wrap id="table-3"><label>Table 3</label><caption><title>Comparison of various datasets</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Serial number</th>
<th align="left">Data set</th>
<th align="left">Type of data</th>
<th align="left">Data composition</th>
<th align="left">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">1</td>
<td align="left">COVID-19-CT-Seg</td>
<td align="left">CT</td>
<td align="left">100 sheets</td>
<td align="left">Segmentation</td>
</tr>
<tr>
<td align="left">2</td>
<td align="left">Segmentation dataset nr.2</td>
<td align="left">CT</td>
<td align="left">9 cases</td>
<td align="left">Segmentation</td>
</tr>
<tr>
<td align="left">3</td>
<td align="left">COVID-19-CT-Seg-Benchmark [<xref ref-type="bibr" rid="ref-26">26</xref>]</td>
<td align="left">CT</td>
<td align="left">20 cases</td>
<td align="left">Segmentation</td>
</tr>
<tr>
<td align="left">4</td>
<td align="left">COVID19_1110 [<xref ref-type="bibr" rid="ref-27">27</xref>]</td>
<td align="left">CT</td>
<td align="left">1110 cases</td>
<td align="left">Classification-segmentation</td>
</tr>
<tr>
<td align="left">5</td>
<td align="left">CC-CCII data set</td>
<td align="left">CT</td>
<td align="left">617775 sheets</td>
<td align="left">Classification-segmentation</td>
</tr>
<tr>
<td align="left">6</td>
<td align="left">COVID-CT-Dataset [<xref ref-type="bibr" rid="ref-28">28</xref>]</td>
<td align="left">CT</td>
<td align="left">Positive: 349/Normal: 463</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">7</td>
<td align="left">SARS-CoV-2 CT [<xref ref-type="bibr" rid="ref-29">29</xref>]</td>
<td align="left">CT</td>
<td align="left">Positive: 1252/Normal: 1230</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">8</td>
<td align="left">COVID-CTset [<xref ref-type="bibr" rid="ref-30">30</xref>]</td>
<td align="left">CT</td>
<td align="left">Positive: 15589/Normal: 48260</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">9</td>
<td align="left">HUST-19 [<xref ref-type="bibr" rid="ref-31">31</xref>]</td>
<td align="left">CT</td>
<td align="left">19685 sheets</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">10</td>
<td align="left">CT-COVID-19-August2020 [<xref ref-type="bibr" rid="ref-32">32</xref>]</td>
<td align="left">CT</td>
<td align="left">632 examples</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">11</td>
<td align="left">Pneumonia-chest x-ray dataset [<xref ref-type="bibr" rid="ref-33">33</xref>]</td>
<td align="left">CXR</td>
<td align="left">5863 sheets</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">12</td>
<td align="left">COVID-chest x-ray-dataset [<xref ref-type="bibr" rid="ref-34">34</xref>]</td>
<td align="left">CXR</td>
<td align="left">434 sheets</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">13</td>
<td align="left">COVID-19 Radiography Database</td>
<td align="left">CXR</td>
<td align="left">Positive: 3616 sheets</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">14</td>
<td align="left">COVID-19-CT-CXR [<xref ref-type="bibr" rid="ref-35">35</xref>]</td>
<td align="left">CT-CXR</td>
<td align="left">Positive: 1327/Normal: 263</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">15</td>
<td align="left">COVID-19-AR [<xref ref-type="bibr" rid="ref-36">36</xref>]</td>
<td align="left">CT-CXR</td>
<td align="left">256 examples</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">16</td>
<td align="left">BIMCV COVID-19&#x002B; [<xref ref-type="bibr" rid="ref-37">37</xref>]</td>
<td align="left">CT-CXR</td>
<td align="left">CR:7377/DX:9 463/CT:6 687</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">17</td>
<td align="left">MIDRC-RICORD [<xref ref-type="bibr" rid="ref-38">38</xref>]</td>
<td align="left">CT-CXR</td>
<td align="left">CT: 240 cases/CXR: 1 000 cases</td>
<td align="left">Classification</td>
</tr>
<tr>
<td align="left">18</td>
<td align="left">COVIDx dataset [<xref ref-type="bibr" rid="ref-39">39</xref>]</td>
<td align="left">CT-CXR</td>
<td align="left">CXR: 16 352 sheets/CT: 194 922 sheets</td>
<td align="left">Classification</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s3_1"><label>3.1</label><title>CT Segmentation Dataset</title>
<p>Constructing a dataset for COVID-19 lesion segmentation requires a lot of annotation work. After sorting and searching, there are currently five open-source datasets available for COVID-19 segmentation as follows.
<list list-type="simple">
<list-item><label>(1)</label><p>COVID-19-CT-Seg dataset (<ext-link ext-link-type="uri" xlink:href="http://medicalsegmentation.com/covid19/">http://medicalsegmentation.com/covid19/</ext-link>): This dataset is collected by the Italian Society of Medical and Interventional Radiology and contains 100 CT images of more than 40 COVID-19 patients. It is used to train the COVID-19 lesion segmentation model, the labels include ground-glass opacity, consolidation, and pleural effusion. This dataset is most commonly used in lesion segmentation.</p></list-item>
<list-item><label>(2)</label><p>Segmentation dataset nr.2 datasets (<ext-link ext-link-type="uri" xlink:href="http://medicalsegmentation.com/covid19/">http://medicalsegmentation.com/covid19/</ext-link>) This dataset is derived from 3D CT images of 9 patients with new coronary pneumonia in Radiopaedia. A total of 829 slices were included, and 373 of them were labeled, and the labels included lungs and infected areas.</p></list-item>
<list-item><label>(3)</label><p>COVID-19-CT-Seg-Benchmark dataset (<ext-link ext-link-type="uri" xlink:href="https://zenodo.org/record/3757476#.YAj7HO">https://zenodo.org/record/3757476#.YAj7HO</ext-link>): This dataset was created by [<xref ref-type="bibr" rid="ref-26">26</xref>], which contains 20 labeled 3D CT images of the lungs of COVID-19 patients, with a slice size of 512&#x2009;&#x00D7;&#x2009;512 pixels. Segmentation labels contain the left lung, right lung, and infected area.</p></list-item>
<list-item><label>(4)</label><p>COVID19_1110 dataset (<ext-link ext-link-type="uri" xlink:href="https://mosmed.ai/datasets/covid19_1110">https://mosmed.ai/datasets/covid19_1110</ext-link>): This dataset [<xref ref-type="bibr" rid="ref-27">27</xref>] is provided by Moscow Hospital, including 3D lung CT images of 1110 COVID-19 patients, with a slice size of 512&#x2009;&#x00D7;&#x2009;512 pixels. Among them, 50 cases have segmentation labels, marking ground glass opacities and consolidation areas for lesion area segmentation.</p></list-item>
<list-item><label>(5)</label><p>CC-CCII dataset (<ext-link ext-link-type="uri" xlink:href="http://ncovai.big.ac.cn/download">http://ncovai.big.ac.cn/download</ext-link>): This dataset is stored in the National Center for Bioinformatics, including COVID-19 pneumonia (NCP), common pneumonia (CP) and normal (Normal). A total of 750 CT slices from 150 patients were manually annotated as background, lung, GGO, and CL for segmentation. The image size of this dataset is 512&#x2009;&#x00D7;&#x2009;512 pixels, and the images are clear and suitable for classification and segmentation tasks. Reference [<xref ref-type="bibr" rid="ref-21">21</xref>] published this dataset and used it to develop an AI system for auxiliary diagnosis, detect and segment COVID-19 lesion areas, and further analyze the correlation between imaging features and clinical data.</p></list-item>
</list></p>
<p>In the field of lesion segmentation, the COVID-19-CT-Seg and CC-CCII datasets contain labeled 2D CT images. For 3D CT images, the contrast enhancement method can be used to improve the image quality after slicing to construct a larger number of 2D segmentation data sets.</p>
</sec>
<sec id="s3_2"><label>3.2</label><title>CT Classification Dataset</title>
<p>COVID-CT-Dataset (<ext-link ext-link-type="uri" xlink:href="https://github.com/UCSD-AI4H/COVID-CT">https://github.com/UCSD-AI4H/COVID-CT</ext-link>) and SARS-CoV-2 CT (<ext-link ext-link-type="uri" xlink:href="https://www.kaggle.com/plameneduardo/sarscov2-ctscan-dataset">https://www.kaggle.com/plameneduardo/sarscov2-ctscan-dataset</ext-link>) are the earliest and most commonly used binary classification diagnostic datasets [<xref ref-type="bibr" rid="ref-28">28</xref>,<xref ref-type="bibr" rid="ref-29">29</xref>], but these datasets have too few samples and non-uniform image sizes. The COVID-19-CT-CXR (<ext-link ext-link-type="uri" xlink:href="https://github.com/ncbi-nlp/COVID-19-CT-CXR">https://github.com/ncbi-nlp/COVID-19-CT-CXR</ext-link>) dataset was extracted from the PubMed Central Open Access (PMC-OA) article. The following are the current three CT classification data sets with good data quality and sufficient quantity.
<list list-type="simple">
<list-item><label>(1)</label><p>COVID-CTset dataset (<ext-link ext-link-type="uri" xlink:href="https://github.com/mr7495/COVID-CTset">https://github.com/mr7495/COVID-CTset</ext-link>): This dataset was collected from 95 patients and 282 normal individuals, with CT images at a resolution of 512&#x2009;&#x00D7;&#x2009;512 pixels. Different from other data sets, the gray level of the images in this data set is 16 bits, and the image quality is the highest in the current data set, which is used for binary classification detection.</p></list-item>
<list-item><label>(2)</label><p>CT-COVID-19-August2020 dataset (<ext-link ext-link-type="uri" xlink:href="https://wiki.cancerimagingarchive.net/display/Public/COVID-19">https://wiki.cancerimagingarchive.net/display/Public/COVID-19</ext-link>): This dataset was released on the Cancer Imaging Archive (TCIA) and consists of two parts. The first part contains 650 lung CT scans from 632 patients with COVID-19 infection; the second part contains 121 CT scans from 29 patients. TCIA is a large-scale public database of medical images, which contains a variety of tumor data. Its imaging modalities include MRI, CT, etc., and the data on the website continues to increase, providing an interface for the source of imaging data.</p></list-item>
<list-item><label>(3)</label><p>HUST-19 dataset (<ext-link ext-link-type="uri" xlink:href="http://ictcf.biocuckoo.cn/">http://ictcf.biocuckoo.cn/</ext-link>): This dataset is provided by Huazhong University of Science and Technology, and a patient-centered resource library (iCTCF) has been developed, including COVID-19, Lung CT slices and corresponding clinical data of normal and suspicious patients. Among them, 19685 CT images were manually marked for model training. Reference [<xref ref-type="bibr" rid="ref-31">31</xref>] developed a hybrid learning model to predict the severity and mortality of patients by integrating the image classification results of the convolutional neural network (CNN) and the clinical data classification results of deep neural network (DNN).</p></list-item>
</list></p>
</sec>
<sec id="s3_3"><label>3.3</label><title>CXR Dataset</title>
<p>CXR imaging datasets typically include COVID-19-positive, other viral pneumonia, and normal chest X-ray images. The pneumonia-chest x-ray dataset (<ext-link ext-link-type="uri" xlink:href="https://www.kaggle.com/paultmothymooney/chestxray-pneumonia">https://www.kaggle.com/paultmothymooney/chestxray-pneumonia</ext-link>) comes from the Guangzhou Maternal and Child Health Center. This dataset does not contain COVID-19 CXR images but is often used for data augmentation. The COVID chest x-ray dataset (<ext-link ext-link-type="uri" xlink:href="https://github.com/ieee8023/covid-chestxraydataset">https://github.com/ieee8023/covid-chestxraydataset</ext-link>) comes from online open-source data, websites, and images. This dataset was released earlier, but the amount of data is small. The COVID-19 Radiography Database (<ext-link ext-link-type="uri" xlink:href="https://www.kaggle.com/tawsifurrahman/covid19-radiography-database">https://www.kaggle.com/tawsifurrahman/covid19-radiography-database</ext-link>) was jointly established by researchers from Qatar University and Dhaka University. The dataset contains 3616 COVID-19-positive images, 1345 images of viral pneumonia, 6012 images of lung opacity (non-COVID-19), and 10192 images of normal.</p>
</sec>
<sec id="s3_4"><label>3.4</label><title>CT and CXR Hybrid Dataset</title>
<p>
<list list-type="simple">
<list-item><label>(1)</label><p>COVID-19-AR dataset (<ext-link ext-link-type="uri" xlink:href="https://wiki.cancerimagingarchive.net/display/Public/COVID-19">https://wiki.cancerimagingarchive.net/display/Public/COVID-19</ext-link>): This dataset [<xref ref-type="bibr" rid="ref-36">36</xref>] was released on TCIA, which includes 233 times of 105 patients CXR and 23 CT scans with a total of 31935 pictures. All image data is stored in DICOM standard format. Each patient is described by a set of clinical data.</p></list-item>
<list-item><label>(2)</label><p>BIMCV COVID-19&#x002B; dataset (<ext-link ext-link-type="uri" xlink:href="https://osf.io/nh7g8/">https://osf.io/nh7g8/</ext-link>):</p></list-item>
</list></p>
<p>The dataset is derived from the Valencia Medical Image Repository (BIMCV) [<xref ref-type="bibr" rid="ref-37">37</xref>], which contains chest CXR and CT images of COVID-19 patients, as well as related clinical data. In addition, a team of radiologists annotated 23 images for semantic segmentation of lesion regions.
<list list-type="simple">
<list-item><label>(3)</label><p>MIDRC-RICORD dataset (<ext-link ext-link-type="uri" xlink:href="https://wiki.cancerimagingarchive.net/display/Public/COVID-19">https://wiki.cancerimagingarchive.net/display/Public/COVID-19</ext-link>): This dataset was also released on TCIA, including CT scans and X-ray scans. The lesion areas of all COVID-19 CT images are marked pixel by pixel, and all X-ray films are classified and marked. The data set has three parts, including 240 cases of CT and 1000 cases of CXR images.</p></list-item>
</list></p>
<p>(4) COVIDx dataset (<ext-link ext-link-type="uri" xlink:href="https://github.com/lindawangg/COVID-Net">https://github.com/lindawangg/COVID-Net</ext-link>): This dataset is derived from the COVID-Net open-source project and is maintained by the Canadian Darwin AI Company and the Vision and Image Processing Research Group of the University of Waterloo, Canada. In the latest COVIDx8B version, 16352 CXR images are included, and in the COVIDx-CT version, 194 922 CT images are included.</p>
<p>In the field of classification, the CC-CCII and HUST-19 CT image data sets released in China are of reliable quality, and more models are expected to be trained and compared on these data sets. The CT-COVID-19-August2020, COVID-19-AR, and MIDRC-RICORD datasets contain high-quality CT and CXR imaging data, but these data are organized per patient. Researchers need to reconstruct a dataset suitable for deep learning model training from these data, which has potential research value [<xref ref-type="bibr" rid="ref-40">40</xref>].</p>
</sec>
</sec>
<sec id="s4"><label>4</label><title>Research Model Based on Deep Learning</title>
<p>From the standpoint of model tasks, research on COVID-19 may be categorized and displayed (classification or segmentation). Different lung lesions act differently, which presents some difficulties for classification. CNN provides the classification result through the softmax layer after learning the advanced characteristics of the picture and mapping them to a one-dimensional vector. A U-shaped structure serves as the segmentation&#x2019;s foundation, and the encoder initially extracts features using convolution before decoding. Deconvolution is then used to classify the pixels, and the segmentation label is then produced. The application structure of CNN for various tasks is depicted in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>.</p>
<fig id="fig-3"><label>Figure 3</label><caption><title>CNN model for COVID-19 diagnosis</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_38915-fig-3.tif"/></fig>
<p><xref ref-type="fig" rid="fig-4">Fig. 4</xref> shows that there are several data sets for classification and that classification detection has wider applicability than lesion area segmentation. To increase the model&#x2019;s capacity for generalization, the majority of models undergo repeated data sets of training. Some older open-source datasets have seen widespread use, while others have not.</p>
<fig id="fig-4"><label>Figure 4</label><caption><title>Application of different models for COVID-19 diagnosis</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_38915-fig-4.tif"/></fig>
<sec id="s4_1"><label>4.1</label><title>COVID-19 Classification Model</title>
<p>There are often two categories and three classifications for the job of classifying new coronary pneumonia.</p>
<sec id="s4_1_1"><label>4.1.1</label><title>Classification of CT Images</title>
<p><xref ref-type="table" rid="table-4">Table 4</xref> shows the CT image classification models. Although there is presently no 3D pre-training model that is widely accessible, the 3D method is typically superior to the 2D model.
<list list-type="simple">
<list-item><label>(1)</label><p>Common backbone network</p>
</list-item>
</list></p>
<table-wrap id="table-4"><label>Table 4</label><caption><title>Comparison of features and evaluation of various models</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Model</th>
<th align="left">Use dataset</th>
<th align="left">Model Features</th>
<th>Performance evaluation</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">3D ResNet-18 [<xref ref-type="bibr" rid="ref-45">45</xref>]</td>
<td align="left">CC-CCII<break/>110420 (80.4&#x0025;) of them are used for training and verification;<break/>26836 (19.6&#x0025;) were used for testing</td>
<td align="left">Explore the input convolution depth and the influence of the 3D model on the classification effect</td>
<td>ACC&#x2009;&#x003D;&#x2009;0.997 6<break/>Recall&#x2009;&#x003D;&#x2009;0.999 6<break/>Precision&#x2009;&#x003D;&#x2009;0.993 5<break/>F1&#x2009;&#x003D;&#x2009;0.992 4</td>
</tr>
<tr>
<td align="left">MNas3DNet41 [<xref ref-type="bibr" rid="ref-46">46</xref>]</td>
<td align="left">CC-CCII<break/>A total of 340 190 pieces after cleaning;<break/>Divide training and testing by 4:1</td>
<td align="left">3D CNNs are better than 2D; model performance is not significantly linked to the number of slices; mixed data augmentation improves performance</td>
<td>ACC &#x2009;&#x003D;&#x2009;0.874 1<break/>F1&#x2009;&#x003D;&#x2009;0.872 5<break/>AUC&#x2009;&#x003D;&#x2009;0.957 0</td>
</tr>
<tr>
<td align="left">CTnet-10 [<xref ref-type="bibr" rid="ref-47">47</xref>]</td>
<td align="left">COVID-CT-Dataset<break/>training:validation: test&#x2009;&#x003D;&#x2009;8:1:1</td>
<td align="left">VGG-19 classification performance is stronger than CTnet-10, but CTnet-10 has the shortest training and testing time</td>
<td>ACC(CTnet-10)&#x2009;&#x003D;&#x2009;<break/>0.821 0<break/>ACC(VGG-19)&#x2009;&#x003D;&#x2009;<break/>0.945 2</td>
</tr>
<tr>
<td align="left">Integrated learning [<xref ref-type="bibr" rid="ref-48">48</xref>]</td>
<td align="left">COVID-CT-Dataset<break/>train:validation: test&#x2009;&#x003D;&#x2009;60:15:25</td>
<td align="left">15 model ensembles, majority voting strategy;<break/>transfer learning method</td>
<td>ACC&#x2009;&#x003D;&#x2009;0.850 0<break/>Recall&#x2009;&#x003D;&#x2009;0.854 0<break/>Precision&#x2009;&#x003D;&#x2009;0.857 0</td>
</tr>
<tr>
<td align="left">DenseNet improvements [<xref ref-type="bibr" rid="ref-49">49</xref>]</td>
<td align="left">COVID-CT-Dataset<break/>train:test: validation&#x2009;&#x003D;&#x2009;60:15:25</td>
<td align="left">Transfer learning method</td>
<td>ACC&#x2009;&#x003D;&#x2009;0.870 0<break/>F1&#x2009;&#x003D;&#x2009;0.860 0</td>
</tr>
<tr>
<td align="left">ResNet50 [<xref ref-type="bibr" rid="ref-50">50</xref>]</td>
<td align="left">COVID-CT-Dataset<break/>train:validation:test&#x2009;&#x003D;&#x2009;4 292:870:94</td>
<td align="left">Traditional data augmentation; conditional generative adversarial networks</td>
<td>ACC (ResNet50)&#x003D;<break/>0.813 8</td>
</tr>
<tr>
<td align="left">DenseNet-169 [<xref ref-type="bibr" rid="ref-51">51</xref>]</td>
<td align="left">COVID-CT-Dataset<break/>training: validation&#x2009;&#x003D;&#x2009;1:1</td>
<td align="left">Transfer learning methods; self-supervised learning</td>
<td>ACC&#x2009;&#x003D;&#x2009;0.890 0<break/>AUC&#x2009;&#x003D;&#x2009;0.980 0<break/>F1&#x2009;&#x003D;&#x2009;0.900 0</td>
</tr>
<tr>
<td align="left">DECAPS [<xref ref-type="bibr" rid="ref-52">52</xref>]</td>
<td align="left">COVID-CT-Dataset<break/>training: 286 positives, 339 negatives<break/>tests: 47 positive, 58 negative</td>
<td align="left">Capsule network (CapsNet) captures fine-grained features of regions of interest; conditional generative adversarial networks</td>
<td>ACC&#x2009;&#x003D;&#x2009;0.876 0<break/>AUC&#x2009;&#x003D;&#x2009;0.961 0<break/>F1&#x2009;&#x003D;&#x2009;0.871 0</td>
</tr>
<tr>
<td align="left">DenseNet201 [<xref ref-type="bibr" rid="ref-53">53</xref>]</td>
<td align="left">SARS-CoV-2 CT<break/>train:validation: test&#x2009;&#x003D;&#x2009;68:17:15</td>
<td align="left">Transfer learning methods; traditional data augmentation</td>
<td>ACC (training)&#x2009;&#x003D;&#x2009;<break/>0.998 2<break/>ACC(test)&#x2009;&#x003D;&#x2009;0.962 5<break/>ACC(verification)&#x2009;&#x003D;&#x2009;<break/>0.974 0</td>
</tr>
<tr>
<td align="left">MADE-DBM [<xref ref-type="bibr" rid="ref-54">54</xref>]</td>
<td align="left">SARS-CoV-2 CT<break/>training:test&#x2009;&#x003D;&#x2009;3:2</td>
<td align="left">Deep bidirectional long short-term memory (DBM); adaptive differential evolution (memetic adaptive differential evolution, MADE) algorithm to tune hyperparameters</td>
<td>ACC&#x2009;&#x003D;&#x2009;0.984 0<break/>AUC&#x2009;&#x003D;&#x2009;0.983 0<break/>SEN&#x2009;&#x003D;&#x2009;0.989 0</td>
</tr>
<tr>
<td align="left">CNN-COVID [<xref ref-type="bibr" rid="ref-55">55</xref>]</td>
<td align="left">SARS-CoV-2 CT<break/>training: validation&#x2009;&#x003D;&#x2009;4:1</td>
<td align="left">Interpretable deep learning methods;<break/>transfer learning method</td>
<td>ACC&#x2009;&#x003D;&#x2009;0.973 8<break/>AUC&#x2009;&#x003D;&#x2009;0.973 6<break/>Precision&#x2009;&#x003D;&#x2009;0.991 6</td>
</tr>
<tr>
<td align="left">COVID-Net Improvements [<xref ref-type="bibr" rid="ref-56">56</xref>]</td>
<td align="left">Site A: SARS-CoV-2 CT<break/>Site B: COVID-CT-Dataset<break/>train:validation: test&#x2009;&#x003D;&#x2009;60:15:25</td>
<td align="left">Batch Normalization (BN) layer;<break/>cosine annealing learning rate;<break/>Joint Learning Program</td>
<td>Site A: ACC&#x2009;&#x003D;&#x2009;0.908 3<break/>Site B: ACC&#x2009;&#x003D;&#x2009;0.786 9</td>
</tr>
<tr>
<td align="left">ResNet50v2 [<xref ref-type="bibr" rid="ref-57">57</xref>]</td>
<td align="left">COVID-CTset<break/>training: validation&#x2009;&#x003D;&#x2009;4:1</td>
<td align="left">Improved Feature Pyramid Structure</td>
<td>ACC&#x2009;&#x003D;&#x2009;0.984 9<break/>SEN&#x2009;&#x003D;&#x2009;0.949 6</td>
</tr>
<tr>
<td align="left">COVIDNet-CT [<xref ref-type="bibr" rid="ref-58">58</xref>]</td>
<td align="left">COVIDx dataset<break/>train:validation: test&#x2009;&#x003D;&#x2009;60:15:25</td>
<td align="left">Before convolution: Map and then copy to increase dimension; After convolution: Map and expand the output (PRPE)</td>
<td>ACC&#x2009;&#x003D;&#x2009;0.991 0<break/>SEN&#x2009;&#x003D;&#x2009;0.973 0<break/>Spec&#x2009;&#x003D;&#x2009;0.999 0</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Use common backbone networks (including visual geometry group (VGG), ResNet, DenseNet, etc.) for effective feature extraction, and use them for subsequent fusion, classification, and other operations. Reference [<xref ref-type="bibr" rid="ref-41">41</xref>] compared different 3D ResNets and found that the 3D ResNet-18 classification performance is the best when the input depth is 4 and the batch size is 32 slices. Reference [<xref ref-type="bibr" rid="ref-42">42</xref>] compared the classification performance of Resnet-18, InceptionV3, and Mobile-NetV2 on CT and CXR, and found that ResNet-18 has the highest accuracy on CT, and InceptionV3 has the highest accuracy on CXR. Reference [<xref ref-type="bibr" rid="ref-28">28</xref>] trained DenseNet-169 for the detection of COVID-19 and used a feature extraction network and Atrous Spatial Pyramid Pooling (ASPP) to extract more accurate features. Reference [<xref ref-type="bibr" rid="ref-43">43</xref>] trained DenseNet121 on the COVID-19-CT-CXR dataset to test the CT classification performance. Reference [<xref ref-type="bibr" rid="ref-44">44</xref>] used ResNet50v2 and a modified feature pyramid structure to improve classification accuracy on COVID-CTset.
<list list-type="simple">
<list-item><label>(2)</label><p>Data enhancement</p></list-item>
</list></p>
<p>To avoid model training overfitting and improve the accuracy of model classification, data augmentation methods are often used to expand data sets.</p>
<p>Reference [<xref ref-type="bibr" rid="ref-45">45</xref>] used operations such as affine transformation and translation on the COVID-CT-Dataset. Additionally, most models use unsupervised generative adversarial networks (GANs) to augment data. Commonly used data augmentation methods include supervised geometric transformation and unsupervised GAN. Reference [<xref ref-type="bibr" rid="ref-46">46</xref>] used a combination of traditional data augmentation and CGAN to improve experimental accuracy and performance. Reference [<xref ref-type="bibr" rid="ref-47">47</xref>] used a conditional GAN (CGAN) based pix2pix network to generate images on the COVID-CT-Dataset. Reference [<xref ref-type="bibr" rid="ref-48">48</xref>] performed operations such as rotation, tilt, flip, and pixel filling on the SARS-CoV-2 CT dataset. Reference [<xref ref-type="bibr" rid="ref-49">49</xref>] utilized Cycle Generative Adversarial Network (CycleGAN) to generate GGO images on a large-scale lung cancer dataset. Reference [<xref ref-type="bibr" rid="ref-50">50</xref>] used the mixed data augmentation (mixup) [<xref ref-type="bibr" rid="ref-51">51</xref>] method in 3D models and demonstrated that this method can effectively improve the model&#x2019;s accuracy.
<list list-type="simple">
<list-item><label>(3)</label><p>Transfer Learning</p></list-item>
</list></p>
<p>Transfer learning can also compensate for the problem of insufficient data sets, usually by loading parameters pre-trained on ImageNet. Reference [<xref ref-type="bibr" rid="ref-52">52</xref>] used a deep transfer learning model (DTL) to train on the SARS-CoV-2 CT dataset by using the pre-trained DenseNet201. Reference [<xref ref-type="bibr" rid="ref-53">53</xref>] used five deep transfer learning models to train on the COVID-CT-Dataset, combined with data augmentation, and the results showed that ResNet50 had the best classification performance. Reference [<xref ref-type="bibr" rid="ref-54">54</xref>] proposed a method for COVID-19 detection based on transfer learning and conducted experiments on the COVID-19 dataset by fine-tuning the pre-trained CheXNet [<xref ref-type="bibr" rid="ref-55">55</xref>] model.</p>
<list list-type="simple">
<list-item><label>(4)</label><p>Integrated Learning</p></list-item>
</list></p>
<p>Using ensemble learning to integrate multiple classification models and determine the classification results through voting and other methods can effectively improve classification accuracy. Reference [<xref ref-type="bibr" rid="ref-56">56</xref>] used 15 different pre-trained classification models for classification tasks, used ensemble learning methods to train on COVID-CT-Dataset, and output classification results using the number of votes.
<list list-type="simple">
<list-item><label>(5)</label><p>Lightweight model</p></list-item>
</list></p>
<p>To address the characteristics of COVID-19 data sets and classification tasks, many studies have proposed lightweight classification models. Reference [<xref ref-type="bibr" rid="ref-57">57</xref>] proposed a capsule network (CapsNet) structure DECAPS for fine-grained recognition, which uses activation maps to crop and extract fine-grained representations of regions of interest. Reference [<xref ref-type="bibr" rid="ref-58">58</xref>] designed a neural architecture search (NAS) [<xref ref-type="bibr" rid="ref-59">59</xref>] method based on reinforcement learning to generate a lightweight 3D model MNas3DNet41, and built the model by stacking predefined units. Reference [<xref ref-type="bibr" rid="ref-60">60</xref>] proposed a model COVIDNet-CT for pneumonia CT image classification by stacking mapping-replication-mapping-expansion (PRPE and PRPE-S) modules. Reference [<xref ref-type="bibr" rid="ref-61">61</xref>] proposed a federated learning scheme to improve diagnosis by learning from heterogeneous datasets. Reference [<xref ref-type="bibr" rid="ref-62">62</xref>] proposed the CTnet-10 model, and compared with five models, VGG-19 has the best classification effect, but CTnet-10 has the shortest prediction time.</p>
</sec>
<sec id="s4_1_2"><label>4.1.2</label><title>Classification of CXR</title>
<p>It&#x2019;s important to note that certain simple CNNs often outperform more sophisticated structures at categorization. <xref ref-type="table" rid="table-5">Table 5</xref> compared various dataset models of CXR. In terms of data augmentation, references [<xref ref-type="bibr" rid="ref-58">58</xref>&#x2013;<xref ref-type="bibr" rid="ref-60">60</xref>] all adopt traditional supervised data augmentation methods. Reference [<xref ref-type="bibr" rid="ref-61">61</xref>] used unsupervised GAN to augment the dataset. In terms of transfer learning, references [<xref ref-type="bibr" rid="ref-62">62</xref>&#x2013;<xref ref-type="bibr" rid="ref-64">64</xref>] all used pre-trained models on ImageNet as backbone networks. Different from using the pre-trained model on ImageNet, reference [<xref ref-type="bibr" rid="ref-65">65</xref>] improves the ability to capture the characteristics of the lesion area [<xref ref-type="bibr" rid="ref-66">66</xref>,<xref ref-type="bibr" rid="ref-67">67</xref>].</p>
<table-wrap id="table-5"><label>Table 5</label><caption><title>Comparison of features and evaluation of CXR dataset models</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Model</th>
<th align="left">Use dataset</th>
<th align="left">Model features</th>
<th align="left">Performance evaluation</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Integrated learning [<xref ref-type="bibr" rid="ref-59">59</xref>]</td>
<td align="left">COVID-19-CT-CXR<break/>Training:test&#x2009;&#x003D;&#x2009;4:1</td>
<td align="left">Integrate resnet18, resnet50, and densenet201 models to determine classification decisions through voting; traditional data augmentation; transfer learning</td>
<td align="left">Acc&#x2009;&#x003D;&#x2009;0.997 0<break/>Auc&#x2009;&#x003D;&#x2009;0.999 0</td>
</tr>
<tr>
<td align="left">CNN-COVID [<xref ref-type="bibr" rid="ref-60">60</xref>]</td>
<td align="left">Bimcv COVID-19&#x002B;<break/>Training: test:validation&#x2009;&#x003D;&#x2009;5:1:1</td>
<td align="left">4 convolutional layers; 4 pooling layers; 5 fully connected layers traditional data enhancement: Geometric transformation; 20&#x0025; neuron random deactivation</td>
<td align="left">Acc&#x2009;&#x003D;&#x2009;0.998 4<break/>Sen&#x2009;&#x003D;&#x2009;0.996 6<break/>Spec&#x2009;&#x003D;&#x2009;0.980 1</td>
</tr>
<tr>
<td align="left">Integrated learning [<xref ref-type="bibr" rid="ref-61">61</xref>]</td>
<td align="left">Pneumonia-chest x-ray dataset<break/>Training 5232; testing 624</td>
<td align="left">Integrate alexnet, resnet18, inception v3, densenet121 and Google net, voting for classification decisions; transfer learning; traditional data augmentation: Adding noise, cropping, and flipping</td>
<td align="left">Acc&#x2009;&#x003D;&#x2009;0.964 0<break/>Recall&#x2009;&#x003D;&#x2009;0.996 2</td>
</tr>
<tr>
<td align="left">Resnet18 [<xref ref-type="bibr" rid="ref-62">62</xref>]</td>
<td align="left">Part of the pneumonia-chest x-ray dataset<break/>624 sheets;<break/>Training: Test&#x2009;&#x003D;&#x2009;4:1</td>
<td align="left">Comparing the classification effects of alexnet, Squeeznet, googlenet, and resnet18; using GAN for data enhancement; transfer learning</td>
<td align="left">ACC (resnet18)&#x2009;&#x003D;&#x2009;<break/>0.990 0</td>
</tr>
<tr>
<td align="left">Cov-elm [<xref ref-type="bibr" rid="ref-63">63</xref>]</td>
<td align="left">Pneumonia-chest x-ray dataset<break/>Covid-chest x-ray<break/>Covid-19 radiography database</td>
<td align="left">Preprocessing: Equalization; extracting texture features and frequency features; extreme learning machines (elm)</td>
<td align="left">Acc&#x2009;&#x003D;&#x2009;0.944 0<break/>Recall&#x2009;&#x003D;&#x2009;0.957 8<break/>F1&#x2009;&#x003D;&#x2009;0.950 0</td>
</tr>
<tr>
<td align="left">Lightweight CNN [<xref ref-type="bibr" rid="ref-64">64</xref>]</td>
<td align="left">Pneumonia-chest x-ray dataset COVID-chest x-ray</td>
<td align="left">Fewer parameters, high efficiency; Avoid overfitting</td>
<td align="left">ACC&#x2009;&#x003D;&#x2009;0.996 9<break/>SEN&#x2009;&#x003D;&#x2009;1.000 0<break/>AUC&#x2009;&#x003D;&#x2009;0.999 5</td>
</tr>
<tr>
<td align="left">MF-TS [<xref ref-type="bibr" rid="ref-65">65</xref>]</td>
<td align="left">Covid-19-ar<break/>Bimcv covid-19&#x002B;<break/>Midrc-ricord<break/>Covidx dataset</td>
<td align="left">Semi-supervised tasks via a teacher-student approach; local phase image enhancement</td>
<td align="left">Using 30&#x0025; labeled data is equivalent to resnet50 using all labeled data</td>
</tr>
<tr>
<td align="left">Covidnet [<xref ref-type="bibr" rid="ref-66">66</xref>]</td>
<td align="left">Covidx dataset</td>
<td align="left">Lightweight mode;<break/>Before convolution: Low-dimensional mapping and high-dimensional expansion; after convolution: Low-dimensional mapping and high-dimensional expansion to the final feature output (PEPX)</td>
<td align="left">ACC&#x2009;&#x003D;&#x2009;0.933 0<break/>SEN&#x2009;&#x003D;&#x2009;0.910 0<break/>PPV&#x2009;&#x003D;&#x2009;0.989 0</td>
</tr>
<tr>
<td align="left">Cov-snet [<xref ref-type="bibr" rid="ref-67">67</xref>]</td>
<td align="left">Covidx dataset</td>
<td align="left">Transfer learning: Pre-training on chestx-ray14 pneumonia dataset</td>
<td align="left">Sen&#x2009;&#x003D;&#x2009;0.950 0</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>In terms of ensemble learning, reference [<xref ref-type="bibr" rid="ref-68">68</xref>] integrated 3 classification models, and reference [<xref ref-type="bibr" rid="ref-69">69</xref>] integrated 5 classification models, and voted to determine the classification results to improve classification accuracy. Reference [<xref ref-type="bibr" rid="ref-70">70</xref>] enhanced the local phase information of the image as data augmentation input into the neural network, combined with a semi-supervised training method, using small labeled data to train large unlabeled data.</p>
<p>By designing a lightweight X-ray classification model and reducing model parameters, good performance can also be achieved. Reference [<xref ref-type="bibr" rid="ref-71">71</xref>] proposed a lightweight CXR classification model COVID-Net, using the PEPX module, that is, through 1&#x2009;&#x00D7;&#x2009;1 convolution to realize the design pattern of mapping to an extension, and the classification effect exceeds VGG-19 and ResNet-50.</p>
</sec>
</sec>
<sec id="s4_2"><label>4.2</label><title>COVID-19 Segmentation Model</title>
<p>CT scans are often utilized for COVID-19 lesion area segmentation [<xref ref-type="bibr" rid="ref-72">72</xref>]. The segmentation industry is still facing difficulties. The segmentation performance comparison among the models is shown in <xref ref-type="table" rid="table-6">Table 6</xref>.
<list list-type="simple">
<list-item><label>(1)</label><p>Data enhancement</p>
</list-item>
</list></p>
<table-wrap id="table-6"><label>Table 6</label><caption><title>Comparison of various segmentation methods for COVID-19 diagnosis</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Model</th>
<th align="left">Use dataset</th>
<th align="left">Model features</th>
<th align="left">Performance evaluation</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Inf-Net [<xref ref-type="bibr" rid="ref-66">66</xref>]</td>
<td align="left">COVID-19-CT-Seg<break/>45 images for training; 5 images for verification; 50 images for testing</td>
<td align="left">The encoder uses the PPD module to aggregate high-level features;<break/>RA reverse attention module;<break/>Expanding datasets for semi-supervised learning</td>
<td align="left">Dice&#x2009;&#x003D;&#x2009;0.739<break/>Sen&#x2009;&#x003D;&#x2009;0.725<break/>Spec&#x2009;&#x003D;&#x2009;0.960</td>
</tr>
<tr>
<td align="left">MiniSeg [<xref ref-type="bibr" rid="ref-67">67</xref>]</td>
<td align="left">COVID-19-CT-Seg<break/>Training 60; validation 40 the training set is enhanced with crop flipping</td>
<td align="left">Attentive hierarchical spatial pyramid (AHSP) module;<break/>Lightweight multi-scale learning for small samples</td>
<td align="left">DSC&#x2009;&#x003D;&#x2009;0.773<break/>Sen&#x2009;&#x003D;&#x2009;0.836<break/>Spec&#x2009;&#x003D;&#x2009;0.974</td>
</tr>
<tr>
<td align="left">U-Net improvements [<xref ref-type="bibr" rid="ref-68">68</xref>]</td>
<td align="left">COVID-19-CT-Seg: 100 sheets<break/>Segmentation dataset nr.2: 373 sheets<break/>Training set: Test set&#x2009;&#x003D;&#x2009;4:1</td>
<td align="left">Build the Res_dil residual module;<break/>Introduce the scSE attention module in U-Net</td>
<td align="left">Dice&#x2009;&#x003D;&#x2009;0.831<break/>Sen&#x2009;&#x003D;&#x2009;0.867<break/>Spec&#x2009;&#x003D;&#x2009;0.993</td>
</tr>
<tr>
<td align="left">DDANet [<xref ref-type="bibr" rid="ref-69">69</xref>]</td>
<td align="left">COVID-19-CT-Seg and Segmentation dataset nr.2<break/>A total of 471 sheets after clearing</td>
<td align="left">Introduce the CCA attention module in U-Net architecture</td>
<td align="left">Dice (GGO)&#x003D;0.734<break/>Dice (Consol)&#x003D;0.613</td>
</tr>
<tr>
<td align="left">U-Net&#x002B;cGAN [<xref ref-type="bibr" rid="ref-70">70</xref>]</td>
<td align="left">Segmentation dataset nr.2<break/>300 images for training; 73 images for testing</td>
<td align="left">Generator: Dynamic weighted sum (DESUM) module;<break/>Discriminator: Dynamic feature matching (DFM) module</td>
<td align="left">Dice&#x2009;&#x003D;&#x2009;0.892<break/>PSNR&#x2009;&#x003D;&#x2009;26.89<break/>FID&#x2009;&#x003D;&#x2009;0.033</td>
</tr>
<tr>
<td align="left">CoSinGAN [<xref ref-type="bibr" rid="ref-71">71</xref>]</td>
<td align="left">COVID-19-CT-Seg-Benchmark: 3 520 training images;<break/>COVID19_1110: 50 tests</td>
<td align="left">The multi-scale architecture of the two-level GAN pyramid;<break/>Hybrid reconstruction loss;<break/>Hierarchical data augmentation module</td>
<td align="left">DSC&#x2009;&#x003D;&#x2009;0.713&#x2009;&#x00B1;&#x2009;0.190<break/>NSD&#x2009;&#x003D;&#x2009;0.720&#x2009;&#x00B1;&#x2009;0.209</td>
</tr>
<tr>
<td align="left">U-Net [<xref ref-type="bibr" rid="ref-72">72</xref>]</td>
<td align="left">COVID-19-CT-Seg-Benchmark<break/>Training: validation&#x2009;&#x003D;&#x2009;4:1;<break/>COVID19_1110: 50 tests</td>
<td align="left">Creation of the COVID-19-CT-Seg-Benchmark dataset;<break/>Use more than 40 baselines to segment the left lung, right lung, and lesion area on this dataset</td>
<td align="left">Lesion segmentation effect:<break/>DSC&#x2009;&#x003D;&#x2009;0.673<break/>NSD&#x2009;&#x003D;&#x2009;0.700</td>
</tr>
<tr>
<td align="left">D2A U-Net [<xref ref-type="bibr" rid="ref-73">73</xref>]</td>
<td align="left">COVID-19-CT-Seg: 100 tests;<break/>Segmentation dataset nr.2 and COVID-19-CTSeg-<break/>A total of 1 645 benchmarks are used for training</td>
<td align="left">Expanded convolution increases the receptive field;<break/>Double attention mechanism</td>
<td align="left">Dice&#x2009;&#x003D;&#x2009;0.730<break/>Recall&#x2009;&#x003D;&#x2009;0.707</td>
</tr>
<tr>
<td align="left">Improved U-Net [<xref ref-type="bibr" rid="ref-74">74</xref>]</td>
<td align="left">COVID-19-CT-Seg<break/>Segmentation dataset nr.2<break/>1 810 images for training; 150 images for verification; 10 images for testing</td>
<td align="left">Traditional data augmentation;<break/>EfficientNet-B0 for feature extraction;<break/>Replace traditional upsampling with a DUpsampling structure</td>
<td align="left">DSC&#x2009;&#x003D;&#x2009;0.851<break/>Recall&#x2009;&#x003D;&#x2009;0.804<break/>Precision&#x2009;&#x003D;&#x2009;0.842</td>
</tr>
<tr>
<td align="left">Improved threshold segmentation technology based on multi-level Kapur entropy [<xref ref-type="bibr" rid="ref-75">75</xref>]</td>
<td align="left">COVID-CT-Dataset</td>
<td align="left">Image contrast enhancement algorithm;<break/>Image correlation series for automatic thresholding</td>
<td align="left">Dice&#x2009;&#x003D;&#x2009;0.710<break/>ACC&#x2009;&#x003D;&#x2009;0.980</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Reference [<xref ref-type="bibr" rid="ref-73">73</xref>] randomly rotated, cropped, and flipped the existing dataset images and labels at the same time, used the Efficient-Net-B0 pre-trained on ImageNet as the feature extractor [<xref ref-type="bibr" rid="ref-74">74</xref>], and replaced the traditional upsampling structure with a DUpsampling structure [<xref ref-type="bibr" rid="ref-75">75</xref>] to improve U-Net. Using the GAN network to synthesize infected images solves the problem of difficult data labeling to a certain extent.</p>
<p>Reference [<xref ref-type="bibr" rid="ref-70">70</xref>] proposed a CGAN-based CT image synthesis method for COVID-19 segmentation, using dynamic element weighting (Dynamic Element-wise Sum, DESUM) on the generator and dynamic feature matching on the discriminator (Dynamic Feature Matching, DFM) to improve the quality of synthesized images. Reference [<xref ref-type="bibr" rid="ref-76">76</xref>] proposed a generative model, CoSinGAN, which combines GAN and feature pyramid structures to reconstruct image details through conditional constraints and across scales.
<list list-type="simple">
<list-item><label>(2)</label><p>Attention mechanism</p></list-item>
</list></p>
<p>One study integrated the scSE attention module into the U-Net architecture to better capture lesion features, and dilated convolution residual blocks (Res_dil) were used in the encoder and decoder parts to increase the receptive field. To continually train the attention coefficient, the Criss-Cross Attention (CCA) module is added, resulting in the dynamically deformable attention network DDANet. Compared to U-Net and Inf-Net, this model&#x2019;s segmentation effect is noticeably better.
<list list-type="simple">
<list-item><label>(3)</label><p>Lightweight model</p></list-item>
</list></p>
<p>To adapt to the insufficient number of segmentation datasets, relatively lightweight models based on small sample datasets have been proposed one after another. One study proposed a COVID-19 lesion area CT segmentation model, Inf-Net, which uses a Reverse Attention (RA) module and an Edge Attention (EA) module to improve segmentation of the infected area. Another study suggested the MiniSeg model in conjunction with the AHSP module for efficient multi-scale learning and demonstrated that, for the identical data set, this model&#x2019;s segmentation effect outperformed Inf-Net.</p>
</sec>
</sec>
<sec id="s5"><label>5</label><title>Conclusion</title>
<p>This study primarily examines the use of several imaging datasets from COVID-19 for various purposes. It has gathered and arranged many open-source imaging datasets, some of which contain CT pictures and others which include CXR images. According to various TCIA image data collection standards, image data format consistency, metadata standardization, and data labeling should be treated with unified specification criteria for picture completeness, or research on quality evaluation standards for recorded images should be conducted. Furthermore, because patient information is typically present in medical imaging data, de-privacy methods should be implemented during data collection to remove patient information from image and lesion label data.</p>
<p>Combined with the classification and segmentation tasks of COVID-19 images, the application of the current mainstream deep learning algorithm models is compared. The idea of an attention mechanism has achieved obvious results in medical image analysis, and the lesion area of medical imaging has typical local characteristics. The study of local attention mechanisms will become a more effective method in the future. At the same time, the research on small sample sets and data imbalance methods is still an issue worthy of an in-depth discussion in the field of medical image processing.</p>
</sec>
</body>
<back>
<sec><title>Funding Statement</title>
<p>This research project was funded by the Deanship of Scientific Research, Princess Nourah bint Abdulrahman University, through the Program of Research Project Funding After Publication, grant No (43-PRFA-P-42)</p></sec>
<sec sec-type="data-availability"><title>Availability of Data and Materials</title>
<p>The data used for the findings of this study are available within this article.</p></sec>
<sec sec-type="COI-statement"><title>Conflicts of Interest</title>
<p>The authors declare that they have no conflicts of interest to report regarding the present study.</p></sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Awulachew</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Diriba</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Anja</surname></string-name> and <string-name><given-names>E.</given-names> <surname>Get</surname></string-name></person-group>, &#x201C;<article-title>Computed tomography (CT) imaging features of patients with COVID-19: A systematic review and meta-analysis</article-title>,&#x201D; <source>Radiology Research and Practice</source>, vol. <volume>2020</volume>, ID 1023506, pp. <fpage>1</fpage>&#x2013;<lpage>7</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Chung</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Bernheim</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Mei</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Huang</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>CT imaging features of 2019 novel coronavirus (2019-nCoV)</article-title>,&#x201D; <source>Radiology</source>, vol. <volume>296</volume>, pp. <fpage>202</fpage>&#x2013;<lpage>207</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Meisam</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Shirbandi</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Shahvandi</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Arjmand</surname></string-name></person-group>, &#x201C;<article-title>The diagnostic accuracy of artificial intelligence-assisted CT imaging in COVID-19 disease: A systematic review and meta-analysis</article-title>,&#x201D; <source>Informatics and Medicine Unlocked</source>, vol. <volume>24</volume>, no. <issue>3</issue>, pp. <fpage>781</fpage>&#x2013;<lpage>793</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Mohamadou</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Halidou</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Kapen</surname></string-name></person-group>, &#x201C;<article-title>A review of mathematical modeling, artificial intelligence and datasets used in the study, prediction and management of COVID-19</article-title>,&#x201D; <source>Applied Intelligence</source>, vol. <volume>50</volume>, pp. <fpage>3913</fpage>&#x2013;<lpage>3925</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">34764546</pub-id></mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Montazeri</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Zahedinasab</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Farahani</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Mohseni</surname></string-name> and <string-name><given-names>F.</given-names> <surname>Ghasemian</surname></string-name></person-group>, &#x201C;<article-title>Machine learning models for image-based diagnosis and prognosis of COVID-19: Systematic review</article-title>,&#x201D; <source>JMIR Medical Informatics</source>, vol. <volume>9</volume>, no. <issue>4</issue>, pp. <fpage>1741</fpage>&#x2013;<lpage>1756</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Ghaderzadeh</surname></string-name> and <string-name><given-names>F.</given-names> <surname>Asadi</surname></string-name></person-group>, &#x201C;<article-title>Deep learning in the detection and diagnosis of COVID-19 using radiology modalities: A systematic review</article-title>,&#x201D; <source>Journal of Healthcare Engineering</source>, vol. <volume>21</volume>, no. <issue>4</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>9</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Jemiolou</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Storman</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Orzechowski</surname></string-name></person-group>, &#x201C;<article-title>Artificial intelligence for COVID-19 detection in medical imaging&#x2013;diagnostic measures and wasting&#x2013;a systematic umbrella review</article-title>,&#x201D; <source>Journal of Clinical Medicine</source>, vol. <volume>11</volume>, no. <issue>7</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>9</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Arowolo</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Ogundokun</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Misra</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Kadri</surname></string-name> and <string-name><given-names>T.</given-names> <surname>Aduragba</surname></string-name></person-group>, &#x201C;<article-title>Machine learning approach using KPCA-SVMs for predicting COVID-19</article-title>,&#x201D; <source>Healthcare Informatics for Fighting COVID-19 and Future Epidemics</source>, vol. <volume>4</volume>, no. <issue>3</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>17</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Abiyev</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Ismail</surname></string-name></person-group>, &#x201C;<article-title>COVID-19 and pneumonia diagnosis in X-ray images using convolutional neural networks</article-title>,&#x201D; <source>Mathematical Problems in Engineering</source>, vol. <volume>2021</volume>, ID 328115, pp. <fpage>1</fpage>&#x2013;<lpage>13</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Mall</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Singh</surname></string-name> and <string-name><given-names>D.</given-names> <surname>Yadav</surname></string-name></person-group>, &#x201C;<article-title>GLCM based feature extraction and medical X-ray image classification using machine learning techniques</article-title>,&#x201D; <conf-name>IEEE Conf. on Information and Communication Technology</conf-name>, <conf-loc>Allahabad, India</conf-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>6</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Mansour</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Gutierrez</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Gamarra</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Gupta</surname></string-name>, <string-name><given-names>O.</given-names> <surname>Castill</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Unsupervised deep learning based variational autoencoder model for COVID-19 diagnosis and classification</article-title>,&#x201D; <source>Pattern Recognition Letters</source>, vol. <volume>151</volume>, no. <issue>3</issue>, pp. <fpage>267</fpage>&#x2013;<lpage>274</lpage>, <year>2021</year>; <pub-id pub-id-type="pmid">34566223</pub-id></mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Xue</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Onzo</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Mansour</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Su</surname></string-name></person-group>, &#x201C;<article-title>Deep convolutional neural network approach for COVID-19 detection</article-title>,&#x201D; <source>Computer Systems Science and Engineering</source>, vol. <volume>42</volume>, no. <issue>1</issue>, pp. <fpage>201</fpage>&#x2013;<lpage>211</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Han</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Ai</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Yu</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Kan</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Serial quantitative chest CT assessment of COVID-19: A deep learning approach</article-title>,&#x201D; <source>Radiology: Cardiothoracic Imaging</source>, vol. <volume>2</volume>, no. <issue>2</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>8</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Li</surname></string-name> and <string-name><given-names>L.</given-names> <surname>Xia</surname></string-name></person-group>, &#x201C;<article-title>Coronavirus disease 2019 (COVID-19): Role of chest CT in diagnosis and management</article-title>,&#x201D; <source>American Journal of Roentgenology</source>, vol. <volume>214</volume>, pp. <fpage>1280</fpage>&#x2013;<lpage>1286</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32130038</pub-id></mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Lei</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Spectrum of chest CT findings in a familial cluster of COVID-19 infection</article-title>,&#x201D; <source>Radiology: Cardiothoracic Imaging</source>, vol. <volume>2</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>11</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Chen</surname></string-name> and <string-name><given-names>Q.</given-names> <surname>Meng</surname></string-name></person-group>, &#x201C;<article-title>Chest computed tomography images of early coronavirus disease (COVID-19)</article-title>,&#x201D; <source>Canadian Journal of Anesthesia</source>, vol. <volume>67</volume>, no. <issue>6</issue>, pp. <fpage>754</fpage>&#x2013;<lpage>755</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32162211</pub-id></mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Ng</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Lee</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Li</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Imaging profile of the COVID-19 infection: Radiologic findings and literature review</article-title>,&#x201D; <source>Radiology: Cardiothoracic Imaging</source>, vol. <volume>2</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>16</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Ye</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Huang</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Song</surname></string-name></person-group>, &#x201C;<article-title>Chest CT manifestations of new coronavirus disease 2019 (COVID-19): A pictorial review</article-title>,&#x201D; <source>Journal of European Radiology</source>, vol. <volume>30</volume>, no. <issue>8</issue>, pp. <fpage>4381</fpage>&#x2013;<lpage>4389</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32193638</pub-id></mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Salehi</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Abedi</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Balakrishnan</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Gholamrezanezhad</surname></string-name></person-group>, &#x201C;<article-title>Coronavirus disease 2019 (COVID-19): A systematic review of imaging findings in 919 patients</article-title>,&#x201D; <source>American Journal of Roentgenology</source>, vol. <volume>215</volume>, pp. <fpage>87</fpage>&#x2013;<lpage>93</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32174129</pub-id></mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Fang</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Pan</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Qin</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>CT image visual quantitative evaluation and clinical classification of coronavirus disease (COVID-19)</article-title>,&#x201D; <source>Journal of European Radiology</source>, vol. <volume>30</volume>, pp. <fpage>4407</fpage>&#x2013;<lpage>4416</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32215691</pub-id></mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Shen</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Sang</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Clinically applicable AI system for accurate diagnosis, quantitative measurements, and prognosis of COVID-19 pneumonia using computed tomography</article-title>,&#x201D; <source>Cell</source>, vol. <volume>181</volume>, pp. <fpage>1423</fpage>&#x2013;<lpage>1433</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32416069</pub-id></mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Jacobi</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Chung</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Bernheim</surname></string-name> and <string-name><given-names>C.</given-names> <surname>Eber</surname></string-name></person-group>, &#x201C;<article-title>Portable chest X-ray in coronavirus disease-19 (COVID-19): A pictorial review</article-title>,&#x201D; <source>Clinical Imaging</source>, vol. <volume>64</volume>, pp. <fpage>35</fpage>&#x2013;<lpage>42</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32302927</pub-id></mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Cozzi</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Albanesi</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Cavigli</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Moroni</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Bindi</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Chest X-ray in new coronavirus disease 2019 (COVID- 19) infection: Findings and correlation with clinical outcome</article-title>,&#x201D; <source>La Radiologia Medica</source>, vol. <volume>125</volume>, pp. <fpage>730</fpage>&#x2013;<lpage>737</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32519256</pub-id></mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Chowdhury</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Rahman</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Khandakar</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Mazhar</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Kadir</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Can AI help in screening viral and COVID-19 pneumonia</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, pp. <fpage>132665</fpage>&#x2013;<lpage>132676</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Summers</surname></string-name></person-group>, &#x201C;<article-title>Artificial intelligence of COVID-19 imaging: A hammer in search of a nail</article-title>,&#x201D; <source>Radiology</source>, vol. <volume>298</volume>, no. <issue>3</issue>, pp. <fpage>204226</fpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Ma</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>X.</given-names> <surname>An</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Ge</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Yu</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Toward data-efficient learning: A benchmark for COVID-19 CT lung and infection segmentation</article-title>,&#x201D; <source>Medical Physics</source>, vol. <volume>48</volume>, no. <issue>3</issue>, pp. <fpage>1197</fpage>&#x2013;<lpage>1210</lpage>, <year>2021</year>; <pub-id pub-id-type="pmid">33354790</pub-id></mixed-citation></ref>
<ref id="ref-27"><label>[27]</label><mixed-citation publication-type="web"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Morozov</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Anreychenko</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Pavlov</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Vladzymyrsky</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Ledikhova</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Mos-med data: Chest CT scans with COVID-19 related findings dataset</article-title>,&#x201D; <pub-id pub-id-type="doi">10.1101/2020.05.20.20100362</pub-id></mixed-citation></ref>
<ref id="ref-28"><label>[28]</label><mixed-citation publication-type="web"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>X.</given-names> <surname>He</surname></string-name> and <string-name><given-names>J.</given-names> <surname>Zhao</surname></string-name></person-group>, &#x201C;<article-title>COVID-CT-dataset: A CT scan dataset about COVID-19</article-title>,&#x201D; <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/pdf/2003.13865.pdf">https://arxiv.org/pdf/2003.13865.pdf</ext-link></mixed-citation></ref>
<ref id="ref-29"><label>[29]</label><mixed-citation publication-type="web"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Soares</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Angelov</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Biaso</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Froes</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Abe</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>SARS-CoV-2 CT-scan dataset: A large dataset of real patients CT scans for SARS-CoV identification</article-title>,&#x201D; <pub-id pub-id-type="doi">10.1101/2020.04.24.20078584</pub-id></mixed-citation></ref>
<ref id="ref-30"><label>[30]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Rahimzadeh</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Attar</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Sakhaei</surname></string-name></person-group>, &#x201C;<article-title>A fully automated deep learning-based network for detecting COVID-19 from a new and large lung CT scan dataset</article-title>,&#x201D; <source>Biomedical Signal Processing and Control</source>, vol. <volume>68</volume>, no. <issue>6</issue>, pp. <fpage>643</fpage>&#x2013;<lpage>655</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-31"><label>[31]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>Ning</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Lei</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Cao</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Jiang</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Open resource of clinical data from patients with pneumonia for the prediction of COVID-19 outcomes via deep learning</article-title>,&#x201D; <source>Nature Biomedical Engineering</source>, vol. <volume>4</volume>, pp. <fpage>1197</fpage>&#x2013;<lpage>1207</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">33208927</pub-id></mixed-citation></ref>
<ref id="ref-32"><label>[32]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Harmon</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Sanford</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Xu</surname></string-name></person-group>, &#x201C;<article-title>Artificial intelligence for the detection of COVID-19 pneumonia on chest CT using multinational datasets</article-title>,&#x201D; <source>Nature Communication</source>, vol. <volume>11</volume>, no. <issue>1</issue>, pp. <fpage>4080</fpage>&#x2013;<lpage>4091</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-33"><label>[33]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Kermany</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Goldbaum</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Cai</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Valentim</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Liang</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Identity medical diagnosis and treatable diseases by image-based deep learning</article-title>,&#x201D; <source>Cell</source>, vol. <volume>172</volume>, no. <issue>1</issue>, pp. <fpage>1122</fpage>&#x2013;<lpage>1131</lpage>, <year>2018</year>; <pub-id pub-id-type="pmid">29474911</pub-id></mixed-citation></ref>
<ref id="ref-34"><label>[34]</label><mixed-citation publication-type="web"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Cohen</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Morrison</surname></string-name> and <string-name><given-names>L.</given-names> <surname>Dao</surname></string-name></person-group>, &#x201C;<article-title>COVID-19 image data collection: Prospective predictions are the future</article-title>,&#x201D; <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2006.11988">https://arxiv.org/abs/2006.11988</ext-link></mixed-citation></ref>
<ref id="ref-35"><label>[35]</label><mixed-citation publication-type="web"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Peng</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Tang</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Lee</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhu</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Summers</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>COVID-19-CT-CXR: A freely accessible and weakly labeled chest X-ray and CT image collection on COVID-19 from biomedical literature</article-title>,&#x201D; <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2006.06177">https://arxiv.org/abs/2006.06177</ext-link></mixed-citation></ref>
<ref id="ref-36"><label>[36]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Desai</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Bagha</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Wongsurawat</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Jenjaroenpun</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Powell</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Chest imaging representing a COVID-19 positive rural U.S. population</article-title>,&#x201D; <source>Scientific Data</source>, vol. <volume>7</volume>, no. <issue>1</issue>, pp. <fpage>414</fpage>&#x2013;<lpage>427</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">33235265</pub-id></mixed-citation></ref>
<ref id="ref-37"><label>[37]</label><mixed-citation publication-type="web"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Vaya</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Saborit</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Montell</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Perstusa</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Bustos</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>BIMCV COVID-19&#x002B;: A large annotated dataset of RX and CT images from COVID-19 patients</article-title>,&#x201D; <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2006.01174">https://arxiv.org/abs/2006.01174</ext-link></mixed-citation></ref>
<ref id="ref-38"><label>[38]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Tsai</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Simpson</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Lungren</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Hershman</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Roshkovan</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>The RSNA international COVID-19 open annotated radiology database (RICORD)</article-title>,&#x201D; <source>Radiology</source>, vol. <volume>299</volume>, no. <issue>1</issue>, pp. <fpage>204</fpage>&#x2013;<lpage>213</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-39"><label>[39]</label><mixed-citation publication-type="web"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Wang</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Wong</surname></string-name></person-group>, &#x201C;<article-title>COVID-Net: A tailored deep convolutional neural network design for detection of COVID-19 cases from chest X-ray images</article-title>,&#x201D; <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2003.09871">https://arxiv.org/abs/2003.09871</ext-link></mixed-citation></ref>
<ref id="ref-40"><label>[40]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F.</given-names> <surname>Shi</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Shi</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Wu</surname></string-name>, <string-name><given-names>Q.</given-names> <surname>Wang</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Review of artificial intelligence techniques in imaging data acquisition, segmentation, and diagnosis for COVID-19</article-title>,&#x201D; <source>IEEE Reviews in Biomedical Engineering</source>, vol. <volume>14</volume>, pp. <fpage>4</fpage>&#x2013;<lpage>15</lpage>, <year>2021</year>; <pub-id pub-id-type="pmid">32305937</pub-id></mixed-citation></ref>
<ref id="ref-41"><label>[41]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Pei</surname></string-name> and <string-name><given-names>Y.</given-names> <surname>Guo</surname></string-name></person-group>, &#x201C;<article-title>A 3D CNN classification model for accurate diagnosis of coronavirus disease 2019 using computed tomography images</article-title>,&#x201D; <source>Journal of Medical Imaging</source>, vol. <volume>8</volume>, no. <issue>1</issue>, pp. <fpage>813</fpage>&#x2013;<lpage>824</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-42"><label>[42]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>He</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Shi</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Chu</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Tang</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Benchmarking deep learning models and automated model design for COVID-19 detection with chest CT scans</article-title>,&#x201D; <source>medRxiv</source>, vol. <volume>1</volume>, no. <issue>3</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>13</lpage>, <year>2020</year>. <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2101.05442">https://arxiv.org/abs/2101.05442</ext-link></mixed-citation></ref>
<ref id="ref-43"><label>[43]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V.</given-names> <surname>Shah</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Keniya</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Shridharani</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Punjabi</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Shah</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Diagnosis of COVID-19 using CT scan images and deep learning techniques</article-title>,&#x201D; <source>Emergency Radiology</source>, vol. <volume>28</volume>, pp. <fpage>497</fpage>&#x2013;<lpage>505</lpage>, <year>2021</year>; <pub-id pub-id-type="pmid">33523309</pub-id></mixed-citation></ref>
<ref id="ref-44"><label>[44]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Gifani</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Shalbaf</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Vafaeezadeh</surname></string-name></person-group>, &#x201C;<article-title>Automated detection of COVID-19 using an ensemble of transfer learning with deep convolutional neural network based on CT scans</article-title>,&#x201D; <source>International Journal of Computer Assisted Radiology and Surgery</source>, vol. <volume>16</volume>, no. <issue>2</issue>, pp. <fpage>115</fpage>&#x2013;<lpage>123</lpage>, <year>2021</year>; <pub-id pub-id-type="pmid">33191476</pub-id></mixed-citation></ref>
<ref id="ref-45"><label>[45]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Liang</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Wu</surname></string-name></person-group>, &#x201C;<article-title>Transfer learning for the establishment of recognition of COVID-19 on CT imaging using small-sized training datasets</article-title>,&#x201D; <source>Knowledge-Based Systems</source>, vol. <volume>218</volume>, no. <issue>5</issue>, pp. <fpage>1068</fpage>&#x2013;<lpage>1079</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-46"><label>[46]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Loey</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Manogaran</surname></string-name> and <string-name><given-names>N.</given-names> <surname>Khalifa</surname></string-name></person-group>, &#x201C;<article-title>A deep transfer learning model with classical data augmentation and CGAN to detect COVID-19 from chest CT radiography digital images</article-title>,&#x201D; <source>Neural Computing and Applications</source>, vol. <volume>5</volume>, no. <issue>2</issue>, pp. <fpage>1168</fpage>&#x2013;<lpage>1179</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-47"><label>[47]</label><mixed-citation publication-type="web"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Mobiny</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Cicalese</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Zare</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Yuan</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Abavisani</surname></string-name> <etal>et al.,</etal></person-group> <article-title>Radiologist level COVID-19 detection using CT scans with detail-oriented capsule networks</article-title>, <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2004.07407">https://arxiv.org/abs/2004.07407</ext-link></mixed-citation></ref>
<ref id="ref-48"><label>[48]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Jaiswal</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Gianchandani</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Singh</surname></string-name>, <string-name><given-names>V.</given-names> <surname>Kumar</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Kaur</surname></string-name></person-group>, &#x201C;<article-title>Classification of the COVID-19 infected patients using Densenet201 based deep transfer learning</article-title>,&#x201D; <source>Journal of Bio-Molecular Structure and Dynamics</source>, vol. <volume>24</volume>, no. <issue>2</issue>, pp. <fpage>381</fpage>&#x2013;<lpage>409</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-49"><label>[49]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Pathak</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Shukla</surname></string-name> and <string-name><given-names>K.</given-names> <surname>Arya</surname></string-name></person-group>, &#x201C;<article-title>Deep bidirectional classification model for COVID-19 disease infected patients</article-title>,&#x201D; <source>IEEE/ACM Transactions on Computational Biology and Bioinformatics</source>, vol. <volume>18</volume>, no. <issue>4</issue>, pp. <fpage>1234</fpage>&#x2013;<lpage>1241</lpage>, <year>2021</year>; <pub-id pub-id-type="pmid">32750891</pub-id></mixed-citation></ref>
<ref id="ref-50"><label>[50]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>Q.</given-names> <surname>Liu</surname></string-name> and <string-name><given-names>Q.</given-names> <surname>Dou</surname></string-name></person-group>, &#x201C;<article-title>Contrastive cross-site learning with redesigned net for COVID-19 CT classification</article-title>,&#x201D; <source>IEEE Journal of Biomedical and Health Informatics</source>, vol. <volume>24</volume>, no. <issue>2</issue>, pp. <fpage>2806</fpage>&#x2013;<lpage>2813</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32915751</pub-id></mixed-citation></ref>
<ref id="ref-51"><label>[51]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Gunraj</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Wang</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Wong</surname></string-name></person-group>, &#x201C;<article-title>COVIDNet-CT: A tailored deep convolutional neural network design for detection of COVID-19 cases from chest CT images</article-title>,&#x201D; <source>Frontiers in Medicine</source>, vol. <volume>7</volume>, no. <issue>1</issue>, pp. <fpage>6025</fpage>&#x2013;<lpage>6033</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-52"><label>[52]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Benmalek</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Elmhamdi</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Jilbab</surname></string-name></person-group>, &#x201C;<article-title>Comparing CT scan and chest X-ray imaging for COVID-19 diagnosis</article-title>,&#x201D; <source>Biomedical Engineering Advances</source>, vol. <volume>1</volume>, no. <issue>3</issue>, pp. <fpage>939</fpage>&#x2013;<lpage>950</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-53"><label>[53]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>I.</given-names> <surname>Goodfellow</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Pouget-Abadie</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Mirza</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Xu</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Farley</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Generative adversarial nets</article-title>,&#x201D; in <conf-name>IEEE 27th Int. Conf. on Neural Information Processing Systems</conf-name>, <conf-loc>Montreal, Canada</conf-loc>, pp. <fpage>2672</fpage>&#x2013;<lpage>2680</lpage>, <year>2014</year>.</mixed-citation></ref>
<ref id="ref-54"><label>[54]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Jiang</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Tang</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Liu</surname></string-name> and <string-name><given-names>Y.</given-names> <surname>Zhang</surname></string-name></person-group>, &#x201C;<article-title>Deep learning for COVID-19 chest CT (computed tomography) image analysis: A lesson from lung cancer</article-title>,&#x201D; <source>Computational and Structural Biotechnology Journal</source>, vol. <volume>19</volume>, no. <issue>3</issue>, pp. <fpage>1391</fpage>&#x2013;<lpage>1399</lpage>, <year>2021</year>; <pub-id pub-id-type="pmid">33680351</pub-id></mixed-citation></ref>
<ref id="ref-55"><label>[55]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Cisse</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Dauphin</surname></string-name> and <string-name><given-names>D.</given-names> <surname>Paz</surname></string-name></person-group>, &#x201C;<article-title>Mixup: Beyond empirical risk minimization</article-title>,&#x201D; in <conf-name>Int. Conf. on ICLR</conf-name>, <conf-loc>Seoul, Korea</conf-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>13</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-56"><label>[56]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Qin</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Xu</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Yin</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Wang</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Artificial intelligence distinguishes COVID-19 from community-acquired pneumonia on chest CT</article-title>,&#x201D; <source>Radiology</source>, vol. <volume>296</volume>, no. <issue>2</issue>, pp. <fpage>65</fpage>&#x2013;<lpage>71</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-57"><label>[57]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>B.</given-names> <surname>Zoph</surname></string-name> and <string-name><given-names>Q.</given-names> <surname>Le</surname></string-name></person-group>, &#x201C;<article-title>Neural architecture search with reinforcement learning</article-title>,&#x201D; in <conf-name>Int. Conf. on ICLR</conf-name>, <conf-loc>Ottawa, Canada</conf-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>16</lpage>, <year>2017</year>. <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/pdf/1611.01578.pdf">https://arxiv.org/pdf/1611.01578.pdf</ext-link></mixed-citation></ref>
<ref id="ref-58"><label>[58]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Ragb</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Dover</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Ali</surname></string-name></person-group>, &#x201C;<article-title>Fused deep convolutional neural network for precision diagnosis of COVID-19 using chest X-ray images</article-title>,&#x201D; in <conf-name>Int. Conf. on Electrical Engineering and Systems Science</conf-name>, <conf-loc>New York, USA</conf-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>9</lpage>, <year>2020</year>. <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2009.08831">https://arxiv.org/abs/2009.08831</ext-link></mixed-citation></ref>
<ref id="ref-59"><label>[59]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Sousa</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Carneiro</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Oliveira</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Pereira</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Junior</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>COVID-19 classification in X-ray chest images using a new convolutional neural network: CNN-COVID</article-title>,&#x201D; <source>Research on Biomedical Engineering</source>, vol. <volume>38</volume>, pp. <fpage>87</fpage>&#x2013;<lpage>97</lpage>, <year>2021</year>. <pub-id pub-id-type="doi">10.1007/s42600-020-00120-5</pub-id></mixed-citation></ref>
<ref id="ref-60"><label>[60]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V.</given-names> <surname>Chouhan</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Singh</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Khamparia</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Gupta</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Tiwari</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>A novel transfer learning based approach for pneumonia detection in chest X-ray images</article-title>,&#x201D; <source>Applied Sciences</source>, vol. <volume>10</volume>, no. <issue>2</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>18</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-61"><label>[61]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Khalifa</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Taha</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Hassanien</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Hassananien</surname></string-name></person-group>, &#x201C;<article-title>Detection of coronavirus (COVID-19) associated pneumonia based on generative adversarial networks and a fine-tuned deep transfer learning model using chest X-ray dataset</article-title>,&#x201D; in <conf-name>Int. Conf. on Electrical Engineering and Systems Science</conf-name>, <conf-loc>New Jersey, USA</conf-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>15</lpage>, <year>2020</year>. <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2004.01184">https://arxiv.org/abs/2004.01184</ext-link></mixed-citation></ref>
<ref id="ref-62"><label>[62]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Mukherjee</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Ghosh</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Dhar</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Obaidullah</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Santosh</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Shallow convolutional neural network for COVID-19 outbreak screening using chest X-rays</article-title>,&#x201D; <source>Cognitive Computation</source>, vol. <volume>3</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>14</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-63"><label>[63]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Qi</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Zhao</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Yu</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Heidari</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Wu</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Directional mutation and crossover boosted ant colony optimization with application to COVID-19 X-ray image segmentation</article-title>,&#x201D; <source>Computers in Biology and Medicine</source>, vol. <volume>148</volume>, no. <issue>2</issue>, pp. <fpage>5810</fpage>&#x2013;<lpage>5823</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-64"><label>[64]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Hertel</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Benlamri</surname></string-name></person-group>, &#x201C;<article-title>COV-SNET: A deep learning model for X-ray-based COVID-19 classification</article-title>,&#x201D; <source>Informatics in Medicine Unlocked</source>, vol. <volume>24</volume>, no. <issue>6</issue>, pp. <fpage>521</fpage>&#x2013;<lpage>534</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-65"><label>[65]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Peng</surname></string-name>, <string-name><given-names>I.</given-names> <surname>Lu</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Lu</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Bagheri</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Chest X-ray8: Hospital scale chest X-ray database and benchmarks on weakly-supervised classification and localization of common thorax diseases</article-title>,&#x201D; in <conf-name>IEEE Conf. on Computer Vision and Pattern Recognition</conf-name>, <conf-loc>Honolulu, USA</conf-loc>, pp. <fpage>3462</fpage>&#x2013;<lpage>3471</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-66"><label>[66]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Fan</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Zhou</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Ji</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhou</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Chen</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Inf-Net: Automatic COVID-19 lung infection segmentation from CT images</article-title>,&#x201D; <source>IEEE Transactions on Medical Imaging</source>, vol. <volume>39</volume>, no. <issue>8</issue>, pp. <fpage>2626</fpage>&#x2013;<lpage>2637</lpage>, <year>2020</year>; <pub-id pub-id-type="pmid">32730213</pub-id></mixed-citation></ref>
<ref id="ref-67"><label>[67]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Qiu</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Li</surname></string-name> and <string-name><given-names>J.</given-names> <surname>Xu</surname></string-name></person-group>, &#x201C;<article-title>Mini-seg: An extremely minimum network for efficient COVID-19 segmentation</article-title>,&#x201D; in <conf-name>Int. AAAI Conf. on Artificial Intelligence</conf-name>, <conf-loc>Kuala Lumpur, Malaysia</conf-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>9</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-68"><label>[68]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Zhou</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Canu</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Ruan</surname></string-name></person-group>, &#x201C;<article-title>Automatic COVID-19 CT segmentation using U-net integrated spatial and channel attention mechanism</article-title>,&#x201D; <source>International Journal of Imaging Systems and Technology</source>, vol. <volume>31</volume>, no. <issue>5</issue>, pp. <fpage>16</fpage>&#x2013;<lpage>27</lpage>, <year>2021</year>; <pub-id pub-id-type="pmid">33362345</pub-id></mixed-citation></ref>
<ref id="ref-69"><label>[69]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Rajamani</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Siebert</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Heinrich</surname></string-name></person-group>, &#x201C;<article-title>Dynamic deformable attention (DDANet) for semantic segmentation</article-title>,&#x201D; <source>IEEE Journal of Biomedical and Health Informatics</source>, vol. <volume>119</volume>, no. <issue>3</issue>, pp. <fpage>667</fpage>&#x2013;<lpage>678</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-70"><label>[70]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Jiang</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Loew</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Ko</surname></string-name></person-group>, &#x201C;<article-title>COVID-19 CT image synthesis with a conditional generative adversarial network</article-title>,&#x201D; <source>IEEE Journal of Biomedical and Health Informatics</source>, vol. <volume>25</volume>, no. <issue>3</issue>, pp. <fpage>441</fpage>&#x2013;<lpage>452</lpage>, <year>2021</year>; <pub-id pub-id-type="pmid">33275588</pub-id></mixed-citation></ref>
<ref id="ref-71"><label>[71]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhong</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Deng</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Tang</surname></string-name> and <string-name><given-names>X.</given-names> <surname>Li</surname></string-name></person-group>, &#x201C;<article-title>CoSinGAN: Learning COVID-19 infection segmentation from a single radiological image</article-title>,&#x201D; <source>Diagnostics</source>, vol. <volume>10</volume>, no. <issue>4</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>18</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-72"><label>[72]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Zhao</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Song</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Fan</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Sun</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>D2a U-Net: Automatic segmentation of COVID-19 lesions from CT slices with dilated convolution and dual attention mechanism</article-title>,&#x201D; <source>Computers in Biology and Medicine</source>, vol. <volume>135</volume>, no. <issue>5</issue>, pp. <fpage>1035</fpage>&#x2013;<lpage>1046</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-73"><label>[73]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>W.</given-names> <surname>Deng</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Song</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Gao</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>CT image analysis and clinical diagnosis of new coronary pneumonia based on improved convolutional neural network</article-title>,&#x201D; <source>Computational and Mathematical Methods in Medicine</source>, vol. <volume>21</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>11</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-74"><label>[74]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Oulefki</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Agaian</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Trongtirakul</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Laouar</surname></string-name></person-group>, &#x201C;<article-title>Automatic COVID-19 lung infected region segmentation and measurement using CT-scans images</article-title>,&#x201D; <source>Pattern Recognition</source>, vol. <volume>114</volume>, no. <issue>7</issue>, pp. <fpage>3511</fpage>&#x2013;<lpage>3524</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-75"><label>[75]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Tian</surname></string-name>, <string-name><given-names>T.</given-names> <surname>He</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Shen</surname></string-name> and <string-name><given-names>Y.</given-names> <surname>Yan</surname></string-name></person-group>, &#x201C;<article-title>Decoders matter for semantic segmentation: Data-dependent decoding enables flexible feature aggregation</article-title>,&#x201D; in <conf-name>IEEE Conf. on Computer Vision and Pattern Recognition</conf-name>, <conf-loc>Long Beach, USA</conf-loc>, pp. <fpage>3126</fpage>&#x2013;<lpage>3135</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-76"><label>[76]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Shi</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Liu</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>CCNet: Criss-cross attention for semantic segmentation</article-title>,&#x201D; in <conf-name>IEEE/CVF Int. Conf. on Computer Vision</conf-name>, <conf-loc>Seoul, South Korea</conf-loc>, pp. <fpage>603</fpage>&#x2013;<lpage>612</lpage>, <year>2019</year>.</mixed-citation></ref>
</ref-list>
<app-group id="appg1">
<app id="app1">
<title>Appendix</title>
<table-wrap id="table-7"><label>Table A1</label><caption><title>Dataset partial information</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Serial number</th>
<th align="left">Data set</th>
<th align="left">Image size</th>
<th align="left">Storage format</th>
<th align="left">Features</th>
<th align="left" colspan="2">Image example</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">1</td>
<td align="left">COVID-19-CT-Seg</td>
<td align="left">Not fixed</td>
<td align="left">Nii.gz</td>
<td align="left">Released earlier, the lesion is larger, which is conducive to visual segmentation</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-1.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-2.tif"/></td>
</tr>
<tr>
<td align="left">2</td>
<td align="left">Segmentation dataset nr.2</td>
<td align="left">Slice: 512&#x2009;&#x00D7;&#x2009;512</td>
<td align="left">Nii.gz</td>
<td align="left">3D data, need to slice</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-3.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-4.tif"/></td>
</tr>
<tr>
<td align="left">3</td>
<td align="left">COVID-19-CT-Seg-Benchmark</td>
<td align="left">Slice: 512&#x2009;&#x00D7;&#x2009;512</td>
<td align="left">Nii.gz</td>
<td align="left">3D data, need to slice</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-5.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-6.tif"/></td>
</tr>
<tr>
<td align="left">4</td>
<td align="left">COVID19_1110</td>
<td align="left">Slice: 512&#x2009;&#x00D7;&#x2009;512</td>
<td align="left">Nii.gz</td>
<td align="left">3D data, need to slice</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-7.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-8.tif"/></td>
</tr>
<tr>
<td align="left">5</td>
<td align="left">CC-CCII data set</td>
<td align="left">512&#x2009;&#x00D7;&#x2009;512</td>
<td align="left">JPG</td>
<td align="left">Clear image, a large amount of data, suitable for classification and segmentation experiments</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-9.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-10.tif"/></td>
</tr>
<tr>
<td align="left">6</td>
<td align="left">COVID-CT-Dataset</td>
<td align="left">Not fixed</td>
<td align="left">PNG</td>
<td align="left">Commonly used in the early days, but the amount of data is small and the quality is average</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-11.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-12.tif"/></td>
</tr>
<tr>
<td align="left">7</td>
<td align="left">SARS-cov-2 CT</td>
<td align="left">Not fixed</td>
<td align="left">PNG</td>
<td align="left">Often used for classification and detection in early studies, with good results</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-13.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-14.tif"/></td>
</tr>
<tr>
<td align="left">8</td>
<td align="left">COVID-ctset</td>
<td align="left">512&#x2009;&#x00D7;&#x2009;512</td>
<td align="left">DICOM</td>
<td align="left">16-bit grayscale, the best image quality, suitable for classification experiments</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-15.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-16.tif"/></td>
</tr>
<tr>
<td align="left">9</td>
<td align="left">CT-COVID-19-August2020</td>
<td align="left">Slice: 512&#x2009;&#x00D7;&#x2009;512</td>
<td align="left">Nii.gz</td>
<td align="left">3D data, only images of the patient&#x2019;s lungs</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-17.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-18.tif"/></td>
</tr>
<tr>
<td align="left">10</td>
<td align="left">COVID-19-AR</td>
<td align="left">Not fixed</td>
<td align="left">DICOM</td>
<td align="left">Good data quality</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-19.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-20.tif"/></td>
</tr>
<tr>
<td align="left">11</td>
<td align="left">HUST-19</td>
<td align="left">12&#x2009;&#x00D7;&#x2009;512</td>
<td align="left">JPG</td>
<td align="left">The amount of data is sufficient and the quality is good, suitable for classification experiments</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-21.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-22.tif"/></td>
</tr>
<tr>
<td align="left">12</td>
<td align="left">COVID-19-CT-CXR</td>
<td align="left">224&#x2009;&#x00D7;&#x2009;224</td>
<td align="left">JPG</td>
<td align="left">Collected from documents and web pages; the format is not uniform, and the quantity and quality are average</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-23.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-24.tif"/></td>
</tr>
<tr>
<td align="left">13</td>
<td align="left">BIMCV COVID-19&#x002B;</td>
<td align="left">Not fixed</td>
<td align="left">Nii.gz</td>
<td align="left">High-resolution, manual construction of training data sets is required</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-25.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-26.tif"/></td>
</tr>
<tr>
<td align="left">14</td>
<td align="left">MIDRC-RICORD</td>
<td align="left">Not fixed</td>
<td align="left">DICOM</td>
<td align="left">There is a large amount of data, but the training data set needs to be manually constructed; it is rarely used</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-27.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-28.tif"/></td>
</tr>
<tr>
<td align="left">15</td>
<td align="left">Covidx dataset</td>
<td align="left">Not fixed</td>
<td align="left">PNG</td>
<td align="left">Built from other open-source datasets; the data volume is sufficient</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-29.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-30.tif"/></td>
</tr>
<tr>
<td align="left">16</td>
<td align="left">Pneumonia-chest X-ray dataset</td>
<td align="left">Not fixed</td>
<td align="left">JPEG</td>
<td align="left">Contains no COVID-19 images; used to augment the data</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-31.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-32.tif"/></td>
</tr>
<tr>
<td align="left">17</td>
<td align="left">COVID-chest X-ray dataset</td>
<td align="left">Not fixed</td>
<td align="left">JPEG, JPG, etc.</td>
<td align="left">It was published earlier, with less data volume and inconsistent format and size</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-33.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-34.tif"/></td>
</tr>
<tr>
<td align="left">18</td>
<td align="left">COVID-19 Radiography Database</td>
<td align="left">256&#x2009;&#x00D7;&#x2009;256</td>
<td align="left">PNG</td>
<td align="left">The data quality is good and the quantity is sufficient for classification experiments</td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-35.tif"/></td>
<td align="left"><inline-graphic xlink:href="CMC_38915-inline-36.tif"/></td>
</tr>
</tbody>
</table>
</table-wrap>
</app>
</app-group>
</back>
</article>
