<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">12955</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2021.012955</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>COVID-DeepNet: Hybrid Multimodal Deep Learning System for Improving COVID-19 Pneumonia Detection in Chest X-ray Images</article-title>
<alt-title alt-title-type="left-running-head">COVID-DeepNet: Hybrid Multimodal Deep Learning System for Improving COVID-19 Pneumonia Detection in Chest X-ray Images</alt-title>
<alt-title alt-title-type="right-running-head">COVID-DeepNet: Hybrid Multimodal Deep Learning System for Improving COVID-19 Pneumonia Detection in Chest X-ray Images</alt-title>
</title-group>
<contrib-group content-type="authors">
<contrib id="author-1" contrib-type="author">
<name name-style="western">
<surname>Al-Waisy</surname>
<given-names>A. S.</given-names>
</name>
<xref ref-type="aff" rid="aff-1">1</xref>
</contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western">
<surname>Mohammed</surname>
<given-names>Mazin Abed</given-names>
</name>
<xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western">
<surname>Al-Fahdawi</surname>
<given-names>Shumoos</given-names>
</name>
<xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western">
<surname>Maashi</surname>
<given-names>M. S.</given-names>
</name>
<xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western">
<surname>Garcia-Zapirain</surname>
<given-names>Begonya</given-names>
</name>
<xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western">
<surname>Abdulkareem</surname>
<given-names>Karrar Hameed</given-names>
</name>
<xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western">
<surname>Mostafa</surname>
<given-names>S. A.</given-names>
</name>
<xref ref-type="aff" rid="aff-5">5</xref></contrib>
<contrib id="author-8" contrib-type="author">
<name name-style="western">
<surname>Kumar</surname>
<given-names>Nallapaneni Manoj</given-names>
</name>
<xref ref-type="aff" rid="aff-6">6</xref></contrib>
<contrib id="author-9" contrib-type="author" corresp="yes">
<name name-style="western">
<surname>Le</surname>
<given-names>Dac-Nhuong</given-names>
</name>
<xref ref-type="aff" rid="aff-7">7</xref><xref ref-type="aff" rid="aff-8">8</xref><email>Nhuongld@dhhp.edu.vn</email></contrib>
<aff id="aff-1"><label>1</label><institution>College of Computer Science and Information Technology, University of Anbar</institution>, <addr-line>Anbar, 31001</addr-line>, <country>Iraq</country></aff>
<aff id="aff-2"><label>2</label><institution>College of Computer and Information Sciences, King Saud University</institution>, <addr-line>Riyadh, 11451</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-3"><label>3</label><institution>eVIDA Lab, University of Deusto. Avda/Universidades</institution>, <addr-line>Bilbao, 24.48007</addr-line>, <country>Spain</country></aff>
<aff id="aff-4"><label>4</label><institution>College of Agriculture, Al-Muthanna University</institution>, <addr-line>Samawah, 66001</addr-line>, <country>Iraq</country></aff>
<aff id="aff-5"><label>5</label><institution>Faculty of Computer Science and Information Technology, University Tun Hussein Onn Malaysia</institution>, <addr-line>Johor, 86400</addr-line>, <country>Malaysia</country></aff>
<aff id="aff-6"><label>6</label><institution>School of Energy and Environment, City University of Hong Kong</institution>, <addr-line>Kowloon, 83</addr-line>, <country>Hong Kong</country></aff>
<aff id="aff-7"><label>7</label><institution>Institute of Research and Development, Duy Tan University</institution>, <addr-line>Danang, 550000</addr-line>, <country>Vietnam</country></aff>
<aff id="aff-8"><label>8</label><institution>Faculty of Information Technology, Duy Tan University</institution>, <addr-line>Danang, 550000</addr-line>, <country>Vietnam</country></aff>
</contrib-group>
<author-notes><corresp id="cor1">&#x002A;Corresponding Author: Dac-Nhuong Le. Email: <email>Nhuongld@dhhp.edu.vn</email></corresp></author-notes>
<pub-date pub-type="epub" date-type="pub" iso-8601-date="2021-01-02">
<day>02</day>
<month>01</month>
<year>2021</year>
</pub-date>
<volume>67</volume>
<issue>2</issue>
<fpage>2409</fpage>
<lpage>2429</lpage>
<history>
<date date-type="received">
<day>30</day>
<month>07</month>
<year>2020</year>
</date>
<date date-type="accepted">
<day>16</day>
<month>09</month>
<year>2020</year>
</date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2021 Al-Waisy et al.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Al-Waisy et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_12955.pdf"></self-uri>
<abstract>
<p>Coronavirus (COVID-19) epidemic outbreak has devastating effects on daily lives and healthcare systems worldwide. This newly recognized virus is highly transmissible, and no clinically approved vaccine or antiviral medicine is currently available. Early diagnosis of infected patients through effective screening is needed to control the rapid spread of this virus. Chest radiography imaging is an effective diagnosis tool for COVID-19 virus and follow-up. Here, a novel hybrid multimodal deep learning system for identifying COVID-19 virus in chest X-ray (CX-R) images is developed and termed as the COVID-DeepNet system to aid expert radiologists in rapid and accurate image interpretation. First, Contrast-Limited Adaptive Histogram Equalization (CLAHE) and Butterworth bandpass filter were applied to enhance the contrast and eliminate the noise in CX-R images, respectively. Results from two different deep learning approaches based on the incorporation of a deep belief network and a convolutional deep belief network trained from scratch using a large-scale dataset were then fused. Parallel architecture, which provides radiologists a high degree of confidence to distinguish healthy and COVID-19 infected people, was considered. The proposed COVID-DeepNet system can correctly and accurately diagnose patients with COVID-19 with a detection accuracy rate of 99.93%, sensitivity of 99.90%, specificity of 100%, precision of 100%, F1-score of 99.93%, MSE of 0.021%, and RMSE of 0.016% in a large-scale dataset. This system shows efficiency and accuracy and can be used in a real clinical center for the early diagnosis of COVID-19 virus and treatment follow-up with less than 3 s per image to make the final decision.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Coronavirus epidemic</kwd>
<kwd>deep learning</kwd>
<kwd>deep belief network</kwd>
<kwd>convolutional deep belief network</kwd>
<kwd>chest radiography imaging</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>COVID-19 epidemic outbreak has devastating effects on daily lives and healthcare systems worldwide. This newly recognized virus is highly transmissible, and a clinically approved vaccine or antiviral medicine is not yet available. The first positive COVID-19 case was detected in Wuhan City in December 2019, and the disease has then rapidly spread to several cities in China and subsequently to many countries worldwide [<xref ref-type="bibr" rid="ref-1">1</xref>]. The world has strived and fought to limit the spread of this epidemic. To date, the number of recognized positive COVID-19 infections worldwide is approximately 6,851,720 total cases, with 398,260 deaths and 3,351,419 recovered cases. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> displays the distribution of confirmed COVID-19 cases in most affected countries worldwide. The United States leads in the number of confirmed infections constituting 28.70% (1,965,912 cases) of the total confirmed cases worldwide. Similar to other flu types, COVID-19 causes respiratory diseases, and the majority of the infected people may recover without any need for special treatment. The elderly and those with chronic diseases, such as diabetes, cancer, chronic respiratory disease, and cardiovascular disease, are highly likely to experience a dangerous infection [<xref ref-type="bibr" rid="ref-2">2</xref>]. The most common critical symptoms are fever, dry cough, tiredness, headache, sore throat, sneezing, vomiting, dyspnea, myalgia, nasal congestion, and rhinorrhea. Patients with severe COVID-19 infection suffer from critical complications, such as cardiac injury, pulmonary edema, septic shock, and acute kidney injury [<xref ref-type="bibr" rid="ref-3">3</xref>,<xref ref-type="bibr" rid="ref-4">4</xref>].</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Global distribution of confirmed COVID-19 cases (6 June 2020) [<xref ref-type="bibr" rid="ref-5">5</xref>]</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-1.png"/>
</fig>
<p>A key factor in confronting the COVID-19 epidemic is the early diagnosis and separation of infected patients. An efficient screening technique for COVID-19 infected patients can substantially limit the rapid spread of the COVID-19 virus. Several screening methods, such as reverse transcriptase-polymerase chain reaction (RT-PCR) technique, are employed to reveal the onset symptoms of the COVID-19 virus [<xref ref-type="bibr" rid="ref-6">6</xref>]. Although RT-PCR is commonly used for SARS-CoV-2 diagnosis, this process is tedious and time consuming and requires user interaction. The reliability and validity of radiography imaging techniques (e.g., images of computed tomography or CX-R) for early COVID-19 diagnosis have been established to overcome RT-PCR restrictions [<xref ref-type="bibr" rid="ref-7">7</xref>]. Radiography imaging can display a wide degree of unpredictable ground-glass opacities that rapidly advance after any infection and is therefore one of the most critical biomedical imaging techniques in hospitals to detect chest abnormalities and COVID-19 infection. However, the main problem of using chest radiograph imaging is that reading and interpreting the images require a long time [<xref ref-type="bibr" rid="ref-8">8</xref>]. With COVID-19 virus identified as a pandemic, the number of patients who require a chest X-ray (CX-R) image examination has dramatically increased and exceeded the low number of available expert radiologists. As a result, the pressure on healthcare systems and radiologists is increased, disease diagnosis is delayed, patient&#x2019;s treatment and follow up are affected, and virus transmission likely occurs. Hence, the real-time and fully automated interpretation of radiography images is needed to help radiologists and clinicians in precisely detecting the COVID-19 infection. 
Computer-aided diagnostic systems based on deep learning approaches can be employed to assist radiologists to rapidly and correctly interpret and understand the details in the chest radiography images and overcome the limitations of the adopted imaging acquisition techniques [<xref ref-type="bibr" rid="ref-9">9</xref>,<xref ref-type="bibr" rid="ref-10">10</xref>]. This paper proposes a novel hybrid deep learning system termed as a COVID-DeepNet system for detecting COVID-19 pneumonia in CX-R images by using two discriminative deep learning approaches. This system comprises four main steps: image pre-processing, feature extraction, image classification, and fusion. The contrast of the CX-R image is initially enhanced, and the noise level is reduced using CLAHE and a Butterworth bandpass filter. In feature extraction and classification, two distinctive deep learning approaches based on DBN and CDBN are employed for the automated COVID-19 infection detection in CX-R images. Finally, the results obtained from these two approaches are fused to make the final decision. The primary contributions of this research are outlined as follows:
<list list-type="order">
<list-item><p>A novel hybrid COVID-19 detection system is proposed and termed as a COVID-DeepNet system to automatically differentiate between healthy and COVID-19 infected subjects by using CX-R images with two successful modern deep learning methods (e.g., DBN and CDBN). To the authors&#x2019; best knowledge, this work is the first to examine the possibility of using DBN and CDBN in a unified system to detect COVID-19 infection by learning high discriminative feature representations from CX-R images.</p></list-item>
<list-item><p>Different from most of the existing systems that make the final prediction using only one trained model, the proposed COVID-DeepNet system makes the final prediction by fusing the results obtained from two different deep learning approaches trained from scratch using a large-scale dataset. Parallel architecture, which provides radiologists a high degree of confidence to distinguish between healthy and COVID-19 infected subjects, is considered.</p></list-item>
<list-item><p>A large-scale CX-R image dataset is created and termed as the COVID19-<italic>vs.</italic>-Normal dataset. To the authors&#x2019; best knowledge, this dataset has the largest size and contains the largest number of CX-R images with confirmed COVID-19 infection among those currently available in the public domain.</p></list-item>
<list-item><p>The possibility of reducing the computational complexity and improving the generalization of the deep learning is further validated and examined using pre-processed CX-R images as input data to produce useful features representations during the training phase instead of using predefined features of raw images data.</p></list-item>
<list-item><p>A distinctive training procedure supported with various sets of training policies (e.g., data augmentation, AdaGrad algorithm, and dropout method) is also adopted to increase the generalization ability of the proposed COVID-DeepNet system and avoid overfitting.</p></list-item>
<list-item><p>The efficiency and usefulness of the proposed COVID-DeepNet system are established along with its possible clinical application for early COVID-19 diagnosis with less than 2 s per image to obtain the required results.</p></list-item>
</list></p>
<p>The remainder of this paper is divided into five sections: Section 2 provides a brief overview of the current related works, Section 3 discusses the strategy used to create the COVID-19 dataset and the implementation details of the COVID-DeepNet system, Section 4 presents the experimental results, and Section 5 displays the conclusion and future work.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Related Works</title>
<p>Deep learning has been effectively applied in the medical field with promising results and remarkable performance compared with human-level action in various challenging tasks, such as breast cancer detection [<xref ref-type="bibr" rid="ref-11">11</xref>], skin cancer classification [<xref ref-type="bibr" rid="ref-12">12</xref>], nasopharyngeal carcinoma identification [<xref ref-type="bibr" rid="ref-13">13</xref>,<xref ref-type="bibr" rid="ref-14">14</xref>], brain disease classification [<xref ref-type="bibr" rid="ref-15">15</xref>], lung segmentation [<xref ref-type="bibr" rid="ref-16">16</xref>], and pneumonia detection in CX-R images [<xref ref-type="bibr" rid="ref-17">17</xref>]. Several medical imaging tools using deep learning methods have also been established to help radiologists and clinicians in early COVID-19 detection, treatment, and follow-up investigation [<xref ref-type="bibr" rid="ref-18">18</xref>]. For instance, Wang et al. [<xref ref-type="bibr" rid="ref-19">19</xref>] developed a tailored model termed as COVID-Net to detect COVID-19 cases using CX-R images by classifying the input image into one of three different classes (e.g., normal, non-COVID19, and COVID19). This model has the highest accuracy rate of 92.4% as measured using a dataset containing 16,756 CX-R images collected from two different datasets (COVID-19 CX-R dataset provided in [<xref ref-type="bibr" rid="ref-20">20</xref>] and RSNA Pneumonia Detection Challenge dataset [<xref ref-type="bibr" rid="ref-21">21</xref>]). Hemdan et al. [<xref ref-type="bibr" rid="ref-22">22</xref>] proposed a deep learning system named as COVIDX-Net to identify COVID-19 infection in CX-R images. A comparative study among seven deep learning approaches (e.g., VGG19, ResNetV2, DenseNet201, Xception, MobileNetV2 Inception, and InceptionV3) was conducted using a small dataset of 50 images (e.g., with 25 images of positive COVID-19 infection). 
The best performance was obtained by pre-trained DenseNet201 with an accuracy rate of 91%. Narin et al. [<xref ref-type="bibr" rid="ref-23">23</xref>] also conducted another comparison study among three different deep CNN-based models (e.g., InceptionV3, ResNet50, and Inception-ResNetV2) by using a dataset consisting of one hundred CX-R images, half of which are infected COVID-19 cases. The best performance was achieved using the pre-trained ResNet50 model with an accuracy rate of 98%. Mohammed et al. [<xref ref-type="bibr" rid="ref-24">24</xref>] proposed a novel benchmarking method for choosing the best COVID-19 detection model by using the Entropy and TOPSIS method and established a decision matrix of 10 evaluation criteria and 12 machine learning classifiers for identifying COVID-19 infection in 50 CX-R images. The highest closeness coefficient of 98.99% was achieved by the linear SVM classifier. Kassani et al. [<xref ref-type="bibr" rid="ref-25">25</xref>] trained several CNN models as feature descriptors to encode the input image into low dimensional feature vectors, which are then processed by different classifiers to aggregate solutions. The performance was verified using the same dataset presented in [<xref ref-type="bibr" rid="ref-20">20</xref>]. The highest accuracy rate was 99% using the pre-trained DenseNet121 model as a feature descriptor and the Bagging tree classifier. Zhang et al. [<xref ref-type="bibr" rid="ref-26">26</xref>] used a pre-trained ResNet-18 model as a feature descriptor to extract useful feature representations from the CX-R image. These extracted features are then fed to a multi-layer perceptron to make the final decision. The highest accuracy rate of 96.00% was obtained using a dataset of 100 images captured from 70 patients. 
Many researchers have attempted to detect COVID-19 infection in CX-R or CT images using various deep learning approaches [<xref ref-type="bibr" rid="ref-27">27</xref>&#x2013;<xref ref-type="bibr" rid="ref-31">31</xref>]. A review on COVID-19 detection and diagnosis systems based on CX-R images revealed some limitations that need to be investigated. First, most of the existing systems have been evaluated using small X-ray datasets with a small number of positive COVID-19 cases. The dataset sizes are not sufficient to reveal the real performance of the proposed approaches. Second, although several studies have produced high accuracy rates using pre-trained models via transfer learning, minimal attention has been given to building and training a custom deep learning model from scratch mainly due to the unavailability of a large dataset containing a sufficient number of CX-R images with confirmed COVID-19 infection. In addition, changing the architecture of pre-trained models by removing/adding some layers to obtain an optimal model architecture with high confidence is difficult. Finally, most of these studies only focused on training deep learning models on the top of raw images rather than pre-processed images, thus limiting the generalization ability of the last trained model. To overcome these limitations, the present work proposed a novel hybrid COVID-19 detection system termed as COVID-DeepNet system to automatically differentiate between healthy and COVID-19 infected subjects by using CX-R images under two successful modern deep learning approaches (e.g., DBN and CDBN). The proposed COVID-DeepNet system is trained from scratch using a large-scale and challenging dataset termed as the COVID19-<italic>vs.</italic>-Normal dataset.</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Proposed COVID-DeepNet System</title>
<p>As depicted in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>, a novel hybrid COVID19 detection system was proposed and termed as a COVID-DeepNet system to learn discriminative and useful feature representations by training two discriminative deep learning approaches (DBN and CDBN) over the pre-processed CX-R images. First, the adopted procedure to create the CX-R dataset was briefly described. Implementation details of the proposed approaches were then explained, such as the proposed image pre-processing algorithm, the main architecture, and training methodology of the proposed deep learning approaches (e.g., DBN and CDBN). Algorithm 1 shows the pseudo-code of the proposed COVID-DeepNet system.</p>
<fig id="fig-10">
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-10.png"/>
</fig>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Block diagram of the proposed COVID-DeepNet detection system </title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-2.png"/>
</fig>
<sec id="s3_1">
<label>3.1</label>
<title>COVID19-<italic>vs.</italic>-Normal Dataset</title>
<p>Several CX-R images were carefully selected from different sources to create a relatively large-scale COVID-19 CX-R image dataset of confirmed infected cases. This dataset was named as COVID19-<italic>vs.</italic>-Normal and then mixed with some CX-R images of normal cases for a reliable diagnosis of COVID-19 virus. The sources of the COVID19-<italic>vs.</italic>-Normal dataset are as follows:
<list list-type="bullet">
<list-item><p>A set of 200 CX-R images with confirmed COVID-19 infection of Cohen&#x2019;s GitHub repository [<xref ref-type="bibr" rid="ref-20">20</xref>].</p></list-item>
<list-item><p>A set of 200 COVID-19 CX-R images with confirmed COVID-19 infection gathered from three different sources: Radiopaedia dataset [<xref ref-type="bibr" rid="ref-32">32</xref>], Italian Society of Medical and Interventional Radiology (SIRM) [<xref ref-type="bibr" rid="ref-33">33</xref>], and Radiological Society of North America (RSNA) [<xref ref-type="bibr" rid="ref-34">34</xref>].</p></list-item>
<list-item><p>A set of 400 normal CX-R images from Kaggle&#x2019;s CX-R image (Pneumonia [<xref ref-type="bibr" rid="ref-35">35</xref>]) dataset.</p></list-item>
</list></p>
<p>Samples of the COVID-19 and normal cases of the large-scale COVID-19 CX-R images are shown in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>. The established COVID19-<italic>vs.</italic>-Normal dataset will have a constantly updated number of the COVID-19 cases depending on the availability of new CX-R images with confirmed COVID-19 infection and is available publicly at <uri>https://github.com/AlaaSulaiman/COVID19-vs.-Normal-dataset</uri>. Data augmentation was applied to prevent overfitting and enhance the generalization ability of the last trained model. First, the size of the original image was rescaled to (<inline-formula id="ieqn-1"><alternatives><inline-graphic xlink:href="ieqn-1.png"/><tex-math id="tex-ieqn-1"><![CDATA[$224 \times 224$]]></tex-math><mml:math id="mml-ieqn-1"><mml:mn>224</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>224</mml:mn></mml:math></alternatives></inline-formula>) pixels, and five random image regions of size (<inline-formula id="ieqn-2"><alternatives><inline-graphic xlink:href="ieqn-2.png"/><tex-math id="tex-ieqn-2"><![CDATA[$128 \times 128$]]></tex-math><mml:math id="mml-ieqn-2"><mml:mn>128</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></alternatives></inline-formula>) pixels were then extracted from each image. Horizontal flip and rotation of 5 degrees (e.g., clockwise and counter-clockwise) were then conducted for every single image in the dataset. A total of 24,000 CX-R images of size (<inline-formula id="ieqn-3"><alternatives><inline-graphic xlink:href="ieqn-3.png"/><tex-math id="tex-ieqn-3"><![CDATA[$128 \times 128$]]></tex-math><mml:math id="mml-ieqn-3"><mml:mn>128</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></alternatives></inline-formula>) pixels were extracted from both classes (e.g., COVID-19 and normal images). 
Data augmentation was implemented after dividing the COVID19-<italic>vs.</italic>-Normal dataset into three mutually exclusive sets (e.g., training, validation, and testing set) to avoid generating biased prediction results.</p>
<fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>An example of positive COVID-19 case versus negative COVID-19 case obtained from the created dataset of COVID19 and normal CX-R images</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-3.png"/>
</fig>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Image Pre-Processing Step</title>
<p>A raw CX-R image obtained by an electronic detector usually has poor quality and thus may be unsuitable for detection and diagnosis. Image enhancement methods should be applied to enhance the quality of CX-R images. Furthermore, training the DNNs on the top of pre-processed images instead of using raw images data can substantially reduce the generalization error of the DNNs and their training time. Hence, an effective image enhancement procedure was proposed to enhance the CX-R image&#x2019;s poor quality prior to feeding to the proposed approaches (e.g., DBN and CDBN). First, the small details, textures, and low contrast of the CX-R image was enhanced through adaptive contrast enhancement based on CLAHE [<xref ref-type="bibr" rid="ref-36">36</xref>]. CLAHE is different from the original histogram equalization method that computes several histograms (e.g., each one corresponding to a distinct part of an image) to redistribute the lightness values of the input image, as depicted in <xref ref-type="fig" rid="fig-4">Fig. 4b</xref>. Hence, this method can improve the image local contrast and enhance the visibility of the edges and curves in each part of an image. Second, the Butterworth bandpass filter was employed to reduce the noise in the image produced from the previous step, as shown in <xref ref-type="fig" rid="fig-4">Fig. 4c</xref>. The Butterworth Bandpass filter was calculated by multiplying the low and high pass filters as follows:</p>
<p><disp-formula id="eqn-1">
<label>(1)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-1.png"/>
<tex-math id="tex-eqn-1"><![CDATA[$$\begin{equation}H_{LP} \left(u,v\right)=\frac{1}{1+ \left[F(u,v)/F_{L}\right]^{2n}},
\label{eqn-1} \end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-1" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>H</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>/</mml:mo><mml:msub><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mi>n</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p><disp-formula id="eqn-2">
<label>(2)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-2.png"/>
<tex-math id="tex-eqn-2"><![CDATA[$$\begin{equation}\mathrm{H}_{\mathrm{HP}} \left(\mathrm{u},\mathrm{v}\right)=1-\frac{1}{1+ \left[\mathrm{F}(\mathrm{u},\mathrm{v})/\mathrm{F}_{\mathrm{H}}\right]^{2\mathrm{n}}},
\label{eqn-2} \end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-2" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>H</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>H</mml:mi><mml:mi>P</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>u</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>F</mml:mi></mml:mstyle><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>u</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>/</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>F</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>H</mml:mi></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mstyle mathvariant="normal"><mml:mi>n</mml:mi></mml:mstyle></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<disp-formula id="eqn-3">
<label>(3)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-3.png"/>
<tex-math id="tex-eqn-3"><![CDATA[$$\begin{equation}\mathrm{H}_{\mathrm{BP}} \left(\mathrm{u},\mathrm{v}\right)= \mathrm{H}_{\mathrm{LP}} \left(\mathrm{u},\mathrm{v}\right)*\mathrm{H}_{\mathrm{HP}} \left(\mathrm{u},\mathrm{v}\right),
\label{eqn-3}\end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-3" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>H</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>B</mml:mi><mml:mi>P</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>u</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>H</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>L</mml:mi><mml:mi>P</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>u</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>*</mml:mo><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>H</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>H</mml:mi><mml:mi>P</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>u</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula>
<p>where <inline-formula id="ieqn-4"><alternatives><inline-graphic xlink:href="ieqn-4.png"/><tex-math id="tex-ieqn-4"><![CDATA[$\boldsymbol{F}_{\boldsymbol{L}}$]]></tex-math><mml:math id="mml-ieqn-4"><mml:msub><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula> and <inline-formula id="ieqn-5"><alternatives><inline-graphic xlink:href="ieqn-5.png"/><tex-math id="tex-ieqn-5"><![CDATA[$\boldsymbol{F}_{\boldsymbol{H}}$]]></tex-math><mml:math id="mml-ieqn-5"><mml:msub><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>H</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula> are the cut frequencies of the low and high pass filters set as 15 and 30, respectively; <inline-formula id="ieqn-6"><alternatives><inline-graphic xlink:href="ieqn-6.png"/><tex-math id="tex-ieqn-6"><![CDATA[${\boldsymbol{n}}=\mathbf{3}$]]></tex-math><mml:math id="mml-ieqn-6"><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mstyle mathvariant="bold"><mml:mn>3</mml:mn></mml:mstyle></mml:math></alternatives></inline-formula> is the filter order; and <inline-formula id="ieqn-7"><alternatives><inline-graphic xlink:href="ieqn-7.png"/><tex-math id="tex-ieqn-7"><![CDATA[$\boldsymbol{F} \left({\boldsymbol{u}}, {\boldsymbol{v}}\right)$]]></tex-math><mml:math id="mml-ieqn-7"><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>u</mml:mi><mml:mo>,</mml:mo><mml:mi>v</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></alternatives></inline-formula> is the distance from the origin.</p>
<fig id="fig-4">
<label>Figure 4</label> 
<caption>
<title>Proposed image enhancement procedure outputs: (a) Raw CX-R image, (b) applying the CLAHE method, and (c) applying the Butterworth Bandpass filter</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-4.png"/>
</fig>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Deep Learning for COVID-19 Detection</title>
<p>A hybrid deep learning detection system based on the incorporation of two discriminative deep learning approaches (e.g., DBN and CDBN) was proposed to detect COVID-19 infection in CX-R images. To the authors&#x2019; best knowledge, the possibility of using DBN and CDBN in a unified system to detect the COVID-19 virus in the CX-R images has not been previously investigated. <xref ref-type="fig" rid="fig-2">Fig. 2</xref> shows that the enhanced image was fed into the visible units of the proposed deep learning approaches to learn high-level feature representations. DBN is a new generative probabilistic model developed by Hinton et al. [<xref ref-type="bibr" rid="ref-37">37</xref>]. Different from other conventional deep neural networks (DNNs), DBN has one visible layer and several hidden layers that can learn the statistical correlations of the neurons in the previous layer [<xref ref-type="bibr" rid="ref-38">38</xref>]. Similar to other deep learning approaches, DBNs are directly applied to raw image data. Although DBNs have been effectively applied to solve many challenging problems (e.g., face recognition [<xref ref-type="bibr" rid="ref-39">39</xref>] and audio classification [<xref ref-type="bibr" rid="ref-40">40</xref>]), scaling them to high dimensional images is challenging for two reasons. First, the input image with a high dimensionality can increase the complexity of the learning process and require a long-time for convergence. Second, the features learned by DBNs are highly sensitive to image translations, especially when the raw image data are assigned directly to the visible layer. This phenomenon can lead to discarding most of the fine details in the input image, thus seriously affecting their performance. 
As a solution, the proposed DBN model was trained on top of pre-processed images rather than raw image data to remarkably reduce the training time and learn additional discriminative feature representations [<xref ref-type="bibr" rid="ref-41">41</xref>]. Assigning pre-processed CX-R images to the input layers of DBN and CDBN can remarkably improve their ability to learn essential and prominent feature representations with less time required to obtain the final trained models. As depicted in <xref ref-type="fig" rid="fig-5">Fig. 5</xref>, the main architecture of the proposed DBN is composed of stacking five RBMs as hidden layers. The first four RBMs can be viewed as non-linear feature descriptors trained sequentially using the CD learning algorithm in an unsupervised greedy layer-wise manner to learn a multi-layer non-linear generative model. The last RBM is a discriminative RBM (DRBM) trained as a non-linear classifier associated with a SoftMax function to produce the probability distribution of each class label. DRBM comprises two layers of visible units to represent the input vector and a softmax label unit to represent the predicted class. During RBM training, the stochastic gradient descent algorithm was applied to maximize the log-likelihood of the training data. Hence, the updating rules for the weights can be defined as follows:</p>
<p><disp-formula id="eqn-4">
<label>(4)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-4.png"/>
<tex-math id="tex-eqn-4"><![CDATA[$$\begin{equation}\Delta \mathrm{w}_{\mathrm{i},\mathrm{j}}=\epsilon \left( \left\langle \frac{1}{\sigma _{\mathrm{i}}^{2}}\mathrm{v}_{\mathrm{i}}\mathrm{h}_{\mathrm{j}}\right\rangle _{\text{d}\text{a}\text{t}\text{a}}- \left\langle \frac{1}{\sigma _{\mathrm{i}}^{2}}\mathrm{v}_{\mathrm{i}}\mathrm{h}_{\mathrm{j}}\right\rangle _{\text{m}\text{o}\text{d}\text{e}\text{l}}\right),
\label{eqn-4} \end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-4" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mi>&#x0394;</mml:mi><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>w</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>j</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03F5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x2329;</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x03C3;</mml:mi></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>h</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>j</mml:mi></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x232A;</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>d</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>a</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>t</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>a</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x2329;</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x03C3;</mml:mi></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle 
mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>h</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>j</mml:mi></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x232A;</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>m</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>o</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>d</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>e</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>l</mml:mtext></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p><disp-formula id="eqn-5">
<label>(5)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-5.png"/>
<tex-math id="tex-eqn-5"><![CDATA[$$\begin{equation}\Delta \mathrm{b}_{\mathrm{i}}=\epsilon \left( \left\langle \frac{1}{\sigma _{\mathrm{i}}^{2}}\mathrm{v}_{\mathrm{i}}\right\rangle _{\text{d}\text{a}\text{t}\text{a}}- \left\langle \frac{1}{\sigma _{\mathrm{i}}^{2}}\mathrm{v}_{\mathrm{i}}\right\rangle _{\text{m}\text{o}\text{d}\text{e}\text{l}}\right),
\label{eqn-5} \end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-5" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mi>&#x0394;</mml:mi><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>b</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03F5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x2329;</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x03C3;</mml:mi></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x232A;</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>d</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>a</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>t</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>a</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x2329;</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x03C3;</mml:mi></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle 
mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x232A;</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>m</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>o</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>d</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>e</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>l</mml:mtext></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<disp-formula id="eqn-6">
<label>(6)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-6.png"/>
<tex-math id="tex-eqn-6"><![CDATA[$$\begin{equation}\Delta \mathrm{c}_{\mathrm{i}}=\epsilon \left( \left\langle \mathrm{h}_{\mathrm{j}}\right\rangle _{\text{d}\text{a}\text{t}\text{a}}- \left\langle \mathrm{h}_{\mathrm{j}}\right\rangle _{\text{m}\text{o}\text{d}\text{e}\text{l}}\right),
\label{eqn-6}\end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-6" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mi>&#x0394;</mml:mi><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>c</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03F5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x2329;</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>h</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>j</mml:mi></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x232A;</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>d</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>a</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>t</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>a</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x2329;</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>h</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>j</mml:mi></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x232A;</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>m</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>o</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>d</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>e</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>l</mml:mtext></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula>
<p>where <inline-formula id="ieqn-8"><alternatives><inline-graphic xlink:href="ieqn-8.png"/><tex-math id="tex-ieqn-8"><![CDATA[$\epsilon$]]></tex-math><mml:math id="mml-ieqn-8"><mml:mi>&#x03F5;</mml:mi></mml:math></alternatives></inline-formula> refers to the learning rate, <inline-formula id="ieqn-9"><alternatives><inline-graphic xlink:href="ieqn-9.png"/><tex-math id="tex-ieqn-9"><![CDATA[$ \left\langle \cdot\right\rangle _{\boldsymbol{data}}$]]></tex-math><mml:math id="mml-ieqn-9"><mml:msub><mml:mrow><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x2329;</mml:mo><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x22C5;</mml:mo></mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x232A;</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>a</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula> and <inline-formula id="ieqn-10"><alternatives><inline-graphic xlink:href="ieqn-10.png"/><tex-math id="tex-ieqn-10"><![CDATA[$ \left\langle \cdot\right\rangle _{\boldsymbol{model}}$]]></tex-math><mml:math id="mml-ieqn-10"><mml:msub><mml:mrow><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x2329;</mml:mo><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x22C5;</mml:mo></mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x232A;</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mi>o</mml:mi><mml:mi>d</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula> denote the positive stage and the negative stage, respectively. 
Finally, <inline-formula id="ieqn-11"><alternatives><inline-graphic xlink:href="ieqn-11.png"/><tex-math id="tex-ieqn-11"><![CDATA[${\boldsymbol{b}}_{{\boldsymbol{i}}}$]]></tex-math><mml:math id="mml-ieqn-11"><mml:msub><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula> and <inline-formula id="ieqn-12"><alternatives><inline-graphic xlink:href="ieqn-12.png"/><tex-math id="tex-ieqn-12"><![CDATA[${\boldsymbol{c}}_{{\boldsymbol{i}}}$]]></tex-math><mml:math id="mml-ieqn-12"><mml:msub><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula> represent the biases for the visible and hidden units, respectively. Calculating the <inline-formula id="ieqn-13"><alternatives><inline-graphic xlink:href="ieqn-13.png"/><tex-math id="tex-ieqn-13"><![CDATA[$ \left\langle {\boldsymbol{v}}_{{\boldsymbol{i}}}{\boldsymbol{h}}_{{\boldsymbol{j}}}\right\rangle_{\boldsymbol{model}}$]]></tex-math><mml:math id="mml-ieqn-13"><mml:msub><mml:mrow><mml:mrow><mml:mo lspace='0pt' rspace='0pt'>&#x2329;</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x232A;</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mi>o</mml:mi><mml:mi>d</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula> in <xref ref-type="disp-formula" rid="eqn-4">Eq. (4)</xref> is difficult. 
Thus, the CD algorithm [<xref ref-type="bibr" rid="ref-42">42</xref>] was used to update the parameters of a given RBM by applying <inline-formula id="ieqn-14"><alternatives><inline-graphic xlink:href="ieqn-14.png"/><tex-math id="tex-ieqn-14"><![CDATA[${\boldsymbol{k}}$]]></tex-math><mml:math id="mml-ieqn-14"><mml:mi>k</mml:mi></mml:math></alternatives></inline-formula> steps Gibbs sampling from the probability distribution to compute the second term in <xref ref-type="disp-formula" rid="eqn-4">Eq. (4)</xref>. The single-step of the CD algorithm can be implemented as follows:
<list list-type="order">
<list-item><p>Initially, the training data are given to the visible units (<inline-formula id="ieqn-15"><alternatives><inline-graphic xlink:href="ieqn-15.png"/><tex-math id="tex-ieqn-15"><![CDATA[${\boldsymbol{v}}_{{\boldsymbol{i}}}$]]></tex-math><mml:math id="mml-ieqn-15"><mml:msub><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula>) to compute the probabilities of the hidden units. A hidden activation (<inline-formula id="ieqn-16"><alternatives><inline-graphic xlink:href="ieqn-16.png"/><tex-math id="tex-ieqn-16"><![CDATA[${\boldsymbol{h}}_{{\boldsymbol{j}}}$]]></tex-math><mml:math id="mml-ieqn-16"><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula>) vector is then sampled from the same probability distribution.</p></list-item>
<list-item><p>In the positive phase, the outer product of (<inline-formula id="ieqn-17"><alternatives><inline-graphic xlink:href="ieqn-17.png"/><tex-math id="tex-ieqn-17"><![CDATA[${\boldsymbol{v}}_{{\boldsymbol{i}}}$]]></tex-math><mml:math id="mml-ieqn-17"><mml:msub><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula>) and (<inline-formula id="ieqn-18"><alternatives><inline-graphic xlink:href="ieqn-18.png"/><tex-math id="tex-ieqn-18"><![CDATA[${\boldsymbol{h}}_{{\boldsymbol{j}}}$]]></tex-math><mml:math id="mml-ieqn-18"><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula>) is computed.</p></list-item>
<list-item><p>A reconstruction of the visible units (<inline-formula id="ieqn-19"><alternatives><inline-graphic xlink:href="ieqn-19.png"/><tex-math id="tex-ieqn-19"><![CDATA[${\boldsymbol{v}}_{{\boldsymbol{i}}}^{\boldsymbol{\prime}}$]]></tex-math><mml:math id="mml-ieqn-19"><mml:msubsup><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x2032;</mml:mi></mml:mrow></mml:msubsup></mml:math></alternatives></inline-formula>) is sampled from (<inline-formula id="ieqn-20"><alternatives><inline-graphic xlink:href="ieqn-20.png"/><tex-math id="tex-ieqn-20"><![CDATA[${\boldsymbol{h}}_{{\boldsymbol{j}}}$]]></tex-math><mml:math id="mml-ieqn-20"><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula>) with <inline-formula id="ieqn-21"><alternatives><inline-graphic xlink:href="ieqn-21.png"/><tex-math id="tex-ieqn-21"><![CDATA[$p \left(h_{j}=1|v\right)$]]></tex-math><mml:math id="mml-ieqn-21"><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>|</mml:mo><mml:mi>v</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></alternatives></inline-formula>, from which (<inline-formula id="ieqn-22"><alternatives><inline-graphic xlink:href="ieqn-22.png"/><tex-math id="tex-ieqn-22"><![CDATA[${\boldsymbol{v}}_{{\boldsymbol{i}}}^{\boldsymbol{\prime}}$]]></tex-math><mml:math id="mml-ieqn-22"><mml:msubsup><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x2032;</mml:mi></mml:mrow></mml:msubsup></mml:math></alternatives></inline-formula>) resamples the activations of the hidden units&#x2019; (<inline-formula id="ieqn-23"><alternatives><inline-graphic xlink:href="ieqn-23.png"/><tex-math 
id="tex-ieqn-23"><![CDATA[${\boldsymbol{h}}_{{\boldsymbol{i}}}^{\boldsymbol{\prime}}$]]></tex-math><mml:math id="mml-ieqn-23"><mml:msubsup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x2032;</mml:mi></mml:mrow></mml:msubsup></mml:math></alternatives></inline-formula>). (1 Gibbs sampling step).</p></list-item>
<list-item><p>In the negative phase, the outer product of (<inline-formula id="ieqn-24"><alternatives><inline-graphic xlink:href="ieqn-24.png"/><tex-math id="tex-ieqn-24"><![CDATA[${\boldsymbol{v}}_{{\boldsymbol{i}}}^{\boldsymbol{\prime}}$]]></tex-math><mml:math id="mml-ieqn-24"><mml:msubsup><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x2032;</mml:mi></mml:mrow></mml:msubsup></mml:math></alternatives></inline-formula>) and (<inline-formula id="ieqn-25"><alternatives><inline-graphic xlink:href="ieqn-25.png"/><tex-math id="tex-ieqn-25"><![CDATA[${\boldsymbol{h}}_{{\boldsymbol{i}}}^{\boldsymbol{\prime}}$]]></tex-math><mml:math id="mml-ieqn-25"><mml:msubsup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x2032;</mml:mi></mml:mrow></mml:msubsup></mml:math></alternatives></inline-formula>) is computed.</p></list-item>
<list-item><p>Finally, the weights matrix and biases are updated with <xref ref-type="disp-formula" rid="eqn-4">Eqs. (4)</xref>&#x2013;<xref ref-type="disp-formula" rid="eqn-6">(6)</xref>.</p></list-item>
</list></p>
<fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>Main architecture of the proposed deep learning models: (a) DBN model, and (b) CDBN model</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-5.png"/>
</fig>
<p>Herein, the <inline-formula id="ieqn-26"><alternatives><inline-graphic xlink:href="ieqn-26.png"/><tex-math id="tex-ieqn-26"><![CDATA[${\boldsymbol{k}}$]]></tex-math><mml:math id="mml-ieqn-26"><mml:mi>k</mml:mi></mml:math></alternatives></inline-formula> parameter of the CD learning algorithm was set to <bold>1</bold>, and all the weights were randomly set with small values computed from a normal distribution of zero mean and SD of 0.02. CDBN is a hierarchical generative representation developed by Lee et al. [<xref ref-type="bibr" rid="ref-43">43</xref>] and is composed of several convolutional RBMs (CRBMs) stacked on each other as building blocks. CRBM is an expansion of the traditional RBM. Different from RBM, the weights between the visible and hidden units in the CRBM are locally shared among all positions in the input image. This scheme of sharing parameters (weights) introduces a form of translational invariance that uses the same-trained filter to detect specific useful features at different locations in an image. The proposed CDBN consists of three stacked CRBMs associated with probabilistic max pooling. CRBM uses Gaussian-valued visible units and binary-valued hidden units. 
The first and second CRBMs consist of 32 and 64 trainable filters (<inline-formula id="ieqn-27"><alternatives><inline-graphic xlink:href="ieqn-27.png"/><tex-math id="tex-ieqn-27"><![CDATA[$\textbf{K}_{\textbf{1}}= 32$]]></tex-math><mml:math id="mml-ieqn-27"><mml:msub><mml:mrow><mml:mstyle class="text"><mml:mtext class="textbf" mathvariant="bold">K</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext class="textbf" mathvariant="bold">1</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>32</mml:mn></mml:math></alternatives></inline-formula>, and <inline-formula id="ieqn-28"><alternatives><inline-graphic xlink:href="ieqn-28.png"/><tex-math id="tex-ieqn-28"><![CDATA[$\textbf{K}_{\textbf{2}}= 64$]]></tex-math><mml:math id="mml-ieqn-28"><mml:msub><mml:mrow><mml:mstyle class="text"><mml:mtext class="textbf" mathvariant="bold">K</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext class="textbf" mathvariant="bold">2</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>64</mml:mn></mml:math></alternatives></inline-formula>) of (<inline-formula id="ieqn-29"><alternatives><inline-graphic xlink:href="ieqn-29.png"/><tex-math id="tex-ieqn-29"><![CDATA[$5 \times 5$]]></tex-math><mml:math id="mml-ieqn-29"><mml:mn>5</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>5</mml:mn></mml:math></alternatives></inline-formula>) pixels, respectively, and the last CRBM consists of 128 trainable filters (<inline-formula id="ieqn-30"><alternatives><inline-graphic xlink:href="ieqn-30.png"/><tex-math id="tex-ieqn-30"><![CDATA[$\textbf{K}_{\textbf{3}}= 128$]]></tex-math><mml:math id="mml-ieqn-30"><mml:msub><mml:mrow><mml:mstyle class="text"><mml:mtext class="textbf" mathvariant="bold">K</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle class="text"><mml:mtext class="textbf" 
mathvariant="bold">3</mml:mtext></mml:mstyle></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>128</mml:mn></mml:math></alternatives></inline-formula>) of (<inline-formula id="ieqn-31"><alternatives><inline-graphic xlink:href="ieqn-31.png"/><tex-math id="tex-ieqn-31"><![CDATA[$6\times 6$]]></tex-math><mml:math id="mml-ieqn-31"><mml:mn>6</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>6</mml:mn></mml:math></alternatives></inline-formula>) pixels. The max-pooling ratio is fixed to two for each pooling layer. The output of the last CRBM is fed into one fully connected layer composed of 256 units, followed by the application of a SoftMax function to produce the probability distribution of each class label. CDBN models are highly overcomplete because each CRBM (hidden layer) has <bold>K</bold> trainable filters (e.g., groups of units) with sizes roughly equal to that of the input image. In general, the overcomplete model runs the risk of learning trivial feature representations (e.g., single-pixel detectors). As a solution, a sparsity penalty term was added to the objective function to obtain a small part of the fired output. In practice, the following simple update process (applied before weight updates) can be employed:</p>
<p><disp-formula id="eqn-7">
<label>(7)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-7.png"/>
<tex-math id="tex-eqn-7"><![CDATA[$$\begin{equation}
\Delta \mathrm{p}_{\mathrm{k}}^{\text{s}\text{p}\text{a}\text{r}\text{s}\text{ity}}\propto \mathrm{p}-\frac{1}{\mathrm{N}_{\mathrm{H}}^{2}}\sum\limits_{\mathrm{i},\mathrm{j}}\mathrm{P}
 \left(\mathrm{h}_{\mathrm{ij}}^{\mathrm{k}}=1\mid \mathrm{v}\right),
\label{eqn-7}
\end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-7" display="block"><mml:mi>&#x0394;</mml:mi><mml:msubsup><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>p</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>k</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>s</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>p</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>a</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>r</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>s</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>ity</mml:mtext></mml:mstyle></mml:mrow></mml:msubsup><mml:mo>&#x221D;</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>p</mml:mi></mml:mstyle><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msubsup><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>N</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>H</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac><mml:munder><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle><mml:mo>,</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>j</mml:mi></mml:mstyle></mml:mrow></mml:munder><mml:mstyle mathvariant="normal"><mml:mi>P</mml:mi></mml:mstyle><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>h</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>k</mml:mi></mml:mstyle></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2223;</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>v</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:math></alternatives></disp-formula></p>
<p>where <inline-formula id="ieqn-32"><alternatives><inline-graphic xlink:href="ieqn-32.png"/><tex-math id="tex-ieqn-32"><![CDATA[${\boldsymbol{p}}$]]></tex-math><mml:math id="mml-ieqn-32"><mml:mi>p</mml:mi></mml:math></alternatives></inline-formula> is referred to as the target sparsity. In this work, the target sparsity was set as 0.005 for all the CRBMs. The proposed training methodology for DBN and CDBN models is composed of three phases: unsupervised pre-training, supervised, and fine-tuning.</p>
<list list-type="order">
<list-item><p>In the unsupervised pre-training phase, the first four hidden layers (e.g., RBMs and CRBMs) are trained using an unsupervised greedy training algorithm based on the CD algorithm to train each added hidden layer as either an RBM for the DBN model or a CRBM for the CDBN model. The activations produced from the first trained hidden layer acted as discriminative features extracted from the input images. These features are then assigned to the (<inline-formula id="ieqn-33"><alternatives><inline-graphic xlink:href="ieqn-33.png"/><tex-math id="tex-ieqn-33"><![CDATA[${\boldsymbol{v}}_{{\boldsymbol{i}}}$]]></tex-math><mml:math id="mml-ieqn-33"><mml:msub><mml:mrow><mml:mi>v</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></alternatives></inline-formula>) as an input to train the next hidden layer. This phase is completed when the (<inline-formula id="ieqn-34"><alternatives><inline-graphic xlink:href="ieqn-34.png"/><tex-math id="tex-ieqn-34"><![CDATA[$\mathbf{N}- \mathbf{1}$]]></tex-math><mml:math id="mml-ieqn-34"><mml:mstyle mathvariant="bold"><mml:mi>N</mml:mi></mml:mstyle><mml:mo>-</mml:mo><mml:mstyle mathvariant="bold"><mml:mn>1</mml:mn></mml:mstyle></mml:math></alternatives></inline-formula>) RBM (hidden layer) is successfully trained. After training, these (<inline-formula id="ieqn-35"><alternatives><inline-graphic xlink:href="ieqn-35.png"/><tex-math id="tex-ieqn-35"><![CDATA[$\mathbf{N}- \mathbf{1}$]]></tex-math><mml:math id="mml-ieqn-35"><mml:mstyle mathvariant="bold"><mml:mi>N</mml:mi></mml:mstyle><mml:mo>-</mml:mo><mml:mstyle mathvariant="bold"><mml:mn>1</mml:mn></mml:mstyle></mml:math></alternatives></inline-formula>) hidden layers of the model can be viewed as a feature extractor that automatically extracts the most useful and discriminative features from the raw images. 
The main benefit of this unsupervised greedy training algorithm is the capability to train the DBN and CDBN models by using a huge amount of unlabeled training data.</p></list-item>
<list-item><p>In the supervised phase, the last DRBM in the DBN model and the SoftMax classifier in the CDBN model are trained in a supervised manner as non-linear classifiers by using the labeled data in the training and validation sets to monitor their performance during learning.</p></list-item>
<list-item><p>Finally, the back-propagation algorithm is implemented to fine-tune the parameters of the whole DBN model in a top-down manner to achieve satisfactory predictions.</p></list-item>
</list>
<p>Similar to other deep learning networks, DBN and CDBN need a massive amount of training data to prevent overfitting during the training process, reduce the generalization error of the last obtained model, and achieve satisfactory predictions. Thus, simple data augmentation was implemented to artificially increase the number of training samples in the COVID19-<italic>vs.</italic>-Normal dataset (see Subsection 3.1).</p>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Evaluation Criteria</title>
<p>In the prediction phase, the average values of seven quantitative performance measures, namely, detection accuracy rate (DAR), sensitivity, specificity, precision, F1-score, mean squared error (MSE), and root mean squared error (RMSE) were computed to measure the accuracy and the efficiency of the proposed COVID-DeepNet model by using the testing set. These seven quantitative performance measures are calculated as follows:</p>
<p><disp-formula id="eqn-8">
<label>(8)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-8.png"/>
<tex-math id="tex-eqn-8"><![CDATA[$$\begin{equation}\mathrm{DAR}=\frac{ \left(\mathrm{TP}+\mathrm{TN}\right)}{ \left(\mathrm{TP}+\mathrm{TN}+\mathrm{FP}
+\mathrm{FN}\right)},
\label{eqn-8} \end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-8" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>D</mml:mi><mml:mi>A</mml:mi><mml:mi>R</mml:mi></mml:mstyle><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mstyle><mml:mo>+</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mstyle><mml:mo>+</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mstyle><mml:mo>+</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mstyle><mml:mo>+</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p><disp-formula id="eqn-9">
<label>(9)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-9.png"/>
<tex-math id="tex-eqn-9"><![CDATA[$$\begin{equation}\text{S}\text{e}\text{n}\text{s}\text{i}\text{tivity}(\text{R}\text{e}\text{c}\text{a}\text{l}\text{l})=\frac{ \left(\mathrm{TP}\right)}{ \left(\mathrm{TP}+\mathrm{FN}\right)},
\label{eqn-9} \end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-9" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>S</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>e</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>n</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>s</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>i</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>tivity</mml:mtext></mml:mstyle><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle><mml:mtext>R</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>e</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>c</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>a</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>l</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>l</mml:mtext></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mstyle><mml:mo>+</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<disp-formula id="eqn-10">
<label>(10)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-10.png"/>
<tex-math id="tex-eqn-10"><![CDATA[$$\begin{equation}\text{Specificity }=\frac{ \left(\mathrm{TN}\right)}{ \left(\mathrm{TN}+\mathrm{FP}\right)},
\label{eqn-10} \end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-10" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>Specificity&#x00A0;</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mstyle><mml:mo>+</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula>
<disp-formula id="eqn-11">
<label>(11)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-11.png"/>
<tex-math id="tex-eqn-11"><![CDATA[$$\begin{equation}\text{ Precision }=\frac{ \left(\mathrm{TP}\right)}{ \left(\mathrm{TP}+\mathrm{FP}\right)},
\label{eqn-11} \end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-11" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>&#x00A0;Precision&#x00A0;</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mstyle><mml:mo>+</mml:mo><mml:mstyle mathvariant="normal"><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula>
<disp-formula id="eqn-12">
<label>(12)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-12.png"/>
<tex-math id="tex-eqn-12"><![CDATA[$$\begin{equation}\mathrm{F}1\text{S}\text{c}\text{o}\text{r}\text{e}=2\ast\frac{ \left(\text{P}\text{r}\text{e}\text{c}\text{i}\text{sion}\ast\text{R}\text{e}\text{c}\text{a}\text{l}\text{l}\right)}{ \left(\text{P}\text{r}\text{e}\text{c}\text{i}\text{sion}
+\text{ Recall}\right)},
\label{eqn-12}\end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-12" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>F</mml:mi></mml:mstyle><mml:mn>1</mml:mn><mml:mstyle><mml:mtext>S</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>c</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>o</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>r</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>e</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:mn>2</mml:mn><mml:mo>*</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle><mml:mtext>P</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>r</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>e</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>c</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>i</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>sion</mml:mtext></mml:mstyle><mml:mo>*</mml:mo><mml:mstyle><mml:mtext>R</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>e</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>c</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>a</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>l</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>l</mml:mtext></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mstyle><mml:mtext>P</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>r</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>e</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>c</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>i</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>sion</mml:mtext></mml:mstyle><mml:mo>+</mml:mo><mml:mstyle><mml:mtext>&#x00A0;Recall</mml:mtext></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula>
<p>where <bold>TP</bold>, <bold>TN</bold>, <bold>FP</bold>, and <bold>FN</bold> stand for true positives, true negatives, false positives, and false negatives, respectively.</p>
<p><disp-formula id="eqn-13">
<label>(13)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-13.png"/>
<tex-math id="tex-eqn-13"><![CDATA[$$\begin{equation}\mathrm{MSE}=\frac{1}{\mathrm{n}}\sum\limits_{\mathrm{i}=1}^{\mathrm{n}} \left(\mathrm{Y}_{\mathrm{i}}
-\hat{\mathrm{Y}}_{\mathrm{i}}\right)^{2},
\label{eqn-13} \end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-13" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>M</mml:mi><mml:mi>S</mml:mi><mml:mi>E</mml:mi></mml:mstyle><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>n</mml:mi></mml:mstyle></mml:mrow></mml:mfrac><mml:mstyle displaystyle='true'><mml:munderover><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle><mml:mo lspace='0pt' rspace='0pt'>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>n</mml:mi></mml:mstyle></mml:mrow></mml:munderover></mml:mstyle><mml:msup><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>Y</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>Y</mml:mi></mml:mstyle></mml:mrow><mml:mo>^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p><disp-formula id="eqn-14">
<label>(14)</label>
<alternatives>
<graphic mimetype="image" mime-subtype="png" xlink:href="eqn-14.png"/>
<tex-math id="tex-eqn-14"><![CDATA[$$\begin{equation}\text{R}\text{M}\text{S}\text{E}= \sqrt{\frac{1}{\mathrm{n}}\sum\limits_{\mathrm{i}=1}^{\mathrm{n}} \left(\mathrm{Y}_{\mathrm{i}}
-\hat{\mathrm{Y}}_{\mathrm{i}}\right)^{2}},
\label{eqn-14}\end{equation}$$]]></tex-math>
<mml:math id="mml-eqn-14" display="block"><mml:mrow></mml:mrow><mml:mrow><mml:mstyle><mml:mtext>R</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>M</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>S</mml:mtext></mml:mstyle><mml:mstyle><mml:mtext>E</mml:mtext></mml:mstyle><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>n</mml:mi></mml:mstyle></mml:mrow></mml:mfrac><mml:mstyle displaystyle='true'><mml:munderover><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle><mml:mo lspace='0pt' rspace='0pt'>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>n</mml:mi></mml:mstyle></mml:mrow></mml:munderover></mml:mstyle><mml:msup><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>Y</mml:mi></mml:mstyle></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>Y</mml:mi></mml:mstyle></mml:mrow><mml:mo>^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mstyle mathvariant="normal"><mml:mi>i</mml:mi></mml:mstyle></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:msqrt><mml:mo>,</mml:mo></mml:mrow><mml:mrow></mml:mrow></mml:math>
</alternatives></disp-formula></p>
<p>where <inline-formula id="ieqn-36"><alternatives><inline-graphic xlink:href="ieqn-36.png"/><tex-math id="tex-ieqn-36"><![CDATA[${\boldsymbol{n}}$]]></tex-math><mml:math id="mml-ieqn-36"><mml:mi>n</mml:mi></mml:math></alternatives></inline-formula> refers to the total number of data samples, <inline-formula id="ieqn-37"><alternatives><inline-graphic xlink:href="ieqn-37.png"/><tex-math id="tex-ieqn-37"><![CDATA[$\boldsymbol{Y}$]]></tex-math><mml:math id="mml-ieqn-37"><mml:mi>Y</mml:mi></mml:math></alternatives></inline-formula> is the vector of observed values of the variable being predicted, and <inline-formula id="ieqn-38"><alternatives><inline-graphic xlink:href="ieqn-38.png"/><tex-math id="tex-ieqn-38"><![CDATA[$\hat{\boldsymbol{Y}}$]]></tex-math><mml:math id="mml-ieqn-38"><mml:mover accent="true"><mml:mrow><mml:mi>Y</mml:mi></mml:mrow><mml:mo>^</mml:mo></mml:mover></mml:math></alternatives></inline-formula> is the vector of <inline-formula id="ieqn-39"><alternatives><inline-graphic xlink:href="ieqn-39.png"/><tex-math id="tex-ieqn-39"><![CDATA[${\boldsymbol{n}}$]]></tex-math><mml:math id="mml-ieqn-39"><mml:mi>n</mml:mi></mml:math></alternatives></inline-formula> predicted values.</p>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Experimental Results</title>
<p>Several extensive experiments on COVID19-<italic>vs.</italic>-Normal dataset were conducted to reveal the effectiveness of the proposed deep learning methods (e.g., DBN and CDBN) and their combination (e.g., using the proposed COVID-DeepNet system) and compare their performances with the current state-of-the-art approaches. The code of the proposed COVID-DeepNet system was written to run in MATLAB R2018a and later versions and trained using Windows 10 operating system, a Core i7-4510U CPU, 69 K GPU graphics card, and 24 GB of RAM. Following the proposed training methodology, all experiments were conducted using 75% randomly selected CX-R images as a training set to train the proposed deep learning approaches. The remaining 25% of the images were used as a testing set to assess their generalization ability in predicting new unseen data. During learning, 10% of the training set was randomly selected and employed as a validation set to assess their performance and store the weight configurations that produce the highest accuracy rate.</p>
<sec id="s4_1">
<label>4.1</label>
<title>COVID-DeepNet Architecture and Training Details</title>
<p>The main architecture of the COVID-DeepNet system is based on the combined output scores produced from two discriminative deep learning methods (e.g., DBN and CDBN). The main challenging task of using deep learning models is the huge number of structures and hyper-parameters to be assessed (e.g., number of hidden layers, filter size, number of epochs, and learning rate). Herein, several experiments were performed to find the best model&#x2019;s architecture for DBN and CDBN. The influence of different values of the hyper-parameters on the performance of the proposed approaches was also analyzed. Two different training set configurations from the COVID19-<italic>vs.</italic>-Normal dataset were created and evaluated to reveal the important contribution of the proposed image enhancement procedure in guiding the learning of proposed approaches and improving their performance compared with the use of raw images as input data. The two training set configurations were as follows: (i) <italic>TrainingSet_1</italic> consists of raw images and those produced using the proposed data augmentation procedure, and (ii) <italic>TrainingSet_2</italic> consists of the pre-processed images and those produced using the proposed data augmentation procedure.</p>
<p>In DBNs and CDBMs, the values of their hyper-parameters mainly depend on each other. Furthermore, the hyper-parameters values used in a specific RBM may be affected by the hyper-parameter values used in other RBMs. Thus, the hyper-parameter fine-tuning task in these two approaches requires a large cost. Therefore, a coarse search procedure was implemented to find the best hyper-parameter values. As shown in <xref ref-type="table" rid="table-1">Tab. 1</xref>, the proposed DBN model composed of stacking five RBMs (hidden layers) was trained in a bottom-up way using the proposed training methodology presented in (Subsection 3.3). The number of the hidden units in the first two layers was fixed to 4096 units, and the different numbers of hidden units were evaluated in the last three layers to find the best network configuration. When the unsupervised training of the first RBM (the first hidden layer) was completed, the weight matrix of the hidden layer was frozen and was used as an input data for the training of the second RBM (the second hidden layer) in the stack. With the CD learning algorithm (e.g., one step of Gibbs sampling), the first four RBMs were trained separately in an unsupervised greedily manner. Each RBM (hidden layer) was trained for 100 epochs with a mini-batch size of 100, a weight decay value of 0.0002, a learning rate of 10<sup>&#x2212;2</sup>, and a momentum value of 0.9. The weights were randomly initialized with small values computed from a normal distribution of zero mean and SD of 0.02. The last layer was trained as a non-linear DRBM classifier with SoftMax units to produce the final probability scores. The last DRBM was trained using the same hyper-parameters values of the first four RBMs. Finally, the back-propagation algorithm equipped with the dropout method was applied for the parameter fine-tuning of the whole DBN model in a top-down manner to avoid overfitting and achieve satisfactory predictions. The dropout ratio was 0.5. 
Initially, the whole DBN was trained in a top-down manner for 100 epochs; however, the model can be further improved by increasing the number of epochs. Therefore, the number of epochs was set to approximately 500 epochs using the early stopping procedure. As revealed in <xref ref-type="table" rid="table-1">Tab. 1</xref>, five DBN models were trained using two different training sets (e.g., <italic>TrainingSet_1</italic> and <italic>TrainingSet_2</italic>), and the highest accuracy was obtained using the fourth DBN (4096-4096-3000-2048-1024) model. The training time of all the five trained models was substantially decreased by training them on the top of the <italic>TrainingSet_2</italic> containing only the processed images data. This finding confirms our argument that training the proposed DNNs on the top of the pre-processed images can remarkably improve their ability to rapidly learn useful feature representations with less time required to obtain the last trained model. Therefore, <italic>TrainingSet_2</italic> was used for all subsequent experiments. Additional information on the DBN and its hyper-parameters are given in <xref ref-type="table" rid="table-2">Tab. 2</xref>. For an initial CDBN architecture, only two CRBMs were greedily trained using the same proposed trained methodology described in (Subsection 3.3). This initial CDBN architecture was referred to as a CDBN-A in the subsequent experiments. The number of filters was initially set to 32 filters at each CRBM layer, and the size of the filter was set to (<inline-formula id="ieqn-40"><alternatives><inline-graphic xlink:href="ieqn-40.png"/><tex-math id="tex-ieqn-40"><![CDATA[$5\times 5$]]></tex-math><mml:math id="mml-ieqn-40"><mml:mn>5</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>5</mml:mn></mml:math></alternatives></inline-formula>) pixels. The CD learning algorithm (e.g., one step of Gibbs sampling) was used to train all the CRBMs in an unsupervised greedily manner. 
Each CRBM was trained separately for 100 epochs with a mini-batch size of 100, target sparsity of 0.005, learning rate of 10<sup>&#x2212;2</sup>, a weight decay value of 0.0005, and a momentum value of 0.95. The weights were randomly initialized with small values computed from a normal distribution of zero mean and SD of 0.02.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption>
<title>Comparison of five different DBN architectures in terms of DAR and training time</title>
</caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>DBN Models</th>
<th colspan="2">TrainingSet_1</th>
<th colspan="2">TrainingSet_2</th>
</tr>
<tr>
<th></th>
<th>DAR</th>
<th>Training Time</th>
<th>DAR</th>
<th>Training Time</th>
</tr>
</thead>
<tbody>
<tr>
<td>4096-4096-1024-1024-1024</td>
<td>0.67</td>
<td>9 h, 40 min</td>
<td>0.81</td>
<td>6 h, 22 min</td>
</tr>
<tr>
<td>4096-4096-2048-1024-1024</td>
<td>0.77</td>
<td>10 h, 37 min</td>
<td>0.90</td>
<td>8 h, 11 min</td>
</tr>
<tr>
<td>4096-4096-2048-2048-1024</td>
<td>0.72</td>
<td>11 h, 28 min</td>
<td>0.94</td>
<td>8 h, 39 min</td>
</tr>
<tr>
<td>4096-4096-3000-2048-1024</td>
<td>0.88</td>
<td>12 h, 17 min</td>
<td>0.96</td>
<td>8 h, 41 min</td>
</tr>
<tr>
<td>4096-4096-3000-2048-2048</td>
<td>0.79</td>
<td>12 h, 47 min</td>
<td>0.92</td>
<td>9 h, 11 min</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="table-2">
<label>Table 2</label>
<caption>
<title>Details of hyper-parameters for the proposed deep learning approaches (e.g., DBN and CDBN)</title>
</caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="2">Five-layer DBN model</th>
<th colspan="2">CDBN model</th>
</tr>
<tr>
<th>Hyper-Parameters</th>
<th>Values</th>
<th>Hyper-Parameters</th>
<th>Values</th>
</tr>
</thead>
<tbody>
<tr>
<td>CD learning algorithm</td>
<td>1 Step of Gibbs</td>
<td>CD learning algorithm</td>
<td>1 Step of Gibbs</td>
</tr>
<tr>
<td/>
</tr>
<tr>
<td/>
</tr>
<tr>
<td/>
<td>sampling</td>
<td/>
<td>sampling</td>
</tr>
<tr>
<td>Optimization method</td>
<td>Adam</td>
<td>Optimization method</td>
<td>Adagrad</td>
</tr>
<tr>
<td>Target sparsity</td>
<td>0.05</td>
<td>Target sparsity</td>
<td>0.005</td>
</tr>
<tr>
<td>No. of layers</td>
<td>5 RBMs</td>
<td>No. of layers</td>
<td>3 CRBMs</td>
</tr>
<tr>
<td>No. of epochs for each RBMs</td>
<td>100</td>
<td>No. of epochs for each CRBMs</td>
<td>100</td>
</tr>
<tr>
<td>Momentum</td>
<td>0.9</td>
<td>Momentum</td>
<td>0.95</td>
</tr>
<tr>
<td>Weight decay</td>
<td>0.0002</td>
<td>Weight-decay</td>
<td>0.0005</td>
</tr>
<tr>
<td>Dropout</td>
<td>0.5</td>
<td>Dropout</td>
<td>0.5</td>
</tr>
<tr>
<td>Batch size</td>
<td>100</td>
<td>Batch size</td>
<td>100</td>
</tr>
<tr>
<td>Learning rate</td>
<td>10<sup>&#x2212;2</sup></td>
<td>Learning rate</td>
<td>10<sup>&#x2212;2</sup></td>
</tr>
<tr>
<td>Total No. of epochs (backward)</td>
<td>500</td>
<td>Total No. of epochs (backward)</td>
<td>300</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The dropout technique with the dropout ratio of 0.5 was applied only for the fully-connected layer. In the fine-tuning phase, the weights of the whole CDBN-A model were optimized using the back-propagation algorithm in a top-down manner to achieve satisfactory predictions. First, the CDBN-A was trained for 500 epochs with a mini-batch size of 100. However, the performance of the CDBN-A model using the validation data declined when 500 epochs were evaluated because the last trained model started overfitting the training set (see <xref ref-type="fig" rid="fig-6">Fig. 6</xref>).</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>Finding the best number of epochs to train the CDBN-A model using the back-propagation algorithm in a top-down manner</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-6.png"/>
</fig>
<p>For the CDBN model, this hyper-parameter was determined empirically by varying its value from 100 epochs to 500 epochs in steps of 10. The highest validation accuracy rate was obtained from the validation set by training the CDBN-A model in a top-down manner for 300 epochs. Moreover, a high accuracy rate can be achieved by adding a new CRBM with 128 trainable filters of size (<inline-formula id="ieqn-41"><alternatives><inline-graphic xlink:href="ieqn-41.png"/><tex-math id="tex-ieqn-41"><![CDATA[$6 \times 6$]]></tex-math><mml:math id="mml-ieqn-41"><mml:mn>6</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>6</mml:mn></mml:math></alternatives></inline-formula>) pixels and changing the number of filters in the second CRBM from 32 trainable filters to 64 trainable filters. This newly added CRBM was trained using the same hyper-parameters of the other CRBM. This new architecture was denoted as a CDBN-B model and used for all remaining experiments instead of the CDBN-A model. Additional information on the CDBN-B and its hyper-parameters are presented in <xref ref-type="table" rid="table-2">Tab. 2</xref>. The ROC curves of the CDBN-A and CDBN-B models are shown in <xref ref-type="fig" rid="fig-7">Fig. 7</xref> to visualize their performances on the testing set of the COVID19-<italic>vs.</italic>-Normal dataset. <xref ref-type="fig" rid="fig-8">Fig. 8</xref> shows the learned high-level feature representations from the last hidden layers in CDBN-A and CDBN-B model after training.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>Performance comparison between CDBN-A and CDBN-B on the testing set of the COVID19-<italic>vs.</italic>-Normal dataset</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-7.png"/>
</fig>
<fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>Visualization of the learned high-level feature representations from the last hidden layers: (a) CDBN-A model, and (b) CDBN-B model</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-8.png"/>
</fig>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Fusion Rule Evaluation</title>
<p>The proposed COVID-DeepNet system makes the final decision by integrating the results produced from two different deep learning models (e.g., DBN and CDBN). Every time a CX-R image is assigned to the proposed COVID-DeepNet system, two predicted probability scores are computed, and the highest probability score is used to assign the input image to one of two classes (e.g., either normal or COVID-19 class). In this section, the results obtained from the DBN and CDBN models were combined and evaluated using different fusion rules in the score-level fusion (e.g., using sum, weighted sum, product, max, and min rule) and decision-level fusion (e.g., using the AND and OR rules). Additional information on how these fusion rules are implemented in both levels can be found in [<xref ref-type="bibr" rid="ref-44">44</xref>].</p>
<p>Parallel architecture, which provides radiologists a high degree of confidence to make their final decision and to accurately distinguish between healthy and COVID-19 infected subjects, was considered in the proposed COVID-DeepNet system. During the implementation of the weighted sum rule (WSR) at the score-level, a slightly higher weight value was given to the CDBN-B model than to the DBN model due to the better performance of the former. Moreover, normalization is not required prior to applying the score fusion rules because both classifiers generate the same probability scores and within the same numeric range [0,1]. Herein, the average values of seven quantitative performance measures using various fusion rules at the score- and decision-level fusion are presented in <xref ref-type="table" rid="table-3">Tabs. 3</xref> and <xref ref-type="table" rid="table-4">4</xref>, respectively.</p>
<table-wrap id="table-3">
<label>Table 3</label>
<caption>
<title>Performance comparison of the proposed hybrid COVID-DeepNet system using five different rules in score-level fusion</title>
</caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Evaluation criteria</th>
<th>DBN</th>
<th>CDBN</th>
<th colspan="5">Score fusion rules</th>
</tr>
<tr>
<th></th>
<th></th>
<th></th>
<th>SR</th>
<th>WSR</th>
<th>PR</th>
<th>Max</th>
<th>Min</th>
</tr>
</thead>
<tbody>
<tr>
<td>DAR</td>
<td>96</td>
<td>98.22</td>
<td>98.42</td>
<td>99.93</td>
<td>97.92</td>
<td>98.34</td>
<td>96.21</td>
</tr>
<tr>
<td>Sens. (recall)</td>
<td>95.10</td>
<td>97.54</td>
<td>97.66</td>
<td>99.90</td>
<td>97.84</td>
<td>98.81</td>
<td>96.12</td>
</tr>
<tr>
<td>Specificity</td>
<td>96.21</td>
<td>98.53</td>
<td>98.64</td>
<td>100</td>
<td>98.23</td>
<td>98.14</td>
<td>97.22</td>
</tr>
<tr>
<td>Precision</td>
<td>97</td>
<td>98.67</td>
<td>98.88</td>
<td>100</td>
<td>98.43</td>
<td>97.88</td>
<td>97.05</td>
</tr>
<tr>
<td>F1 Score</td>
<td>97.10</td>
<td>98.78</td>
<td>98.92</td>
<td>99.93</td>
<td>98.52</td>
<td>98.09</td>
<td>97.33</td>
</tr>
<tr>
<td>MSE</td>
<td>0.17</td>
<td>0.12</td>
<td>0.09</td>
<td>0.021</td>
<td>0.11</td>
<td>0.12</td>
<td>0.19</td>
</tr>
<tr>
<td>RMSE</td>
<td>0.21</td>
<td>0.15</td>
<td>0.11</td>
<td>0.016</td>
<td>0.14</td>
<td>0.17</td>
<td>0.24</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="table-4">
<label>Table 4</label>
<caption>
<title>Performance comparison of the proposed hybrid COVID-DeepNet system using two different rules in decision-level fusion</title>
</caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Evaluation criteria</th>
<th>DBN</th>
<th>CDBN</th>
<th colspan="2">Decision fusion rules</th>
</tr>
<tr>
<th></th>
<th></th>
<th></th>
<th>AND</th>
<th>OR</th>
</tr>
</thead>
<tbody>
<tr>
<td>DAR</td>
<td>96</td>
<td>98.22</td>
<td>95.33</td>
<td>99.35</td>
</tr>
<tr>
<td>Sens. (recall)</td>
<td>95.10</td>
<td>97.54</td>
<td>95.66</td>
<td>99.33</td>
</tr>
<tr>
<td>Specificity</td>
<td>96.21</td>
<td>98.53</td>
<td>96.78</td>
<td>99.37</td>
</tr>
<tr>
<td>Precision</td>
<td>97</td>
<td>98.67</td>
<td>96.84</td>
<td>99.37</td>
</tr>
<tr>
<td>F1 Score</td>
<td>97.10</td>
<td>98.78</td>
<td>97.84</td>
<td>99.35</td>
</tr>
<tr>
<td>MSE</td>
<td>0.17</td>
<td>0.12</td>
<td>0.19</td>
<td>0.064</td>
</tr>
<tr>
<td>RMSE</td>
<td>0.21</td>
<td>0.15</td>
<td>0.25</td>
<td>0.036</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The accuracy of the proposed hybrid COVID-DeepNet system was remarkably enhanced compared with that of DBN or CDBN alone. The highest values of the adopted seven quantitative measures were obtained using the WSR and the OR rule in the score- and decision-level fusion, respectively. The proposed COVID-DeepNet system correctly and accurately diagnosed the patients with COVID-19 in the score-level fusion with a DAR of 99.93%, sensitivity of 99.90%, specificity of 100%, precision of 100%, F1-score of 99.93%, MSE of 0.021%, and RMSE of 0.016% using the WSR and in the decision-level fusion using the OR rule with a DAR of 99.35%, sensitivity of 99.33%, specificity of 99.37%, precision of 99.37%, F1-score of 99.35%, MSE of 0.064%, and RMSE of 0.036%. The high precision value of 100% achieved in the score-level fusion using the WSR is essential in reducing the number of misclassified healthy cases as COVID-19 cases. Finally, the two confusion matrices of COVID-19 infected and normal test results using the WSR and OR rule are shown in <xref ref-type="fig" rid="fig-9">Fig. 9</xref>. Under WSR rule, only one COVID-19 infected image was misidentified as a healthy image, and three healthy images were misclassified as COVID-19. Under OR rule in the decision-level fusion, 19 COVID-19 infected images were misclassified as healthy images, and 20 healthy images were misclassified as COVID-19. Thus, WSR was used in the performance comparison of the proposed COVID-DeepNet system with current state-of-the-art systems due to its effectiveness in exploiting the strength of each classifier. These results further strengthened the possibility of employing the proposed COVID-DeepNet system in real-world settings to seriously moderate the workload of radiologists and help them accurately detect COVID-19 infection by using CX-R images.</p>
<fig id="fig-9">
<label>Figure 9</label>
<caption>
<title>Confusion matrices for the proposed COVID-DeepNet system using different fusion rules: (a) WSR rule in the score-level fusion, and (b) OR rule in the decision-level fusion</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="fig-9.png"/>
</fig>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Comparison Study and Discussion</title>
<p>The efficiency and reliability of the proposed COVID-DeepNet system were compared with those of the most current state-of-the-art COVID-19 detection systems. The first three COVID-19 detection systems were evaluated on the COVIDx dataset containing only 76 CX-R images with confirmed COVID-19. The first system was developed by Wang et al. [<xref ref-type="bibr" rid="ref-19">19</xref>] using a deep tailored designed model based on a CNN termed as a COVID-Net. The second system was proposed by Farooq et al. [<xref ref-type="bibr" rid="ref-27">27</xref>] by employing a pre-trained ResNet-50 model termed as a COVID-ResNet. The third system was proposed by Luz et al. [<xref ref-type="bibr" rid="ref-45">45</xref>]. The performance of different architectures of EfficientNet was assessed using an updated version of the COVIDx dataset containing 183 chest radiography images with confirmed COVID-19. The performances of these three systems were evaluated by computing four quantitative measures (e.g., accuracy, sensitivity, precision, and F1-score) for three different classes (e.g., normal, non-COVID19, and COVID-19). For an impartial comparison, these four quantitative measures were averaged, and the values are shown in <xref ref-type="table" rid="table-5">Tab. 5</xref>. The proposed COVID-DeepNet system obtained better results compared with the other systems. Although the EfficientNet B3 model described in [<xref ref-type="bibr" rid="ref-45">45</xref>] achieved the same precision of 100%, the proposed COVID-DeepNet system produced better results in the other two measures (e.g., accuracy and sensitivity) by using a large dataset containing many CX-R images with confirmed COVID-19. A comparison study among three different CNN models (e.g., InceptionV3, ResNet50, and Inception-ResNetV2) was conducted by Narin et al. [<xref ref-type="bibr" rid="ref-23">23</xref>] to detect COVID-19 infected patients using CX-R images. 
The mean values of five different quantitative measures (e.g., accuracy, recall, specificity, precision, and F1-score) were calculated using fivefold cross-validation to assess the performance of these systems. The best performance was obtained using the pre-trained ResNet50 model with an accuracy rate of 98%, recall of 96%, and a specificity value of 100%. Although the ResNet50 model achieved the same specificity and precision as the proposed COVID-DeepNet system, its results for the other three measurements were inferior (<xref ref-type="table" rid="table-6">Tab. 6</xref>).</p>
<table-wrap id="table-5">
<label>Table 5</label>
<caption>
<title>Performance comparison between the proposed COVID-DeepNet system and three current state-of-the-art COVID-19 detection systems evaluated on the COVIDx dataset</title>
</caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Quantitative measures</th>
<th>Proposed COVID- DeepNet system</th>
<th>COVID-net model [<xref ref-type="bibr" rid="ref-19">19</xref>]</th>
<th>COVID-ResNet model [<xref ref-type="bibr" rid="ref-27">27</xref>]</th>
<th>EfficientNet B3 Model [<xref ref-type="bibr" rid="ref-45">45</xref>]</th>
</tr>
</thead>
<tbody>
<tr>
<td>Accuracy</td>
<td>99.93</td>
<td>92.4</td>
<td>96.23</td>
<td>93.9</td>
</tr>
<tr>
<td>Sens. (recall)</td>
<td>99.90</td>
<td>88.6</td>
<td>96.92</td>
<td>96.8</td>
</tr>
<tr>
<td>Precision</td>
<td>100</td>
<td>91.3</td>
<td>96.86</td>
<td>100</td>
</tr>
<tr>
<td>F1 Score</td>
<td>99.93</td>
<td>&#x2013;</td>
<td>96.88</td>
<td>&#x2013;</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="table-6">
<label>Table 6</label>
<caption>
<title>Performance comparison between the proposed COVID-DeepNet system and ResNet50 [<xref ref-type="bibr" rid="ref-23">23</xref>]</title>
</caption>
<table><colgroup>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Quantitative measures</th>
<th>Proposed COVID- DeepNet system</th>
<th>ResNet50 model [<xref ref-type="bibr" rid="ref-23">23</xref>]</th>
</tr>
</thead>
<tbody>
<tr>
<td>Accuracy</td>
<td>99.93</td>
<td>98</td>
</tr>
<tr>
<td>Sens. (recall)</td>
<td>99.90</td>
<td>96</td>
</tr>
<tr>
<td>Specificity</td>
<td>100</td>
<td>100</td>
</tr>
<tr>
<td>Precision</td>
<td>100</td>
<td>100</td>
</tr>
<tr>
<td>F1-score</td>
<td>99.93</td>
<td>98</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Conclusion and Future Work</title>
<p>An accurate and automated system for COVID-19 diagnosis is presented and named as COVID-DeepNet system to distinguish between healthy and COVID-19 infected subjects by using chest radiography images. In the COVID-DeepNet system, CLAHE and Butterworth bandpass filter were applied to enhance the contrast of the CX-R image and eliminate the noise, respectively. Two discriminate deep learning approaches (e.g., DBN and CDBN) were trained from scratch on the top of the pre-processed chest radiography images to prevent overfitting and enhance the generalization capabilities of the proposed deep learning approaches. A large-scale CX-R image dataset was created and termed as the COVID19-<italic>vs.</italic>-Normal dataset to assess the performance of the COVID-DeepNet system. The proposed system achieved comparable performance with expert radiologists with a DAR of 99.93%, sensitivity of 99.90%, specificity of 100%, precision of 100%, F1-score of 99.93%, MSE of 0.021%, and RMSE of 0.016% using the weighted sum rule in the score-level fusion. The main limitation of the proposed COVID-DeepNet system is that it was trained to classify the input CX-R image into one of two classes (e.g., healthy and COVID-19 infected). The proposed COVID-DeepNet system is currently being trained to classify the CX-R image to other types of diseases (e.g., bacterial pneumonia and viral pneumonia). Further experimental investigations are required to prove the effectiveness of the proposed COVID-DeepNet system using a large and challenging dataset containing many COVID-19 cases.</p>
</sec>
</body>
<back>
<fn-group><fn fn-type="other"><p><bold>Funding Statement:</bold> The authors received no specific funding for this study.</p></fn>
<fn fn-type="conflict"><p><bold>Conflicts of Interest:</bold> The authors declare that they have no conflicts of interest to report regarding the present study.</p></fn></fn-group>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Zhong</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Mu</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Yin</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Early prediction of the 2019 novel coronavirus outbreak in the mainland China based on simple mathematical model</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, pp. <fpage>51761</fpage>&#x2013;<lpage>51769</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Zhou</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Dong</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Qu</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Gong</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Epidemiological and clinical characteristics of 99 cases of 2019 novel coronavirus pneumonia in Wuhan, China: A descriptive study</article-title>,&#x201D; <source>Lancet</source>, vol. <volume>395</volume>, no. <issue>10223</issue>, pp. <fpage>507</fpage>&#x2013;<lpage>513</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Chavez</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Long</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Koyfman</surname></string-name> and <string-name><given-names>S. Y.</given-names> <surname>Liang</surname></string-name></person-group>, &#x201C;<article-title>Coronavirus disease (COVID-19): A primer for emergency physicians</article-title>,&#x201D; <source>American Journal of Emergency Medicine</source>, <year>2020</year> <comment>(Pre-online)</comment>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Guo</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhou</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Liu</surname></string-name> and <string-name><given-names>J.</given-names> <surname>Tan</surname></string-name></person-group>, &#x201C;<article-title>The impact of the COVID-19 epidemic on the utilization of emergency dental services</article-title>,&#x201D; <source>Journal of Dental Sciences</source>, vol. <volume>15</volume>, no. <issue>4</issue>, pp. <fpage>564</fpage>&#x2013;<lpage>567</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab>Worldmeter</collab></person-group>, [Online]. Available: <uri>https://www.worldometers.info/coronavirus/worldwide-graphs/</uri> <comment>[Accessed: 6 June 2020]</comment>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>W.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Xu</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Gao</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Lu</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Han</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Detection of SARS-CoV-2 in different types of clinical specimens</article-title>,&#x201D; <source>Journal of the American Medical Association</source>, vol. <volume>323</volume>, no. <issue>18</issue>, pp. <fpage>1843</fpage>&#x2013;<lpage>1844</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>Ming-Yen</surname></string-name>, <string-name><given-names>YP.</given-names> <surname>L.E.</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Jin</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Fangfang</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Xia</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Imaging profile of the COVID-19 infection: Radiologic findings and literature review</article-title>,&#x201D; <source>Radiology: Cardiothoracic Imaging</source>, vol. <volume>2</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>8</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. P.</given-names> <surname>Brady</surname></string-name></person-group>, &#x201C;<article-title>Error and discrepancy in radiology: Inevitable or avoidable?</article-title>,&#x201D; <source>Insights into Imaging</source>, vol. <volume>8</volume>, no. <issue>1</issue>, pp. <fpage>171</fpage>&#x2013;<lpage>182</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Rajkomar</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Oren</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>A. M.</given-names> <surname>Dai</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Hajaj</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Scalable and accurate deep learning with electronic health records</article-title>,&#x201D; <source>NPJ Digital Medicine</source>, vol. <volume>1</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>10</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K. H.</given-names> <surname>Abdulkareem</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Mohammed</surname></string-name>, <string-name><given-names>S. S.</given-names> <surname>Gunasekaran</surname></string-name>, <string-name><given-names>M. N.</given-names> <surname>AL-Mhiqani</surname></string-name>, <string-name><given-names>A. A.</given-names> <surname>Mutlag</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>A review of fog computing and machine learning: Concepts, applications, challenges, and open issues</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>7</volume>, pp. <fpage>153123</fpage>&#x2013;<lpage>153140</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Cruz-Roaa</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Basavanhally</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Gonz&#x00E1;lez</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Gilmore</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Feldman</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Automatic detection of invasive ductal carcinoma in whole slide images with convolutional neural networks</article-title>,&#x201D; <source>Medical Imaging 2014: Digital Pathology</source>, vol. <volume>9041</volume>, no. <issue>216</issue>, pp. <fpage>904103</fpage>, <year>2014</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>K. M.</given-names> <surname>Hosny</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Kassem</surname></string-name> and <string-name><given-names>M. M.</given-names> <surname>Foaud</surname></string-name></person-group>, &#x201C;<article-title>Skin cancer classification using deep learning and transfer learning</article-title>,&#x201D; in <conf-name>Proc. 9th Cairo International Biomedical Engineering Conference</conf-name>, Cairo, Egypt, <publisher-name>IEEE</publisher-name>, pp. <fpage>90</fpage>&#x2013;<lpage>93</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. K. A.</given-names> <surname>Ghani</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Mohammed</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Arunkumar</surname></string-name>, <string-name><given-names>S. A.</given-names> <surname>Mostafa</surname></string-name>, <string-name><given-names>D. A.</given-names> <surname>Ibrahim</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Decision-level fusion scheme for nasopharyngeal carcinoma identification using machine learning techniques</article-title>,&#x201D; <source>Neural Computing and Applications</source>, vol. <volume>32</volume>, no. <issue>3</issue>, pp. <fpage>625</fpage>&#x2013;<lpage>638</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. A.</given-names> <surname>Mohammed</surname></string-name>, <string-name><given-names>M. K. Abd</given-names> <surname>Ghani</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Arunkumar</surname></string-name>, <string-name><given-names>S. A.</given-names> <surname>Mostafa</surname></string-name>, <string-name><given-names>M. K.</given-names> <surname>Abdullah</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Trainable model for segmenting and identifying nasopharyngeal carcinoma</article-title>,&#x201D; <source>Computers &#x0026; Electrical Engineering</source>, vol. <volume>71</volume>, pp. <fpage>372</fpage>&#x2013;<lpage>387</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Talo</surname></string-name>, <string-name><given-names>O.</given-names> <surname>Yildirim</surname></string-name>, <string-name><given-names>U. B.</given-names> <surname>Baloglu</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Aydin</surname></string-name> and <string-name><given-names>U. R.</given-names> <surname>Acharya</surname></string-name></person-group>, &#x201C;<article-title>Convolutional neural networks for multi-class brain disease detection using mri images</article-title>,&#x201D; <source>Computerized Medical Imaging and Graphics</source>, vol. <volume>78</volume>, pp. <fpage>101673</fpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C. L.</given-names> <surname>Van</surname></string-name>, <string-name><given-names>V.</given-names> <surname>Puri</surname></string-name>, <string-name><given-names>N. T.</given-names> <surname>Thao</surname></string-name> and <string-name><given-names>D.</given-names> <surname>Le</surname></string-name></person-group>, &#x201C;<article-title>Detecting lumbar implant and diagnosing scoliosis from Vietnamese X-ray imaging using the pre-trained API models and transfer learning</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>66</volume>, no. <issue>1</issue>, pp. <fpage>17</fpage>&#x2013;<lpage>33</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Rajpurkar</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Irvin</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Zhu</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Mehta</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Chexnet: Radiologist-level pneumonia detection on chest X-rays with deep learning</article-title>,&#x201D; <comment>arXiv Prepr. arXiv: 1711.05225</comment>, pp. <fpage>3</fpage>&#x2013;<lpage>9</lpage>, <year>2017</year> <comment>(Pre-print)</comment>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>De Fauw</surname></string-name>, <string-name><given-names>J. R.</given-names> <surname>Ledsam</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Romera-Paredes</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Nikolov</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Tomasev</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Clinically applicable deep learning for diagnosis and referral in retinal disease</article-title>,&#x201D; <source>Nature Medicine</source>, vol. <volume>24</volume>, no. <issue>9</issue>, pp. <fpage>1342</fpage>&#x2013;<lpage>1350</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Wang</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Wong</surname></string-name></person-group>, &#x201C;<article-title>Covid-net: A tailored deep convolutional neural network design for detection of COVID-19 cases from chest radiography images</article-title>,&#x201D; <source>Scientific Reports</source>, vol. <volume>10</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>12</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>J. P.</given-names> <surname>Cohen</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Morrison</surname></string-name> and <string-name><given-names>L.</given-names> <surname>Dao</surname></string-name></person-group>, &#x201C;<article-title>COVID-19 image data collection</article-title>,&#x201D; <year>2020</year>. <comment>[Online]. Available: <uri>https://github.com/ieee8023/covid-chestxray-dataset</uri></comment>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab>Kaggle&#x2019;s Chest X-ray Images (Pneumonia) dataset</collab></person-group> <year>2020</year>. <comment>[Online]. Available: <uri>https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia</uri></comment>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>E. E.</given-names> <surname>Hemdan</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Shouman</surname></string-name> and <string-name><given-names>M. E.</given-names> <surname>Karar</surname></string-name></person-group>, &#x201C;<article-title>Covidx-net: A framework of deep learning classifiers to diagnose COVID-19 in X-ray images</article-title>,&#x201D; <comment>arXiv Prepr. arXiv: 2003.11055</comment>, <year>2020</year> <comment>(Pre-print)</comment>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Narin</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Kaya</surname></string-name> and <string-name><given-names>Z.</given-names> <surname>Pamuk</surname></string-name></person-group>, &#x201C;<article-title>Automatic detection of coronavirus disease (COVID-19) using X-ray images and deep convolutional neural networks</article-title>,&#x201D; <comment>arXiv Prepr. arXiv: 2003.10849</comment>, <year>2020</year> <comment>(Pre-print)</comment>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. A.</given-names> <surname>Mohammed</surname></string-name>, <string-name><given-names>K. H.</given-names> <surname>Abdulkareem</surname></string-name>, <string-name><given-names>A. S.</given-names> <surname>Al-waisy</surname></string-name>, <string-name><given-names>S. A.</given-names> <surname>Mostafa</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Al-fahdawi</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Benchmarking methodology for selection of optimal COVID-19 diagnostic model based on entropy and topsis methods</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>17</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. H.</given-names> <surname>Kassani</surname></string-name>, <string-name><given-names>P. H.</given-names> <surname>Kassasni</surname></string-name>, <string-name><given-names>M. J.</given-names> <surname>Wesolowski</surname></string-name>, <string-name><given-names>K. A.</given-names> <surname>Schneider</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Deters</surname></string-name></person-group>, &#x201C;<article-title>Automatic detection of coronavirus disease (COVID-19) in X-ray and ct images: A machine learning-based approach</article-title>,&#x201D; <comment>arXiv Prepr. arXiv: 2004.10641</comment>, pp. <fpage>1</fpage>&#x2013;<lpage>18</lpage>, <year>2020</year> <comment>(Pre-print)</comment>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Xie</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Shen</surname></string-name> and <string-name><given-names>Y.</given-names> <surname>Xia</surname></string-name></person-group>, &#x201C;<article-title>COVID-19 screening on chest X-ray images using deep learning based anomaly detection</article-title>,&#x201D; <comment>arXiv Prepr. arXiv: 2003.12338</comment>, <year>2020</year> <comment>(Pre-print)</comment>.</mixed-citation></ref>
<ref id="ref-27"><label>[27]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Dansana</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Kumar</surname></string-name>, <string-name><given-names>J. D.</given-names> <surname>Adhikari</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Mohapatra</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Sharma</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Global forecasting confirmed and fatal cases of COVID-19 outbreak using autoregressive integrated moving average model</article-title>,&#x201D; <source>Frontiers in Public Health</source>, vol. <volume>8</volume>, p. <fpage>580327</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-28"><label>[28]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D. N.</given-names> <surname>Le</surname></string-name>, <string-name><given-names>V. S.</given-names> <surname>Parvathy</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Gupta</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Khanna</surname></string-name>, <string-name><given-names>J. J. P. C.</given-names> <surname>Rodrigues</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>IoT enabled depthwise separable convolution neural network with deep support vector machine for COVID-19 diagnosis and classification</article-title>,&#x201D; <source>International Journal of Machine Learning and Cybernetics</source>, vol. <volume>12</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>14</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-29"><label>[29]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Kumar</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Kumari</surname></string-name></person-group>, &#x201C;<article-title>Detection of coronavirus disease (COVID-19) based on deep features</article-title>,&#x201D; <comment>Preprint, 2020030300</comment>, <year>2020</year> <comment>(Pre-print)</comment>.</mixed-citation></ref>
<ref id="ref-30"><label>[30]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>O.</given-names> <surname>Gozes</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Frid-Adar</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Greenspan</surname></string-name>, <string-name><given-names>P. D.</given-names> <surname>Browning</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Zhang</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Rapid AI development cycle for the coronavirus (COVID-19) pandemic: Initial results for automated detection &#x0026; patient monitoring using deep learning CT image analysis</article-title>,&#x201D; <comment>arXiv Prepr. arXiv: 2003.05037</comment>, pp. <fpage>1</fpage>&#x2013;<lpage>19</lpage>, <year>2020</year> <comment>(Pre-print)</comment>.</mixed-citation></ref>
<ref id="ref-31"><label>[31]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Wu</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Gong</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Deep learning-based model for detecting 2019 novel coronavirus pneumonia on high-resolution computed tomography: A prospective study</article-title>,&#x201D; <source>MedRxiv</source>, vol. <volume>91</volume>, no. <issue>10210</issue>, pp. <fpage>264</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-32"><label>[32]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab>Radiopaedia dataset</collab></person-group>, <year>2020</year>. [Online]. Available: <uri>https://radiopaedia.org/search?lang=us&#x0026;q=covid&#x0026;scope=cases#collapse-by-diagnostic-certainties</uri> <comment>[Accessed: 11 May 2020]</comment>.</mixed-citation></ref>
<ref id="ref-33"><label>[33]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab>Italian Society of Medical and Interventional Radiology (SIRM)</collab></person-group>, <year>2020</year>. [Online]. Available: <uri>https://www.sirm.org/en/italian-society-of-medical-and-interventional-radiology/</uri> <comment>[Accessed: 11 May 2020]</comment>.</mixed-citation></ref>
<ref id="ref-34"><label>[34]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab>Radiological Society of North America (RSNA)</collab></person-group>, <year>2020</year>. [Online]. Available: <uri>https://www.kaggle.com/c/rsna-pneumonia-detection-challenge/data</uri> <comment>[Accessed: 11 May 2020]</comment>.</mixed-citation></ref>
<ref id="ref-35"><label>[35]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><collab>Kaggle&#x2019;s Chest X-ray Images (Pneumonia) dataset</collab></person-group>, <year>2020</year>. [Online]. Available: https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia%20%20%20 <comment>[Accessed: 12 May 2020]</comment>.</mixed-citation></ref>
<ref id="ref-36"><label>[36]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Koonsanit</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Thongvigitmanee</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Pongnapang</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Thajchayapong</surname></string-name></person-group>, &#x201C;<article-title>Image enhancement on digital X-ray images using N-clahe</article-title>,&#x201D; in <conf-name>Proc. 2017 10th Biomedical Engineering Int. Conf.</conf-name>, Hokkaido, Japan, pp. <fpage>1</fpage>&#x2013;<lpage>4</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-37"><label>[37]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>G. E.</given-names> <surname>Hinton</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Osindero</surname></string-name> and <string-name><given-names>Y. W.</given-names> <surname>Teh</surname></string-name></person-group>, &#x201C;<article-title>A fast learning algorithm for deep belief nets</article-title>,&#x201D; <source>Neural Computation</source>, vol. <volume>18</volume>, no. <issue>7</issue>, pp. <fpage>1527</fpage>&#x2013;<lpage>1554</lpage>, <year>2006</year>.</mixed-citation></ref>
<ref id="ref-38"><label>[38]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. S.</given-names> <surname>Al-Waisy</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Qahwaji</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Ipson</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Al-Fahdawi</surname></string-name></person-group>, &#x201C;<article-title>A multimodal deep learning framework using local feature representations for face recognition</article-title>,&#x201D; <source>Machine Vision and Applications</source>, vol. <volume>29</volume>, no. <issue>1</issue>, pp. <fpage>35</fpage>&#x2013;<lpage>54</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-39"><label>[39]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>A. S.</given-names> <surname>Al-Waisy</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Qahwaji</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Ipson</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Al-fahdawi</surname></string-name></person-group>, &#x201C;<article-title>A multimodal biometric system for personal identification based on deep learning approaches</article-title>,&#x201D; in <conf-name>Proc. 2017 Seventh Int. Conf. on Emerging Security Technologies</conf-name>, Canterbury, UK, pp. <fpage>163</fpage>&#x2013;<lpage>168</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-40"><label>[40]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Lee</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Pham</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Largman</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Ng</surname></string-name></person-group>, &#x201C;<article-title>Unsupervised feature learning for audio classification using convolutional deep belief networks</article-title>,&#x201D; in <conf-name>Proc. Advances in Neural Information Processing Systems Conference</conf-name>, Whistler, BC, Canada, pp. <fpage>1096</fpage>&#x2013;<lpage>1104</lpage>, <year>2009</year>.</mixed-citation></ref>
<ref id="ref-41"><label>[41]</label><mixed-citation publication-type="book"><person-group person-group-type="author"><string-name><given-names>A. S.</given-names> <surname>Al-Waisy</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Al-Fahdawi</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Qahwaji</surname></string-name></person-group>, &#x201C;<chapter-title>4 A multi-biometric face recognition system based on multimodal deep learning representations</chapter-title>,&#x201D; in <source>Deep Learning in Computer Vision: Principles and Applications</source>, <publisher-loc>USA</publisher-loc>: <publisher-name>CRC Press</publisher-name>, pp. <fpage>89</fpage>&#x2013;<lpage>126</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-42"><label>[42]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>G. E.</given-names> <surname>Hinton</surname></string-name></person-group>, &#x201C;<article-title>Training products of experts by minimizing contrastive divergence</article-title>,&#x201D; <source>Neural Computation</source>, vol. <volume>14</volume>, no. <issue>8</issue>, pp. <fpage>1771</fpage>&#x2013;<lpage>1800</lpage>, <year>2002</year>.</mixed-citation></ref>
<ref id="ref-43"><label>[43]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Lee</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Roger</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Ranganath</surname></string-name> and <string-name><given-names>A. Y.</given-names> <surname>Ng</surname></string-name></person-group>, &#x201C;<article-title>Convolutional deep belief networks for scalable unsupervised learning of hierarchical representations</article-title>,&#x201D; in <conf-name>Proc. of the 26th Annual Int. Conf. on Machine Learning</conf-name>, Montreal Quebec, Canada, pp. <fpage>609</fpage>&#x2013;<lpage>616</lpage>, <year>2009</year>.</mixed-citation></ref>
<ref id="ref-44"><label>[44]</label><mixed-citation publication-type="book"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Jain</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Flynn</surname></string-name> and <string-name><given-names>A. A.</given-names> <surname>Ross</surname></string-name></person-group>, <source>Handbook of Biometrics</source>. <publisher-loc>US</publisher-loc>: <publisher-name>Springer Science &#x0026; Business Media</publisher-name>, pp. <fpage>1</fpage>&#x2013;<lpage>556</lpage>, <year>2007</year>.</mixed-citation></ref>
<ref id="ref-45"><label>[45]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E. J. S.</given-names> <surname>Luz</surname></string-name>, <string-name><given-names>P. L.</given-names> <surname>Silva</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Silva</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Silva</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Moreira</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Towards an effective and efficient deep learning model for COVID-19 patterns detection in X-ray images</article-title>,&#x201D; <comment>arXiv Prepr. arXiv: 2004.05717</comment>, pp. <fpage>1</fpage>&#x2013;<lpage>10</lpage>, <year>2020</year> <comment>(Pre-print)</comment>.</mixed-citation></ref>
</ref-list>
</back>
</article>