<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">20914</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2022.020914</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>An Automated Deep Learning Based Muscular Dystrophy Detection and Classification Model</article-title>
<alt-title alt-title-type="left-running-head">An Automated Deep Learning Based Muscular Dystrophy Detection and Classification Model</alt-title>
<alt-title alt-title-type="right-running-head">An Automated Deep Learning Based Muscular Dystrophy Detection and Classification Model</alt-title>
</title-group>
<contrib-group content-type="authors">
<contrib id="author-1" contrib-type="author"><name name-style="western"><surname>Gopalakrishnan</surname><given-names>T.</given-names></name><xref ref-type="aff" rid="aff-1">1</xref>
</contrib>
<contrib id="author-2" contrib-type="author"><name name-style="western"><surname>Sudhakaran</surname><given-names>Periakaruppan</given-names></name><xref ref-type="aff" rid="aff-2">2</xref>
</contrib>
<contrib id="author-3" contrib-type="author"><name name-style="western"><surname>Ramya</surname><given-names>K. C.</given-names></name><xref ref-type="aff" rid="aff-3">3</xref>
</contrib>
<contrib id="author-4" contrib-type="author"><name name-style="western"><surname>Sathesh Kumar</surname><given-names>K.</given-names></name><xref ref-type="aff" rid="aff-4">4</xref>
</contrib>
<contrib id="author-5" contrib-type="author" corresp="yes"><name name-style="western"><surname>Al-Wesabi</surname><given-names>Fahd N.</given-names></name><xref ref-type="aff" rid="aff-5">5</xref><xref ref-type="aff" rid="aff-6">6</xref><email>falwesabi@kku.edu.sa</email>
</contrib>
<contrib id="author-6" contrib-type="author"><name name-style="western"><surname>Alohali</surname><given-names>Manal Abdullah</given-names></name><xref ref-type="aff" rid="aff-7">7</xref>
</contrib>
<contrib id="author-7" contrib-type="author"><name name-style="western"><surname>Hilal</surname><given-names>Anwer Mustafa</given-names></name><xref ref-type="aff" rid="aff-8">8</xref>
</contrib>
<aff id="aff-1"><label>1</label><institution>School of Computer Science and Engineering, Vellore Institute of Technology</institution>, <addr-line>Vellore, 632014</addr-line>, <country>India</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Computer Science and Engineering, SRM TRP Engineering College</institution>, <addr-line>Tiruchirappalli, 621105</addr-line>, <country>India</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Electrical and Electronics Engineering, Sri Krishna College of Engineering and Technology</institution>, <addr-line>Coimbatore, 641008</addr-line>, <country>India</country></aff>
<aff id="aff-4"><label>4</label><institution>School of Computing, Kalasalingam Academy of Research and Education</institution>, <addr-line>Krishnankoil, 626128</addr-line>, <country>India</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Computer Science, King Khalid University</institution>, <addr-line>Muhayel Aseer</addr-line>, <country>KSA</country></aff>
<aff id="aff-6"><label>6</label><institution>Faculty of Computer and IT, Sana&#x0027;a University</institution>, <country>Yemen</country></aff>
<aff id="aff-7"><label>7</label><institution>Department of Information Systems, College of Computer and Information Sciences, Princess Nourah Bint Abdulrahman University</institution>, <country>Saudi Arabia</country></aff>
<aff id="aff-8"><label>8</label><institution>Department of Computer and Self Development, Preparatory Year Deanship, Prince Sattam bin Abdulaziz University</institution>, <addr-line>Alkharj</addr-line>, <country>Saudi Arabia</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Fahd N. Al-Wesabi. Email: <email>falwesabi@kku.edu.sa</email></corresp>
</author-notes>
<pub-date pub-type="epub" date-type="pub" iso-8601-date="2021-10-18">
<day>18</day>
<month>10</month>
<year>2021</year></pub-date>
<volume>71</volume>
<issue>1</issue>
<fpage>305</fpage>
<lpage>320</lpage>
<history>
<date date-type="received"><day>14</day><month>6</month><year>2021</year></date>
<date date-type="accepted"><day>15</day><month>7</month><year>2021</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2022 Gopalakrishnan et al.</copyright-statement>
<copyright-year>2022</copyright-year>
<copyright-holder>Gopalakrishnan et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_20914.pdf"></self-uri>
<abstract>
<p>Muscular Dystrophy (MD) is a group of inherited muscular diseases that are commonly diagnosed with the help of techniques such as muscle biopsy, clinical presentation, and Muscle Magnetic Resonance Imaging (MRI). Among these techniques, Muscle MRI recommends the diagnosis of muscular dystrophy through identification of the patterns that exist in muscle fatty replacement. But the patterns overlap among various diseases whereas there is a lack of knowledge prevalent with regards to disease-specific patterns. Therefore, artificial intelligence techniques can be used in the diagnosis of muscular dystrophies, which enables us to analyze, learn, and predict for the future. In this scenario, the current research article presents an automated muscular dystrophy detection and classification model using Synergic Deep Learning (SDL) method with extreme Gradient Boosting (XGBoost), called SDL-XGBoost. SDL-XGBoost model has been proposed to act as an automated deep learning (DL) model that examines the muscle MRI data and diagnose muscular dystrophies. SDL-XGBoost model employs Kapur&#x0027;s entropy based Region of Interest (RoI) for detection purposes. Besides, SDL-based feature extraction process is applied to derive a useful set of feature vectors. Finally, XGBoost model is employed as a classification approach to determine proper class labels for muscle MRI data. The researcher conducted extensive set of simulations to showcase the superior performance of SDL-XGBoost model. The obtained experimental values highlighted the supremacy of SDL-XGBoost model over other methods in terms of high accuracy being 96.18&#x0025; and 94.25&#x0025; classification performance upon DMD and BMD respectively. Therefore, SDL-XGBoost model can help physicians in the diagnosis of muscular dystrophies by identifying the patterns of muscle fatty replacement in muscle MRI.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Muscle magnetic resonance imaging</kwd>
<kwd>XGBoost</kwd>
<kwd>synergic deep learning</kwd>
<kwd>RoI detection</kwd>
<kwd>kapur&#x0027;s entropy</kwd>
<kwd>muscular dystrophies</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1"><label>1</label><title>Introduction</title>
<p>In 1954, Walton and Nattrass defined Muscular Dystrophy (MD) as a heterogeneous set of primary genetic diseases that impact muscles and is medically characterized by advanced muscular weakness and waste. Psychologically, this group of diseases is united by the occurrence of necrotic and regenerative processes that are frequently related to an increasing number of connective and adipose tissues [<xref ref-type="bibr" rid="ref-1">1</xref>]. Among MD diseases, the current study concentrates on dystrophinopathies, Emery-Dreifuss muscular dystrophies, congenital muscular dystrophies, limb-girdle muscular dystrophies, and facioscapulohumeral MD. The heterogeneity of distinct disorders is commonly described through the integration of medical, genetic, molecular, and pathological features.</p>
<p>Dystrophinopathies, an X-linked advanced genetic degenerative disease, occurs as a result of deficiency or absence of dystrophin, a sarcolemmal protein. This disease primarily affects the skeletal muscles. Dystrophin and the proteins related to the family form a complex yet essential architecture that works with intracellular actin cytoskeleton to extra-cellular matrix. This association strengthens the sarcolemma from mechanical stress during muscle contraction. It is coded by a very huge chromosome that contains over 2.5 million base pairs and 79 exons. Dystrophin may experience an out-of-the-frame mutation and result in Duchenne Muscular Dystrophy (DMD) while an in-frame mutation may result in milder allelic structure, named as Becker Muscular Dystrophy (BMD). The large scale removal in both DMD and BMD is the most commonly studied mutation type. <?A3B2 "fig1",5,"anchor"?><xref ref-type="fig" rid="fig-1">Fig. 1</xref> shows the dystrophinopathies diagnostic technique [<xref ref-type="bibr" rid="ref-2">2</xref>].</p>
<p>DMD, the commonly recorded MD, occurs in a frequency of 1 out of 5000 male children across the globe. The symptoms are typically identified within two years of childbirth by identifying an obvious delay in motor growth. Muscle participation is frequently symmetrical and bilateral. Though similar dystrophin chromosomes undergo mutation, the medical characteristics of BMD differ significantly from DMD in terms of phenotype to no-weak subjective. The onset of disease is experienced at later stages. Calf hypertrophy and muscle cramp are often cited as primary diagnostic features for dystrophinopathies. In such case, multiplex Polymerase Chain Reaction (PCR) should be used for diagnosis [<xref ref-type="bibr" rid="ref-2">2</xref>].</p>
<p>With advantaged presentation of Medical Magnetic Resonance Imaging (MMRI) in radiographic sciences, the researchers have noticed the capability of MRI to generate high-resolution anatomical images of skeletal muscle. In comparison with previous imaging modes, MRI shows a great difference in between several soft tissue forms. Thus the physicians can investigate separate muscles in sharp difference to adapt fat. In the recent years, muscle MRI has attained extensive medical usage in inflammatory myopathies. This has become possible after the introduction of novel immunosuppressive agent that can precisely diagnose and monitor the responses of human body to the treatment. The capability of MRI to differentiate acute inflammation from chronic fatty replacement in muscle offers a significant predictive data [<xref ref-type="bibr" rid="ref-3">3</xref>]. Initially, inherited testing was used since it was progressively accessible and inexpensive. Further, it also provided extremely specific diagnostic data, an impossible achievement in muscle imaging. Various studies compared the features of muscle MRI and existing investigation techniques and clarified the former&#x0027;s advantages. Some of the features in MRI can create an impact upon clinical decision making too. In spite of these constraints, there is a rising attention upon imaging technique (especially MRI) to investigate genetic muscle diseases [<xref ref-type="bibr" rid="ref-4">4</xref>,<xref ref-type="bibr" rid="ref-5">5</xref>]. The initial trial, conducted in these disease populations, has emphasized the applicability of this study results for muscle dystrophy only [<xref ref-type="bibr" rid="ref-6">6</xref>]. 
The purpose of noninvasive measure, which is recurrent for numerous times, is only to enhance the quality of trials conducted for this disease.</p><fig id="fig-1"><label>Figure 1</label><caption><title>Diagnosis of dystrophinopathies</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_20914-fig-1.png"/></fig>
<p>Muscle MRI recognizes the feature patterns usually observed in muscle participation and are associated with particular disorders [<xref ref-type="bibr" rid="ref-7">7</xref>]. But, majority of these patterns overlap in nature and high grade of specialty is required in MRI information to distinguish one disease from another. Artificial Intelligence (AI) technique has been applied successfully in most of the problem-solving techniques followed in healthcare domain [<xref ref-type="bibr" rid="ref-8">8</xref>]. Machine Learning (ML) utilizes techniques to examine information, learn and create a decision or forecast the future [<xref ref-type="bibr" rid="ref-9">9</xref>]. The study conducted earlier used ML approach upon a massive dataset of muscle MRIs which were taken from persons inherently confirmed with the diagnosis of MD. The goal of that study was to establish an information method that helps in the diagnostic procedure of these disorders.</p>
<p>The current research article presents an automated muscular dystrophy detection and classification model using Synergic Deep Learning (SDL) with extreme Gradient Boosting (XGBoost), called SDL-XGBoost. The aim of the proposed SDL-XGBoost model is to act as an automated Deep Learning (DL) model that examines muscle MRI data and diagnose the muscular dystrophies. In addition, the proposed SDL-XGBoost model employs Kapur&#x0027;s entropy-based Region of Interest (RoI) detection. Besides, SDL-based feature extraction process is applied to derive a useful set of feature vectors. Finally, XGBoost model is employed as a classification approach to determine proper class labels for muscle MRI data. The novelty of the current study lies in the design of SDL-XGBoost model proposed for diagnosing muscular dystrophies. A comprehensive experimental analysis was performed to showcase the superior performance of SDL-XGBoost model. The results were examined under different aspects. The contributions of the paper are summarized herewith.
<list list-type="bullet">
<list-item><p>An automated muscular dystrophy detection and classification model i.e., SDL-XGBoost model is proposed.</p></list-item>
<list-item><p>Kapur&#x0027;s entropy-based RoI detection and SDL based feature extraction processes are designed.</p></list-item>
<list-item><p>XGBoost model is employed as a classification approach to determine proper class labels for muscle MRI data</p></list-item>
</list></p>
<p>Remaining sections of the article are organized as follows. Section 2 offers an overview of deep learning and the existing works. Section 3 introduces the proposed SDL-XGBoost model and the experimental results are discussed in Section 4. At last, Section 5 concludes the work.</p>
</sec>
<sec id="s2"><label>2</label><title>Background Information and Literature Review</title>
<p>The current section briefs the basic concepts in DL models, existing works related to dystrophinopathy diagnosis and summarizes the existing works conducted so far in this domain.</p>
<sec id="s2_1"><label>2.1</label><title>Overview of Deep Learning</title>
<p>In general, ML techniques undergo training to perform helpful tasks based on manual stimulation. This training process occurs through feature extraction from raw information or through feature learning by additional simply ML techniques. In DL techniques, the system learns the beneficial representation and automates the features from raw information, bypassing manual and problematic phases. DL techniques are gradually being applied in the improvement of medical practices while the healthcare industry is inclining towards technology in a gradual manner. In clinical imaging process, convolutional neural networks (CNN) generate attention towards DL [<xref ref-type="bibr" rid="ref-10">10</xref>]. This is done in a helpful and influential manner to study the representation of images and additional integrated information. Earlier, effective utilization of CNN is probable since this feature is usually designed by hand or made by less powerful ML techniques. When it became probable to utilize feature learning from the information, several handmade image features were left out and it turned out to be almost a worthless method, related to feature detectors, as established by CNN [<xref ref-type="bibr" rid="ref-11">11</xref>].</p>
<p>CNN is utilized to enhance the efficiency in radiology practices via protocol determination based on short text classifiers [<xref ref-type="bibr" rid="ref-12">12</xref>]. CNN is applied to decrease gadolinium dosage and, in parallel enhance the brain MRI through an arrangement of greatness [<xref ref-type="bibr" rid="ref-13">13</xref>] with no substantial decline in image quality. DL is employed in radiotherapy [<xref ref-type="bibr" rid="ref-14">14</xref>] through the integration of confocal laser endomicroscopy for automated recognition of intra-operative CLE images [<xref ref-type="bibr" rid="ref-15">15</xref>]. One more significant application of CNNs is the advanced deformable image registration that allows measurable investigation over distinct physical imaging modals and time. For instance, elastic registration is used between 3D MRI and transrectal ultra-sound to direct the target prostate biopsy [<xref ref-type="bibr" rid="ref-16">16</xref>]. Deformable registration is applied for brain MRI if &#x201C;cue-aware deep regression network&#x201D; studies are provided with a group of trained images. Here, the displacement vectors are related to a pair of reference subject patches. The brain MR image pairs are quickly registered for deformable image through patch-wise forecasting of Large Deformation Diffeomorphic Metric Mapping technique. Without supervision, CNN-dependent technique is used for deformable image registration of cone beam CT to utilize a deep convolution inverse graphic networks [<xref ref-type="bibr" rid="ref-17">17</xref>].</p>
</sec>
<sec id="s2_2"><label>2.2</label><title>Prior Works on Muscular Dystrophies Diagnosis</title>
<p>D&#x00ED;az et al. [<xref ref-type="bibr" rid="ref-18">18</xref>] proposed an automated model that identifies muscle MRI patterns and its application upon the diagnosis of MD was studied earlier. Random Forest (RF) ML approach was utilized in this study to find a technique that can differentiate the distinct disorders. RF is an ML technique which can fit in huge datasets and can perform both classification and regression processes. Most of the constraints in ML approaches, like RF, are complex enough to decode. Yang et al. [<xref ref-type="bibr" rid="ref-19">19</xref>] included CNN and MRI imaging processes and proposed a method for testing as well as authenticating the consistency of dystrophinopathy diagnosis in muscle MRI. In ROI, every actual MRI image uses the Otsu threshold and adaptive window technique.</p>
</sec>
<sec id="s2_3"><label>2.3</label><title>Problem Identification/Summary of the Reviewed Works</title>
<p>MD can be particularly diagnosed based on specific signs and symptoms observed from medical records, physical investigation, and/or muscle biopsy, while the latter is further processed by geneticists through Sanger sequencing method. Recently, numerous muscle participation patterns are labeled for recognition and assistance for diagnostic procedures. But, it has been established that MRI-dependent diagnostic measures, projected for disease diagnosis, are not always helpful in regular healthcare setting, where MRI is examined by doctors. So, the researcher determines that a DL technique will be useful in recognizing the feature patterns that can guide the physicians towards genetic testing. This technique is created to differentiate the disorders with high accuracy over human specialists in the domain. These methods assist better in MD diagnostic procedure. Further, it provides a possible way for further genetic testing or strengthening the existing pathology to find the attained mutation. Researchers consider this as a proof of idea in which AI is employed in the domain of muscle MRI.</p>
</sec>
</sec>
<sec id="s3"><label>3</label><title>The Proposed Muscular Dystrophy Diagnosis and Classification Model</title>
<p>The presented SDL-XGBoost model utilizes the muscle MRI patterns to identify the presence of Muscular dystrophy as illustrated in <?A3B2 "fig2",5,"anchor"?><xref ref-type="fig" rid="fig-2">Fig. 2</xref>. The proposed SDL-XGBoost method involves three major processes such as RoI detection, feature selection, and classification. At the initial stage, Kapur&#x0027;s thresholding is applied to determine the regions of interest. Next, SDL model is employed to derive a useful set of feature vectors. Finally, XGBoost model is utilized to allocate appropriate class labels for muscle MRI data.</p>
<fig id="fig-2"><label>Figure 2</label><caption><title>The working process of SDL-XGBoost model</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_20914-fig-2.png"/></fig>
<sec id="s3_1"><label>3.1</label><title>Region of Interest Detection</title>
<p>Kapur [<xref ref-type="bibr" rid="ref-20">20</xref>] presented another thresholding approach which was utilized to find the best threshold for image segmentation. This method deploys entropy followed by probability distribution of the image histogram. This technique is custom-made to find an optimum <inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> that can optimize the entire entropy. In order to achieve bi-level instance, the objective function of Kapur&#x0027;s problem is represented by the equation given herewith.
<disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>a</mml:mi><mml:mi>p</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math></disp-formula>
</p>
<p>The entropies <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> and <inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> are estimated by:
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mi mathvariant="normal">l</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow></mml:mrow></mml:mfrac><mml:mi>l</mml:mi><mml:mi>n</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow></mml:mrow></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="normal">a</mml:mi><mml:mi mathvariant="normal">n</mml:mi><mml:mi mathvariant="normal">d</mml:mi></mml:mrow><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:munderover><mml:mrow><mml:mo 
movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mi>t</mml:mi><mml:mi>h</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>L</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:mrow></mml:mfrac><mml:mi>l</mml:mi><mml:mi>n</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:mrow></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>
</p>
<p>In <xref ref-type="disp-formula" rid="eqn-2">Eq. (2)</xref>, <inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:mi>P</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> denotes the probability distribution of the intensity level attained, <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> and <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> are probability distributions for the classes <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> and <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>. <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:mi>l</mml:mi><mml:mi>n</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mo>.</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> represents the natural logarithm. In line with Otsu&#x0027;s approach, entropy dependent method is changed to suit the multi thresholding values. In this scenario, the image should be split into <italic>k</italic> class with the help of <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:mi>k</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula> thresholds. The objective function is represented herewith.
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>a</mml:mi><mml:mi>p</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>T</mml:mi><mml:mi>H</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>k</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math></disp-formula>
where <inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:mi>T</mml:mi><mml:mi>H</mml:mi><mml:mo>=</mml:mo><mml:mo stretchy="false">[</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mi>t</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mi>t</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">]</mml:mo></mml:math></inline-formula> denotes the vector comprised of multiple thresholds. Every entropy is calculated individually by corresponding <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> value, thus <xref ref-type="disp-formula" rid="eqn-3">Eq. (3)</xref> is extended for <italic>k</italic> entropy and is defined by [<xref ref-type="bibr" rid="ref-21">21</xref>]:
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:msubsup><mml:mi>H</mml:mi><mml:mi>k</mml:mi><mml:mi>c</mml:mi></mml:msubsup><mml:mo>=</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mi>t</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mi>L</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:mfrac><mml:mi>l</mml:mi><mml:mi>n</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>
</p>
<p>Now, the probability occurrence <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mi>&#x03C9;</mml:mi><mml:mn>0</mml:mn><mml:mi>c</mml:mi></mml:msubsup><mml:mo>,</mml:mo><mml:mrow><mml:mtext>&#xA0;</mml:mtext></mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:mtext>&#xA0;</mml:mtext></mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> values are attained for <italic>k</italic> class whereas the probability distribution is denoted by <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:mi>P</mml:mi><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula>.</p>
</sec>
<sec id="s3_2"><label>3.2</label><title>SDL Based Feature Extraction</title>
<p>The presented SDL technique, represented by <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:mi>S</mml:mi><mml:mi>D</mml:mi><mml:mrow><mml:msup><mml:mi>L</mml:mi><mml:mi>n</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> is comprised of three important components [<xref ref-type="bibr" rid="ref-22">22</xref>&#x2013;<xref ref-type="bibr" rid="ref-24">24</xref>]: an image pair input layer, <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:msubsup><mml:mi>C</mml:mi><mml:mi>n</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> synergic network and <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:mi>n</mml:mi></mml:math></inline-formula> DCNN elements. A special case <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:mi>S</mml:mi><mml:mi>D</mml:mi><mml:mrow><mml:msup><mml:mi>L</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:math></inline-formula> is displayed in <?A3B2 "fig3",5,"anchor"?><xref ref-type="fig" rid="fig-3">Fig. 3</xref> [<xref ref-type="bibr" rid="ref-24">24</xref>]. Every DCNN element, present in network structure, processes the images separately as per the study portrayal under the supervision of true label of input images [<xref ref-type="bibr" rid="ref-22">22</xref>]. In synergic network, an entirely integrated structure is utilized to authenticate whether an input pair belongs to similar classification or not. It further provides the correct feedback in case of a synergic error. SDL method has three components which are displayed in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>.</p>
<fig id="fig-3"><label>Figure 3</label><caption><title>Architecture of SDL method</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_20914-fig-3.png"/></fig>
<sec id="s3_2_1"><label>3.2.1</label><title>Pair Input Layer</title>
<p>In comparison with conventional DCNN, the presented <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:mrow><mml:mtext>SD</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mtext>L</mml:mtext></mml:mrow><mml:mi>n</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> technique concurrently admits <italic>n</italic> input images that are arbitrarily chosen from the training set. Every image, composed of class label, is fed as input to DCNN modules whereas every pair of images corresponds to synergic label which is utilized with a synergic network. In order to unify the image size, the researcher resized every image into <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:mn>224</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>224</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn></mml:math></inline-formula> by utilizing bi-cubic interpolation.</p>
</sec>
<sec id="s3_2_2"><label>3.2.2</label><title>DCNN Components</title>
<p>The popular residual network [<xref ref-type="bibr" rid="ref-23">23</xref>] possesses robust illustration capability due to which the researcher uses 50 pre-trained layers of residual NN (ResNet-50) in the beginning of all DCNN modules represented by DCNN-a <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula>. But, it can be noted down that all DCNN like GoogLeNet, VGGNet, and AlexNet are rooted in <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:mrow><mml:mtext>SD</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mtext>L</mml:mtext></mml:mrow><mml:mi>n</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> technique as DCNN modules. All the DCNN modules are trained with the help of an image order <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:mi>X</mml:mi><mml:mo>=</mml:mo><mml:mo fence="false" stretchy="false">{</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>x</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mi>x</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>2</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mi>x</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>M</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo fence="false" stretchy="false">}</mml:mo></mml:math></inline-formula> and a respective class label order <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:mi>Y</mml:mi><mml:mo>=</mml:mo><mml:mo 
fence="false" stretchy="false">{</mml:mo><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>2</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>M</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo fence="false" stretchy="false">}</mml:mo></mml:math></inline-formula>. Both aim at identifying a group of variables <inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:mi>&#x03B8;</mml:mi></mml:math></inline-formula> that can reduce the succeeding cross-entropy loss which is given by
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:mi>l</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>&#x03B8;</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>M</mml:mi></mml:mfrac><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>M</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>K</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mn>1</mml:mn><mml:mo fence="false" stretchy="false">{</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo fence="false" stretchy="false">}</mml:mo><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msubsup><mml:mi>z</mml:mi><mml:mrow><mml:mi mathvariant="normal">b</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msubsup></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mrow><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>K</mml:mi></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi mathvariant="normal">l</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:mfrac></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula>
where <italic>K</italic> denotes the number of classes, <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:mrow><mml:msup><mml:mi>Z</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>x</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mi>&#x03B8;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> indicates the forward computation. The optimized problem can be resolved by utilizing mini batch Stochastic Gradient Descent (SGD) (i.e., mini batch SGD) technique. The attained variable set for DCNN-a is represented as <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:math></inline-formula>, and the variables cannot be shared between distinct DCNN modules.</p>
</sec>
<sec id="s3_2_3"><label>3.2.3</label><title>Synergic Network</title>
<p>In addition, the trained modules of every DCNN are supervised by synergic label of all the pairs of images. The researcher implemented a synergic network that contains embedded layers, whole connected learning and output layers. Here, a pair of images <inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>A</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>B</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is assumed as input to two DCNN modules (DCNN-<inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:mi>a</mml:mi></mml:math></inline-formula>, DCNN-b), correspondingly. The output of 2<sup>nd</sup> last entire connecting layer in a DCNN module is determined as <inline-formula id="ieqn-30"><mml:math id="mml-ieqn-30"><mml:mrow><mml:mtext>aDs</mml:mtext></mml:mrow></mml:math></inline-formula>. The deep image feature learning technique with DCNN is attained through forwarding computation and is given by
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:mtable columnalign="left" rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>A</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>A</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>B</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>B</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>b</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
</p>
<p>Next, the features of both images are concatenated as <inline-formula id="ieqn-31"><mml:math id="mml-ieqn-31"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>A</mml:mi><mml:mo>&#x2218;</mml:mo><mml:mi>B</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> and fed as input to the synergic network. The corresponding predicted synergic label of the image pair is determined herewith.
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>S</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>A</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>B</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable columnalign="left" rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:mrow><mml:mn>1</mml:mn><mml:mspace width="1em" /><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>A</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>B</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mn>0</mml:mn><mml:mspace width="1em" /><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>A</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2260;</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>B</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow><mml:mo fence="true" stretchy="true" symmetric="true"></mml:mo></mml:mrow></mml:math></disp-formula>
</p>
<p>To prevent the imbalanced data problem, the proportion of interclass image pairs in all the batches is maintained in the range of 45&#x0025;&#x2013;55&#x0025;. It is convenient to monitor the synergic signal via additional sigmoid layers and utilize the subsequent binary cross-entropy loss which is defined herewith.
<disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:mrow><mml:msup><mml:mi>l</mml:mi><mml:mi>S</mml:mi></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mi>S</mml:mi></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>S</mml:mi></mml:msub></mml:mrow><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:mover><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>S</mml:mi></mml:msub></mml:mrow><mml:mo>&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo>+</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>S</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mover><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>S</mml:mi></mml:msub></mml:mrow><mml:mo>&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>
where <inline-formula id="ieqn-32"><mml:math id="mml-ieqn-32"><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mi>S</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> denotes the variables of synergic network, <inline-formula id="ieqn-33"><mml:math id="mml-ieqn-33"><mml:mrow><mml:mover><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>S</mml:mi></mml:msub></mml:mrow><mml:mo>&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mi mathvariant="script">F</mml:mi></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>A</mml:mi><mml:mo>&#x2218;</mml:mo><mml:mi>B</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mi>S</mml:mi></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is the forward computation of synergic network. It authenticates either the input image pair that belongs to a similar classification or not, and it provides correct feedback in case when a synergic error occurs.</p>
</sec>
<sec id="s3_2_4"><label>3.2.4</label><title>Training and Testing</title>
<p>The presented <inline-formula id="ieqn-34"><mml:math id="mml-ieqn-34"><mml:mrow><mml:mtext>SD</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mtext>L</mml:mtext></mml:mrow><mml:mi>n</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> technique contains <italic>n</italic> DCNN modules and <inline-formula id="ieqn-35"><mml:math id="mml-ieqn-35"><mml:msubsup><mml:mi>C</mml:mi><mml:mi>n</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:math></inline-formula> synergic network. During end-to-end training, the variables of every DCNN module and all synergic networks are defined by [<xref ref-type="bibr" rid="ref-24">24</xref>],
<disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable columnalign="left" rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03B7;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x22C5;</mml:mo><mml:mrow><mml:msup><mml:mi>&#x0394;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03B7;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x22C5;</mml:mo><mml:mrow><mml:msup><mml:mi>&#x0394;</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow><mml:mo fence="true" stretchy="true" symmetric="true"></mml:mo></mml:mrow></mml:math></disp-formula>
where <inline-formula id="ieqn-36"><mml:math id="mml-ieqn-36"><mml:mi>&#x03B7;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> denotes the parameter learning rate, <inline-formula id="ieqn-37"><mml:math id="mml-ieqn-37"><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> denotes the synergic network between DCNN-a and DCNN-b,
<disp-formula id="eqn-10"><label>(10)</label><mml:math id="mml-eqn-10" display="block"><mml:mrow><mml:msup><mml:mi>&#x0394;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:msup><mml:mi>l</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:mfrac><mml:mo>+</mml:mo><mml:mi>&#x03BB;</mml:mi><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>b</mml:mi><mml:mo>&#x2260;</mml:mo><mml:mi>a</mml:mi></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mfrac><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:msup><mml:mi>l</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:mfrac></mml:math></disp-formula>
<disp-formula id="eqn-11"><label>(11)</label><mml:math id="mml-eqn-11" display="block"><mml:mrow><mml:msup><mml:mi>&#x0394;</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:msup><mml:mi>l</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:msup><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>S</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:mfrac></mml:math></disp-formula>
</p>
<p>and <inline-formula id="ieqn-38"><mml:math id="mml-ieqn-38"><mml:mi>&#x03BB;</mml:mi></mml:math></inline-formula> denotes the tradeoff between the classification error and the synergic error.</p>
<p>When employing the trained <inline-formula id="ieqn-39"><mml:math id="mml-ieqn-39"><mml:mrow><mml:mtext>SD</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mtext>L</mml:mtext></mml:mrow><mml:mi>n</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> technique in the classifiers of a tested image <inline-formula id="ieqn-40"><mml:math id="mml-ieqn-40"><mml:mi>x</mml:mi></mml:math></inline-formula>, all DCNN modules provide a predictive vector <inline-formula id="ieqn-41"><mml:math id="mml-ieqn-41"><mml:mrow><mml:msup><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msubsup><mml:mi>p</mml:mi><mml:mn>1</mml:mn><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:msubsup><mml:mi>p</mml:mi><mml:mn>2</mml:mn><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:msubsup><mml:mi>p</mml:mi><mml:mi>K</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msubsup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> which act as the activator in last connecting layers. The class label of tested image is defined herewith.
<disp-formula id="eqn-12"><label>(12)</label><mml:math id="mml-eqn-12" display="block"><mml:mi>y</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mrow><mml:mi mathvariant="normal">a</mml:mi><mml:mi mathvariant="normal">r</mml:mi><mml:mi mathvariant="normal">g</mml:mi><mml:mi mathvariant="normal">m</mml:mi><mml:mi mathvariant="normal">a</mml:mi><mml:mi mathvariant="normal">x</mml:mi></mml:mrow></mml:mrow><mml:mi>b</mml:mi></mml:munder><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:msubsup><mml:mi>p</mml:mi><mml:mn>1</mml:mn><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:msubsup><mml:mi>p</mml:mi><mml:mi>b</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msubsup><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:msubsup><mml:mi>p</mml:mi><mml:mi>K</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>a</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msubsup></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></disp-formula>
</p>
</sec>
</sec>
<sec id="s3_3"><label>3.3</label><title>XGBoost Based Classification</title>
<p>XGBoost is a supervised EL technique that simulates a generalization gradient boosting technique. A regulation term is involved in this technique to produce accuracy through multicore and distributed settings for classifiers, regression and ranking task [<xref ref-type="bibr" rid="ref-25">25</xref>]. The specific dataset comprises <italic>n</italic> instances and <inline-formula id="ieqn-42"><mml:math id="mml-ieqn-42"><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:math></inline-formula> features <inline-formula id="ieqn-43"><mml:math id="mml-ieqn-43"><mml:mi>X</mml:mi><mml:mo>=</mml:mo><mml:mo fence="false" stretchy="false">{</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:msubsup><mml:mo fence="false" stretchy="false">}</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:msubsup><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mtext>R</mml:mtext></mml:mrow><mml:mi>m</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> by labels <inline-formula id="ieqn-44"><mml:math id="mml-ieqn-44"><mml:mrow><mml:mtext>y</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mo fence="false" stretchy="false">{</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:msubsup><mml:mo fence="false" stretchy="false">}</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:msubsup><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2208;</mml:mo><mml:mo fence="false" stretchy="false">{</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2200;</mml:mi><mml:mi>j</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mi>C</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo fence="false" stretchy="false">}</mml:mo></mml:math></inline-formula>, where <inline-formula id="ieqn-45"><mml:math id="mml-ieqn-45"><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> denotes the <inline-formula id="ieqn-46"><mml:math id="mml-ieqn-46"><mml:mrow><mml:msup><mml:mi>j</mml:mi><mml:mrow><mml:mrow><mml:mtext>th</mml:mtext></mml:mrow></mml:mrow></mml:msup></mml:mrow></mml:math></inline-formula> class from <italic>C</italic> total classes. A collective of DT utilizes <italic>K</italic> additive functions to forecast the output and is given by:
<disp-formula id="eqn-13"><label>(13)</label><mml:math id="mml-eqn-13" display="block"><mml:mrow><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>K</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2208;</mml:mo><mml:mi>F</mml:mi></mml:math></disp-formula>
where <inline-formula id="ieqn-47"><mml:math id="mml-ieqn-47"><mml:mi>F</mml:mi><mml:mo>=</mml:mo><mml:mo fence="false" stretchy="false">{</mml:mo><mml:mrow><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>w</mml:mi><mml:mi>q</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo fence="false" stretchy="false">}</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>q</mml:mi><mml:mo>:</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mtext>R</mml:mtext></mml:mrow><mml:mi>m</mml:mi></mml:msup></mml:mrow><mml:mo stretchy="false">&#x2192;</mml:mo><mml:mi>T</mml:mi><mml:mo>,</mml:mo><mml:mi>w</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mtext>R</mml:mtext></mml:mrow><mml:mi>T</mml:mi></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is the space of CART. Now, <italic>q</italic> and <italic>T</italic> denote the structure and count of leaves in tree. All the trees <inline-formula id="ieqn-48"><mml:math id="mml-ieqn-48"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> correspond to an independent <italic>q</italic> and leaf weight, <inline-formula id="ieqn-49"><mml:math id="mml-ieqn-49"><mml:mi>w</mml:mi></mml:math></inline-formula>. For example, the decision rule in tree structure is utilized, represented as <italic>q</italic> to categorize the leaves. The last forecast is estimated by adding the score in corresponding leaves which are defined as <inline-formula id="ieqn-50"><mml:math id="mml-ieqn-50"><mml:mi>w</mml:mi></mml:math></inline-formula>. Next, the succeeding regulation term is utilized to learn the group of function in the ensemble technique as given herewith.
<disp-formula id="eqn-14"><label>(14)</label><mml:math id="mml-eqn-14" display="block"><mml:mi>&#x2113;</mml:mi><mml:mo>=</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mi>l</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>+</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>K</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mi>&#x03A9;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>f</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#xA0;</mml:mtext><mml:mi>w</mml:mi><mml:mi>h</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mtext>&#xA0;</mml:mtext><mml:mi>&#x03A9;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mi>&#x03BE;</mml:mi><mml:mi>T</mml:mi><mml:mo>+</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:mi>&#x03BE;</mml:mi><mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>w</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow></mml:mrow><mml:msup><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:math></disp-formula>
where <italic>l</italic> denotes the differentiable convex loss function that measures the variance between prediction <inline-formula id="ieqn-51"><mml:math id="mml-ieqn-51"><mml:mrow><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> and target <inline-formula id="ieqn-52"><mml:math id="mml-ieqn-52"><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> whereas the 2<sup>nd</sup> term <inline-formula id="ieqn-53"><mml:math id="mml-ieqn-53"><mml:mspace width="thickmathspace" /><mml:mi>&#x03A9;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>f</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> defines the difficulty of tree <inline-formula id="ieqn-54"><mml:math id="mml-ieqn-54"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula>. Here, <inline-formula id="ieqn-55"><mml:math id="mml-ieqn-55"><mml:mi>&#x03BE;</mml:mi><mml:mi>T</mml:mi></mml:math></inline-formula> and <inline-formula id="ieqn-56"><mml:math id="mml-ieqn-56"><mml:mi>&#x03BE;</mml:mi><mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>w</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow></mml:mrow><mml:msup><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:math></inline-formula> penalize all the tree leaves included further and high weights, correspondingly.</p>
<p><xref ref-type="disp-formula" rid="eqn-14">Eq. (14)</xref> includes functions as its parameters and thus cannot be optimized practically with the help of classical optimization techniques in Euclidean space. Since the model is trained in an additive manner, the objective function for the present iteration <italic>t</italic> is declared based on the forecast made during the former iteration <inline-formula id="ieqn-57"><mml:math id="mml-ieqn-57"><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula>, adapted via the latest tree <inline-formula id="ieqn-58"><mml:math id="mml-ieqn-58"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula>, as given herewith.
<disp-formula id="eqn-15"><label>(15)</label><mml:math id="mml-eqn-15" display="block"><mml:mrow><mml:msup><mml:mi>&#x2113;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mi>l</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:msub><mml:mrow><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>+</mml:mo><mml:mi>&#x03A9;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>
</p>
<p>When dependent loss function in Taylor expansion i.e., <xref ref-type="disp-formula" rid="eqn-15">Eq. (15)</xref>, for 1<sup>st</sup> and 2<sup>nd</sup> order gradients, attain subsequent simplification, the objective function is then given by,
<disp-formula id="eqn-16"><label>(16)</label><mml:math id="mml-eqn-16" display="block"><mml:mrow><mml:msup><mml:mi>&#x2113;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>&#x2245;</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>g</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>+</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:msubsup><mml:mi>f</mml:mi><mml:mi>t</mml:mi><mml:mn>2</mml:mn></mml:msubsup><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mi>&#x03A9;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>
</p>
<p>where <inline-formula id="ieqn-59"><mml:math id="mml-ieqn-59"><mml:mrow><mml:msub><mml:mi>g</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:msub></mml:mrow><mml:mi>l</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> and <inline-formula id="ieqn-60"><mml:math id="mml-ieqn-60"><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:msubsup><mml:mi>l</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> denote the first- and second-order gradient statistics of the loss function, respectively.</p>
<p>A DT forecasts a constant value over a leaf. Next, a tree <inline-formula id="ieqn-61"><mml:math id="mml-ieqn-61"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> is denoted by <inline-formula id="ieqn-62"><mml:math id="mml-ieqn-62"><mml:mrow><mml:msub><mml:mi>w</mml:mi><mml:mi>q</mml:mi></mml:msub></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula>, where <italic>w</italic> indicates the score vector for all leaves and <inline-formula id="ieqn-63"><mml:math id="mml-ieqn-63"><mml:mi>q</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> maps instance <italic>x</italic> to a leaf. In the expansion of 2<sup>nd</sup> term in <xref ref-type="disp-formula" rid="eqn-16">Eq. (16)</xref>, a sum on tree leaf is attained and the regulation term is defined herewith.
<disp-formula id="eqn-17"><label>(17)</label><mml:math id="mml-eqn-17" display="block"><mml:mrow><mml:msup><mml:mi>&#x2113;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>&#x2245;</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>T</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>w</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mi>&#x03BE;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:msubsup><mml:mi>w</mml:mi><mml:mi>j</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mi>&#x03BE;</mml:mi><mml:mi>T</mml:mi><mml:mo>,</mml:mo><mml:mspace width="1em" /><mml:mi>w</mml:mi><mml:mi>h</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:msub><mml:mi>I</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mi>g</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:munderover><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:msub><mml:mi>I</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:munderover></mml:math></disp-formula>
where <inline-formula id="ieqn-64"><mml:math id="mml-ieqn-64"><mml:mrow><mml:msub><mml:mi>I</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mo fence="false" stretchy="false">{</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>q</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mo fence="false" stretchy="false">}</mml:mo></mml:math></inline-formula> denotes the set of instances at leaf <inline-formula id="ieqn-65"><mml:math id="mml-ieqn-65"><mml:mi>j</mml:mi><mml:mo>.</mml:mo></mml:math></inline-formula></p>
<p>For a tree with a suitable structure, the objective function is minimized by setting <inline-formula id="ieqn-66"><mml:math id="mml-ieqn-66"><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:msup><mml:mi>&#x2113;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mrow><mml:msub><mml:mi>w</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mi>&#x03BE;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:msub><mml:mi>w</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo></mml:math></inline-formula> and the optimum weight of leaf <italic>j</italic> is represented by
<disp-formula id="eqn-18"><label>(18)</label><mml:math id="mml-eqn-18" display="block"><mml:mrow><mml:msup><mml:mi>w</mml:mi><mml:mrow><mml:mo>&#x2217;</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mi>&#x03BE;</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula>
</p>
<p>When substituting the formula by <xref ref-type="disp-formula" rid="eqn-17">Eq. (17)</xref>, the objective function to find the optimum tree structure is given herewith.
<disp-formula id="eqn-19"><label>(19)</label><mml:math id="mml-eqn-19" display="block"><mml:mrow><mml:msup><mml:mi>&#x2113;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>&#x2245;</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>T</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mfrac><mml:mrow><mml:msubsup><mml:mi>G</mml:mi><mml:mi>j</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mi>&#x03BE;</mml:mi></mml:mrow></mml:mrow></mml:mfrac><mml:mo>+</mml:mo><mml:mi>&#x03BE;</mml:mi><mml:mi>T</mml:mi></mml:math></disp-formula>
</p>
<p>This equation is utilized in practice to assess the splitting candidates in XGBoost. In order to find the optimum split, the exact greedy technique as well as the global and local (which re-proposes candidates after every split) variant approximation techniques are run over every possible split of the entire feature set. This process is carried out by proposing every splitting candidate at an early stage, and the same split procedure is used to find the split on every leaf [<xref ref-type="bibr" rid="ref-26">26</xref>]. While the local technique needs fewer candidates than the global technique, the outcome of the global technique can be as precise as that of the local approach when it is provided with sufficient candidates. In the distributed tree learning approach, all the present approximations are utilized through the estimation of gradient statistics or the quantile technique. XGBoost effectively supports the exact greedy technique on a single machine, as well as the local and global variant approximation techniques in all settings [<xref ref-type="bibr" rid="ref-27">27</xref>].</p>
<p>In XGBoost with the DART booster, <inline-formula id="ieqn-67"><mml:math id="mml-ieqn-67"><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:math></inline-formula> trees are assumed to be dropped from the model in the <italic>m</italic>-th training round. Here, <inline-formula id="ieqn-68"><mml:math id="mml-ieqn-68"><mml:mi>D</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03A3;</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mi>K</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> denotes the leaf scores of the dropped trees and <inline-formula id="ieqn-69"><mml:math id="mml-ieqn-69"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mi>m</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x03B7;</mml:mi><mml:mrow><mml:msub><mml:mrow><mml:mover><mml:mi>F</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mi>m</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> indicates the leaf scores of a novel tree. Next, the objective function structure in <xref ref-type="disp-formula" rid="eqn-15">Eq. (15)</xref> is represented by:</p>
<p><disp-formula id="eqn-19.5"><label>(20)</label><mml:math id="mml-eqn-19.5" display="block"><mml:mrow><mml:msup><mml:mi>&#x2113;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mi>l</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msubsup><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>m</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msubsup><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>D</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>F</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mi mathvariant="normal">&#x03A9;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>F</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>
</p>
<p>Let <italic>D</italic> and <inline-formula id="ieqn-70"><mml:math id="mml-ieqn-70"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mi>m</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> denote the variables that require normalization to avoid overshooting; XGBoost supports both tree-based and forest-based normalization approaches. The objective function of XGBoost with a linear booster is represented by [<xref ref-type="bibr" rid="ref-28">28</xref>]:
<disp-formula id="eqn-20"><label>(21)</label><mml:math id="mml-eqn-20" display="block"><mml:msup><mml:mi>&#x2113;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mi>l</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mi>&#x03A9;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>&#x03C9;</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:munderover><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mo>&#x2061;</mml:mo><mml:mi>l</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msup><mml:mrow><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:msup><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup><mml:mo>+</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mi>b</mml:mi></mml:msub><mml:msup><mml:mi>b</mml:mi><mml:mn>2</mml:mn></mml:msup><mml:mo>+</mml:mo><mml:mi>a</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:msub><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mn>1</mml:mn></mml:msub></mml:math></disp-formula>
</p>
<p>Let <inline-formula id="ieqn-71"><mml:math id="mml-ieqn-71"><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mi>&#x03C9;</mml:mi></mml:mrow><mml:mi>x</mml:mi><mml:mo>+</mml:mo><mml:mi>b</mml:mi></mml:math></inline-formula>, <inline-formula id="ieqn-72"><mml:math id="mml-ieqn-72"><mml:mi>&#x03C9;</mml:mi><mml:mo>=</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mi>d</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> denotes the linear method [<xref ref-type="bibr" rid="ref-27">27</xref>], <italic>d</italic> represents dimension of features, <inline-formula id="ieqn-73"><mml:math id="mml-ieqn-73"><mml:mi>&#x03BB;</mml:mi></mml:math></inline-formula> is the <inline-formula id="ieqn-74"><mml:math id="mml-ieqn-74"><mml:mrow><mml:msub><mml:mi>&#x2113;</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> regularization term according to <inline-formula id="ieqn-75"><mml:math id="mml-ieqn-75"><mml:mi>&#x03C9;</mml:mi></mml:math></inline-formula>, <inline-formula id="ieqn-76"><mml:math id="mml-ieqn-76"><mml:mrow><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mi>b</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> is the <inline-formula id="ieqn-77"><mml:math id="mml-ieqn-77"><mml:mrow><mml:msub><mml:mi>&#x2113;</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> regularization term that depends on offset coefficient <inline-formula id="ieqn-78"><mml:math id="mml-ieqn-78"><mml:mi>b</mml:mi></mml:math></inline-formula>, and <italic>a</italic> is the <inline-formula id="ieqn-79"><mml:math 
id="mml-ieqn-79"><mml:mrow><mml:msub><mml:mi>&#x2113;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> regularization term that depends on <inline-formula id="ieqn-80"><mml:math id="mml-ieqn-80"><mml:mi>&#x03C9;</mml:mi></mml:math></inline-formula>.</p>
</sec>
</sec>
<sec id="s4"><label>4</label><title>Experimental Validation</title>
<p>The current section validates the results of analysis of the proposed SDL-XGBoost model on the classification of DMD and BMD using muscle MRI images. The presented SDL-XGBoost model was simulated using the Python 3.6.5 tool. A detailed comparative study was also conducted with recent state-of-the-art methods in terms of accuracy, sensitivity, and specificity.</p>
<p><?A3B2 "tbl1",5,"anchor"?><xref ref-type="table" rid="table-1">Tab. 1</xref> and <?A3B2 "fig4",5,"anchor"?><xref ref-type="fig" rid="fig-4">Fig. 4</xref> show the results of analysis of the proposed SDL-XGBoost model on the classification of DMD <italic>vs.</italic> other diseases under distinct runs. The experimental values denote that SDL-XGBoost method showcased better results in comparison with other methods. For instance, on execution run-1, the presented SDL-XGBoost technique achieved an effective performance with an accuracy of 93.45&#x0025;, precision of 80.52&#x0025;, sensitivity of 95.38&#x0025;, specificity of 92.86&#x0025;, F-score of 87.32&#x0025;, and kappa of 0.1706. Simultaneously, during execution run-3, the proposed SDL-XGBoost technique reached an efficient performance with an accuracy of 95.27&#x0025;, precision of 84.67&#x0025;, sensitivity of 97.69&#x0025;, specificity of 94.52&#x0025;, F-score of 90.71&#x0025;, and kappa of 0.1779. Besides, during the execution run-5, the presented SDL-XGBoost approach attained an effective performance with an accuracy of 96&#x0025;, precision of 86.49&#x0025;, sensitivity of 98.46&#x0025;, specificity of 95.24&#x0025;, F-score of 92.09&#x0025;, and kappa of 0.1806. Likewise, during execution run-7, the projected SDL-XGBoost methodology obtained superlative performance with an accuracy of 96.91&#x0025;, precision of 89.51&#x0025;, sensitivity of 98.46&#x0025;, specificity of 96.43&#x0025;, F-score of 93.77&#x0025;, and kappa of 0.1825. Moreover, during execution run-9, the presented SDL-XGBoost technique accomplished an excellent performance with its accuracy being 97.64&#x0025; and a precision of 91.49&#x0025;, sensitivity of 99.23&#x0025;, specificity of 97.14&#x0025;, F-score of 95.20&#x0025;, and kappa of 0.1852. 
At last, the presented SDL-XGBoost model classified DMD and other diseases with a maximum accuracy of 96.18&#x0025;, precision of 87.60&#x0025;, sensitivity of 97.92&#x0025;, specificity of 95.64&#x0025;, F-score of 92.44&#x0025;, and kappa of 0.1800.</p>
<table-wrap id="table-1"><label>Table 1</label><caption><title>Result of the analysis of the proposed SDL-XGBoost model for DMD <italic>vs.</italic> other diseases</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">No. of Runs</th>
<th align="left">Accuracy</th>
<th align="left">Precision</th>
<th align="left">Sensitivity</th>
<th align="left">Specificity</th>
<th align="left">F-Score</th>
<th align="left">Kappa</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Run - 1</td>
<td align="left">93.45</td>
<td align="left">80.52</td>
<td align="left">95.38</td>
<td align="left">92.86</td>
<td align="left">87.32</td>
<td align="left">0.1706</td>
</tr>
<tr>
<td align="left">Run - 2</td>
<td align="left">94.36</td>
<td align="left">82.78</td>
<td align="left">96.15</td>
<td align="left">93.81</td>
<td align="left">88.97</td>
<td align="left">0.1736</td>
</tr>
<tr>
<td align="left">Run - 3</td>
<td align="left">95.27</td>
<td align="left">84.67</td>
<td align="left">97.69</td>
<td align="left">94.52</td>
<td align="left">90.71</td>
<td align="left">0.1779</td>
</tr>
<tr>
<td align="left">Run - 4</td>
<td align="left">95.64</td>
<td align="left">85.81</td>
<td align="left">97.69</td>
<td align="left">95.00</td>
<td align="left">91.37</td>
<td align="left">0.1787</td>
</tr>
<tr>
<td align="left">Run - 5</td>
<td align="left">96.00</td>
<td align="left">86.49</td>
<td align="left">98.46</td>
<td align="left">95.24</td>
<td align="left">92.09</td>
<td align="left">0.1806</td>
</tr>
<tr>
<td align="left">Run - 6</td>
<td align="left">96.36</td>
<td align="left">87.67</td>
<td align="left">98.46</td>
<td align="left">95.71</td>
<td align="left">92.75</td>
<td align="left">0.1814</td>
</tr>
<tr>
<td align="left">Run - 7</td>
<td align="left">96.91</td>
<td align="left">89.51</td>
<td align="left">98.46</td>
<td align="left">96.43</td>
<td align="left">93.77</td>
<td align="left">0.1825</td>
</tr>
<tr>
<td align="left">Run - 8</td>
<td align="left">97.27</td>
<td align="left">90.78</td>
<td align="left">98.46</td>
<td align="left">96.90</td>
<td align="left">94.46</td>
<td align="left">0.1832</td>
</tr>
<tr>
<td align="left">Run - 9</td>
<td align="left">97.64</td>
<td align="left">91.49</td>
<td align="left">99.23</td>
<td align="left">97.14</td>
<td align="left">95.20</td>
<td align="left">0.1852</td>
</tr>
<tr>
<td align="left">Run - 10</td>
<td align="left">98.91</td>
<td align="left">96.27</td>
<td align="left">99.23</td>
<td align="left">98.81</td>
<td align="left">97.73</td>
<td align="left">0.1878</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">96.18</td>
<td align="left">87.60</td>
<td align="left">97.92</td>
<td align="left">95.64</td>
<td align="left">92.44</td>
<td align="left">0.1800</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="fig-4"><label>Figure 4</label><caption><title>Result analysis of SDL-XGBoost model for DMD</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_20914-fig-4.png"/></fig>
<p><?A3B2 "tbl2",5,"anchor"?><xref ref-type="table" rid="table-2">Tab. 2</xref> and <?A3B2 "fig5",5,"anchor"?><xref ref-type="fig" rid="fig-5">Fig. 5</xref> portray the results of analysis for the proposed SDL-XGBoost technique on the classification of BMD <italic>vs.</italic> other diseases under different runs. The experimental values inferred that SDL-XGBoost method demonstrated optimal outcomes over other techniques compared in this study. For example, during execution run-1, the proposed SDL-XGBoost method attained an effective performance with an accuracy of 90.73&#x0025;, precision of 71.55&#x0025;, sensitivity of 84.69&#x0025;, specificity of 92.14&#x0025;, F-score of 77.57&#x0025;, and kappa of 0.1231. Likewise, during execution run-3, the projected SDL-XGBoost model reached an excellent performance with an accuracy of 92.47&#x0025;, precision of 75.65&#x0025;, sensitivity of 88.78&#x0025;, specificity of 93.33&#x0025;, F-score of 81.69&#x0025;, and kappa of 0.1315. Also, under execution run-5, the presented SDL-XGBoost model accomplished superlative performance with an accuracy of 93.24&#x0025;, precision of 77.88&#x0025;, sensitivity of 89.8&#x0025;, specificity of 94.52&#x0025;, F-score of 84.21&#x0025;, and kappa of 0.1348. Similarly, during execution run-7, the presented SDL-XGBoost model gained superior performance with an accuracy of 94.98&#x0025;, precision of 82.73&#x0025;, sensitivity of 92.86&#x0025;, specificity of 95.48&#x0025;, F-score of 87.5&#x0025;, and kappa of 0.1412. Moreover, during the execution run-9, the proposed SDL-XGBoost methodology performed effectively with an accuracy of 96.91&#x0025;, precision of 88.68&#x0025;, sensitivity of 95.92&#x0025;, specificity of 97.14&#x0025;, F-score of 92.16&#x0025;, and kappa of 0.1485.</p>
<table-wrap id="table-2"><label>Table 2</label><caption><title>Results of analysis of the proposed SDL-XGBoost model for BMD <italic>vs.</italic> other diseases</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">No. of Runs</th>
<th align="left">Accuracy</th>
<th align="left">Precision</th>
<th align="left">Sensitivity</th>
<th align="left">Specificity</th>
<th align="left">F-Score</th>
<th align="left">Kappa</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Run - 1</td>
<td align="left">90.73</td>
<td align="left">71.55</td>
<td align="left">84.69</td>
<td align="left">92.14</td>
<td align="left">77.57</td>
<td align="left">0.1231</td>
</tr>
<tr>
<td align="left">Run - 2</td>
<td align="left">91.89</td>
<td align="left">74.14</td>
<td align="left">87.76</td>
<td align="left">92.86</td>
<td align="left">80.37</td>
<td align="left">0.1291</td>
</tr>
<tr>
<td align="left">Run - 3</td>
<td align="left">92.47</td>
<td align="left">75.65</td>
<td align="left">88.78</td>
<td align="left">93.33</td>
<td align="left">81.69</td>
<td align="left">0.1315</td>
</tr>
<tr>
<td align="left">Run - 4</td>
<td align="left">93.24</td>
<td align="left">77.88</td>
<td align="left">89.80</td>
<td align="left">94.05</td>
<td align="left">83.41</td>
<td align="left">0.1341</td>
</tr>
<tr>
<td align="left">Run - 5</td>
<td align="left">93.63</td>
<td align="left">79.28</td>
<td align="left">89.80</td>
<td align="left">94.52</td>
<td align="left">84.21</td>
<td align="left">0.1348</td>
</tr>
<tr>
<td align="left">Run - 6</td>
<td align="left">94.59</td>
<td align="left">81.82</td>
<td align="left">91.84</td>
<td align="left">95.24</td>
<td align="left">86.54</td>
<td align="left">0.1392</td>
</tr>
<tr>
<td align="left">Run - 7</td>
<td align="left">94.98</td>
<td align="left">82.73</td>
<td align="left">92.86</td>
<td align="left">95.48</td>
<td align="left">87.50</td>
<td align="left">0.1412</td>
</tr>
<tr>
<td align="left">Run - 8</td>
<td align="left">96.14</td>
<td align="left">86.11</td>
<td align="left">94.90</td>
<td align="left">96.43</td>
<td align="left">90.29</td>
<td align="left">0.1458</td>
</tr>
<tr>
<td align="left">Run - 9</td>
<td align="left">96.91</td>
<td align="left">88.68</td>
<td align="left">95.92</td>
<td align="left">97.14</td>
<td align="left">92.16</td>
<td align="left">0.1485</td>
</tr>
<tr>
<td align="left">Run - 10</td>
<td align="left">97.88</td>
<td align="left">91.43</td>
<td align="left">97.96</td>
<td align="left">97.86</td>
<td align="left">94.58</td>
<td align="left">0.1528</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">94.25</td>
<td align="left">80.93</td>
<td align="left">91.43</td>
<td align="left">94.91</td>
<td align="left">85.83</td>
<td align="left">0.1400</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="fig-5"><label>Figure 5</label><caption><title>Result analysis of SDL-XGBoost model for BMD</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_20914-fig-5.png"/></fig>
<p>Finally, the proposed SDL-XGBoost technique classified BMD <italic>vs.</italic> other diseases and attained a superior accuracy of 94.25&#x0025;, precision of 80.93&#x0025;, sensitivity of 91.43&#x0025;, specificity of 94.91&#x0025;, F-score of 85.83&#x0025;, and kappa of 0.1400.</p>
<p>To further validate the supremacy of the presented SDL-XGBoost model, a detailed comparative analysis was conducted and the results were compared in terms of three distinct measures as given in <?A3B2 "tbl3",5,"anchor"?><xref ref-type="table" rid="table-3">Tab. 3</xref> and <?A3B2 "fig6",5,"anchor"?><xref ref-type="fig" rid="fig-6">Fig. 6</xref>.</p>
<table-wrap id="table-3"><label>Table 3</label><caption><title>Comparative analysis of existing techniques with the proposed SDL-XGBoost in terms of sensitivity, specificity and accuracy</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Models</th>
<th align="left">Sensitivity</th>
<th align="left">Specificity</th>
<th align="left">Accuracy</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">SDL-XGBoost (DMD <italic>vs.</italic> Others)</td>
<td align="left">0.98</td>
<td align="left">0.96</td>
<td align="left">0.96</td>
</tr>
<tr>
<td align="left">SDL-XGBoost (BMD <italic>vs.</italic> Others)</td>
<td align="left">0.91</td>
<td align="left">0.95</td>
<td align="left">0.94</td>
</tr>
<tr>
<td align="left">VGG-19</td>
<td align="left">0.98</td>
<td align="left">0.66</td>
<td align="left">0.87</td>
</tr>
<tr>
<td align="left">ResNet-50</td>
<td align="left">0.92</td>
<td align="left">0.89</td>
<td align="left">0.91</td>
</tr>
<tr>
<td align="left">DenseNet-201</td>
<td align="left">0.98</td>
<td align="left">0.74</td>
<td align="left">0.90</td>
</tr>
<tr>
<td align="left">DenseNet-121</td>
<td align="left">0.94</td>
<td align="left">0.78</td>
<td align="left">0.88</td>
</tr>
<tr>
<td align="left">Inception-V3</td>
<td align="left">0.94</td>
<td align="left">0.83</td>
<td align="left">0.90</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="fig-6"><label>Figure 6</label><caption><title>Comparative analysis of SDL-XGBoost model</title></caption><graphic mimetype="image" mime-subtype="png" xlink:href="CMC_20914-fig-6.png"/></fig>
<p>The experimental values depict that VGG-19 model produced insignificant outcome with a sensitivity of 0.98, specificity of 0.66, and accuracy of 0.87. At the same time, the DenseNet-121 model accomplished a slightly enhanced classification result with a sensitivity of 0.94, specificity of 0.78, and accuracy of 0.88. Subsequently, the Inception v3 model attained a certain increase in the outcome in terms of sensitivity 0.94, specificity 0.83, and accuracy 0.90. Moreover, the DenseNet-201 model offered a moderate performance with a sensitivity of 0.98, specificity of 0.74, and accuracy of 0.90. Furthermore, the ResNet-50 model showcased moderate outcomes with a sensitivity of 0.92, specificity of 0.89, and accuracy of 0.91. But the presented SDL-XGBoost model produced enhanced outcomes by classifying BMD with a sensitivity of 0.91, specificity of 0.95, and accuracy of 0.94. At last, the SDL-XGBoost model has classified the DMD with a sensitivity of 0.98, specificity of 0.96, and accuracy of 0.96.</p>
</sec>
<sec id="s5"><label>5</label><title>Conclusion</title>
<p>The current research article developed an automated muscular dystrophy detection and classification model using SDL-XGBoost. The presented SDL-XGBoost model makes use of muscle MRI patterns to identify the presence of muscular dystrophy. The proposed SDL-XGBoost model involves three major processes such as RoI detection, feature selection, and classification. Primarily, Kapur&#x0027;s thresholding is applied to determine the regions of interest. Afterwards, SDL model is employed to derive a set of useful feature vectors. Finally, XGBoost model is utilized in the allocation of appropriate class labels for muscle MRI data. A comprehensive experimental analysis was conducted to showcase the superior performance of the proposed SDL-XGBoost model. The results were examined under different aspects which inferred the supremacy of the proposed model. So, SDL-XGBoost model has been proved experimentally and can assist physicians in diagnosing muscular dystrophies through muscle fatty replacement patterns in muscle MRI. In future, the detection rate of muscular dystrophies can be improved with the help of learning rate schedulers.</p>
</sec>
</body>
<back>
<ack>
<p>The authors extend their appreciation to the Deanship of Scientific Research at King Khalid University for funding this work under grant number (RGP1/147/42), Received by Fahd N. Al-Wesabi. <uri xlink:href="https://www.kku.edu.sa">www.kku.edu.sa</uri>. This research was funded by the Deanship of Scientific Research at Princess Nourah bint Abdulrahman University through the Fast-track Research Funding Program.</p>
</ack>
<fn-group>
<fn fn-type="other"><p><bold>Funding Statement:</bold> The authors received no funding for this study.</p></fn>
<fn fn-type="conflict"><p><bold>Conflicts of Interest:</bold> The authors declare that they have no conflicts of interest to report regarding the present study.</p></fn>
</fn-group>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D. G.</given-names> <surname>Leung</surname></string-name></person-group>, &#x201C;<article-title>Magnetic resonance imaging patterns of muscle involvement in genetic muscle diseases: A systematic review</article-title>,&#x201D; <source>Journal of Neurology</source><italic>,</italic> vol. <volume>264</volume><italic>,</italic> no. <issue>7</issue><italic>,</italic> pp. <fpage>1320</fpage>&#x2013;<lpage>1333</lpage>, <year>Jul. 2017</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. E.</given-names> <surname>Emery</surname></string-name></person-group>, &#x201C;<article-title>The muscular dystrophies</article-title>,&#x201D; <source>The Lancet</source><italic>,</italic> vol. <volume>359</volume><italic>,</italic> no. <issue>9307</issue><italic>,</italic> pp. <fpage>687</fpage>&#x2013;<lpage>695</lpage>, <year>2002</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F.</given-names> <surname>Del Grande</surname></string-name>, <string-name><given-names>J. A.</given-names> <surname>Carrino</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Del Grande</surname></string-name>, <string-name><given-names>A. L.</given-names> <surname>Mammen</surname></string-name> and <string-name><given-names>L. C.</given-names> <surname>Stine</surname></string-name></person-group>, &#x201C;<article-title>Magnetic resonance imaging of inflammatory myopathies</article-title>,&#x201D; <source>Topics in Magnetic Resonance Imaging</source><italic>,</italic> vol. <volume>22</volume><italic>,</italic> no. <issue>2</issue><italic>,</italic> pp. <fpage>39</fpage>&#x2013;<lpage>43</lpage>, <year>2011</year>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Mercuri</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Pichiecchio</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Allsop</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Messina</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Pane</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Muscle MRI in inherited neuromuscular disorders: Past, present, and future</article-title>,&#x201D; <source>Journal of Magnetic Resonance Imaging</source><italic>,</italic> vol. <volume>25</volume><italic>,</italic> no. <issue>2</issue><italic>,</italic> pp. <fpage>433</fpage>&#x2013;<lpage>440</lpage>, <year>2007</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. P.</given-names> <surname>Wattjes</surname></string-name>, <string-name><given-names>R. A.</given-names> <surname>Kley</surname></string-name> and <string-name><given-names>D.</given-names> <surname>Fischer</surname></string-name></person-group>, &#x201C;<article-title>Neuromuscular imaging in inherited muscle diseases</article-title>,&#x201D; <source>European Radiology</source><italic>,</italic> vol. <volume>20</volume><italic>,</italic> no. <issue>10</issue><italic>,</italic> pp. <fpage>2447</fpage>&#x2013;<lpage>2460</lpage>, <year>2010</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Tawil</surname></string-name>, <string-name><given-names>G. W.</given-names> <surname>Padberg</surname></string-name>, <string-name><given-names>D. W.</given-names> <surname>Shaw</surname></string-name>, <string-name><given-names>S. M.</given-names> <surname>van der Maarel</surname></string-name>, <string-name><given-names>S. J.</given-names> <surname>Tapscott</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Clinical trial preparedness in facioscapulo-humeral muscular dystrophy: Clinical, tissue, and imaging outcome measures 29&#x2013;30 May 2015, Rochester, New York</article-title>,&#x201D; <source>Neuromuscul Disord</source><italic>,</italic> vol. <volume>26</volume><italic>,</italic> no. <issue>2</issue><italic>,</italic> pp. <fpage>181</fpage>&#x2013;<lpage>186</lpage>, <year>2016</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J. D.</given-names> <surname>Manera</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Llauger</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Gallardo</surname></string-name> and <string-name><given-names>I.</given-names> <surname>Illa</surname></string-name></person-group>, &#x201C;<article-title>Muscle MRI in muscular dystrophies</article-title>,&#x201D; <source>Acta Myologica</source><italic>,</italic> vol. <volume>34</volume><italic>,</italic> no. <issue>2&#x2013;3</issue><italic>,</italic> pp. <fpage>95</fpage>&#x2013;<lpage>108</lpage>, <year>2015</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Rajkomar</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Dean</surname></string-name> and <string-name><given-names>I.</given-names> <surname>Kohane</surname></string-name></person-group>, &#x201C;<article-title>Machine learning in medicine</article-title>,&#x201D; <source>The New England Journal of Medicine</source><italic>,</italic> vol. <volume>380</volume><italic>,</italic> pp. <fpage>1347</fpage>&#x2013;<lpage>1358</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Obermeyer</surname></string-name> and <string-name><given-names>T. H.</given-names> <surname>Lee</surname></string-name></person-group>, &#x201C;<article-title>Lost in thought &#x2014; the limits of the human mind and the future of medicine</article-title>,&#x201D; <source>The New England Journal of Medicine</source><italic>,</italic> vol. <volume>377</volume><italic>,</italic> no. <issue>13</issue><italic>,</italic> pp. <fpage>1209</fpage>&#x2013;<lpage>1211</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Lecun</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Bottou</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Bengio</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Haffner</surname></string-name></person-group>, &#x201C;<article-title>Gradient-based learning applied to document recognition</article-title>,&#x201D; in <conf-name>Proc. IEEE</conf-name>, vol. <volume>86</volume><italic>,</italic> no. <issue>11</issue><italic>,</italic> pp. <fpage>2278</fpage>&#x2013;<lpage>2324</lpage>, <year>1998</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Ravi</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Wong</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Deligianni</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Berthelot</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Andreu-Perez</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Deep learning for health informatics</article-title>,&#x201D; <source>IEEE Journal of Biomedical and Health Informatics</source><italic>,</italic> vol. <volume>21</volume><italic>,</italic> no. <issue>1</issue><italic>,</italic> pp. <fpage>4</fpage>&#x2013;<lpage>21</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y. H.</given-names> <surname>Lee</surname></string-name></person-group>, &#x201C;<article-title>Efficiency improvement in a busy radiology practice: Determination of musculoskeletal magnetic resonance imaging protocol using deep-learning convolutional neural networks</article-title>,&#x201D; <source>Journal of Digital Imaging</source><italic>,</italic> vol. <volume>31</volume><italic>,</italic> no. <issue>5</issue><italic>,</italic> pp. <fpage>604</fpage>&#x2013;<lpage>610</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Gong</surname></string-name>, <string-name><given-names>J. M.</given-names> <surname>Pauly</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Wintermark</surname></string-name> and <string-name><given-names>G.</given-names> <surname>Zaharchuk</surname></string-name></person-group>, &#x201C;<article-title>Deep learning enables reduced gadolinium dose for contrast-enhanced brain MRI: Deep learning reduces gadolinium dose</article-title>,&#x201D; <source>Journal of Magnetic Resonance Imaging</source><italic>,</italic> vol. <volume>48</volume><italic>,</italic> no. <issue>2</issue><italic>,</italic> pp. <fpage>330</fpage>&#x2013;<lpage>340</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Meyer</surname></string-name>, <string-name><given-names>V.</given-names> <surname>Noblet</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Mazzara</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Lallement</surname></string-name></person-group>, &#x201C;<article-title>Survey on deep learning for radiotherapy</article-title>,&#x201D; <source>Computers in Biology and Medicine</source><italic>,</italic> vol. <volume>98</volume><italic>,</italic> pp. <fpage>126</fpage>&#x2013;<lpage>146</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Izadyyazdanabadi</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Belykh</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Mooney</surname></string-name>, <string-name><given-names>J. M.</given-names> <surname>Eschbacher</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Nakaji</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Prospects for theranostics in neurosurgical imaging: Empowering confocal laser endomicroscopy diagnostics via deep learning</article-title>,&#x201D; <source>Frontiers in Oncology</source><italic>,</italic> vol. <volume>8</volume><italic>,</italic> pp. <fpage>240</fpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>G.</given-names> <surname>Haskins</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Kruecker</surname></string-name>, <string-name><given-names>U.</given-names> <surname>Kruger</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Xu</surname></string-name>, <string-name><given-names>P. A.</given-names> <surname>Pinto</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Learning deep similarity metric for 3D MR&#x2013;TRUS image registration</article-title>,&#x201D; <source>International Journal of Computer Assisted Radiology and Surgery</source><italic>,</italic> vol. <volume>14</volume><italic>,</italic> no. <issue>3</issue><italic>,</italic> pp. <fpage>417</fpage>&#x2013;<lpage>425</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V.</given-names> <surname>Kearney</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Haaf</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Sudhyadhom</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Valdes</surname></string-name> and <string-name><given-names>T. D.</given-names> <surname>Solberg</surname></string-name></person-group>, &#x201C;<article-title>An unsupervised convolutional neural network-based algorithm for deformable image registration</article-title>,&#x201D; <source>Physics in Medicine and Biology</source><italic>,</italic> vol. <volume>63</volume><italic>,</italic> no. <issue>18</issue><italic>,</italic> pp. <fpage>185017</fpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J. V.</given-names> <surname>D&#x00ED;az</surname></string-name>, <string-name><given-names>J. A.</given-names> <surname>P&#x00E9;rez</surname></string-name>, <string-name><given-names>C. N.</given-names> <surname>Peralta</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Tasca</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Vissing</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Accuracy of a machine learning muscle MRI-based tool for the diagnosis of muscular dystrophies</article-title>,&#x201D; <source>Neurology</source><italic>,</italic> vol. <volume>94</volume><italic>,</italic> no. <issue>10</issue><italic>,</italic> pp. <fpage>e1094</fpage>&#x2013;<lpage>e1102</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Yang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zheng</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Xie</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Xiao</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>A deep learning model for diagnosing dystrophinopathies on thigh muscle MRI images</article-title>,&#x201D; <source>BMC Neurology</source><italic>,</italic> vol. <volume>21</volume><italic>,</italic> no. <issue>1</issue><italic>,</italic> pp. <fpage>13</fpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J. N.</given-names> <surname>Kapur</surname></string-name>, <string-name><given-names>P. K.</given-names> <surname>Sahoo</surname></string-name> and <string-name><given-names>A. K. C.</given-names> <surname>Wong</surname></string-name></person-group>, &#x201C;<article-title>A new method for gray-level picture thresholding using the entropy of the histogram</article-title>,&#x201D; <source>Computer Vision, Graphics, and Image Processing</source><italic>,</italic> vol. <volume>29</volume><italic>,</italic> no. <issue>3</issue><italic>,</italic> pp. <fpage>273</fpage>&#x2013;<lpage>285</lpage>, <year>1985</year>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E. H.</given-names> <surname>Houssein</surname></string-name>, <string-name><given-names>B. E.</given-names> <surname>Helmy</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Oliva</surname></string-name>, <string-name><given-names>A. A.</given-names> <surname>Elngar</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Shaban</surname></string-name></person-group>, &#x201C;<article-title>A novel black widow optimization algorithm for multilevel thresholding image segmentation</article-title>,&#x201D; <source>Expert Systems with Applications</source><italic>,</italic> vol. <volume>167</volume><italic>,</italic> pp. <fpage>114159</fpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Xie</surname></string-name>, <string-name><given-names>Q.</given-names> <surname>Wu</surname></string-name> and <string-name><given-names>Y.</given-names> <surname>Xia</surname></string-name></person-group>, &#x201C;<article-title>Medical image classification using synergic deep learning</article-title>,&#x201D; <source>Medical Image Analysis</source><italic>,</italic> vol. <volume>54</volume><italic>,</italic> pp. <fpage>10</fpage>&#x2013;<lpage>19</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>He</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Ren</surname></string-name> and <string-name><given-names>J.</given-names> <surname>Sun</surname></string-name></person-group>, &#x201C;<article-title>Deep residual learning for image recognition</article-title>,&#x201D; in <conf-name>2016 IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)</conf-name>, <conf-loc>Las Vegas, NV, USA</conf-loc>, pp. <fpage>770</fpage>&#x2013;<lpage>778</lpage>, <year>2016</year>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Shankar</surname></string-name>, <string-name><given-names>A. R. W.</given-names> <surname>Sait</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Gupta</surname></string-name>, <string-name><given-names>S. K.</given-names> <surname>Lakshmanaprabu</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Khanna</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Automated detection and classification of fundus diabetic retinopathy images using synergic deep learning model</article-title>,&#x201D; <source>Pattern Recognition Letters</source><italic>,</italic> vol. <volume>133</volume><italic>,</italic> pp. <fpage>210</fpage>&#x2013;<lpage>216</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Chen</surname></string-name> and <string-name><given-names>C.</given-names> <surname>Guestrin</surname></string-name></person-group>, &#x201C;<article-title>XGBoost: A scalable tree boosting system</article-title>,&#x201D; in <conf-name>Proc. of the 22nd ACM SIGKDD Int. Conf. on Knowledge Discovery and Data Mining</conf-name>, <conf-loc>San Francisco California USA</conf-loc>, pp. <fpage>785</fpage>&#x2013;<lpage>794</lpage>, <year>2016</year>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="other">Preprint: <person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Si</surname></string-name> and <string-name><given-names>C. J.</given-names> <surname>Hsieh</surname></string-name></person-group>, &#x201C;<article-title>GPU-Acceleration for large-scale tree boosting</article-title>,&#x201D; arXiv 2017, arXiv:1706.08359, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-27"><label>[27]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>T.</given-names> <surname>He</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Benesty</surname></string-name>, <string-name><given-names>V.</given-names> <surname>Khotilovich</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Tang</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Xgboost: Extreme gradient boosting. R package version 0.4&#x2013;2</article-title>,&#x201D; <source>Technical Report</source><italic>,</italic> vol. <volume>1</volume><italic>,</italic> no. <issue>4</issue><italic>,</italic> pp. <fpage>1</fpage>&#x2013;<lpage>3</lpage>, <year>2015</year>.</mixed-citation></ref>
<ref id="ref-28"><label>[28]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Samat</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Lin</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Meta-XGBoost for hyperspectral image classification using extended mser-guided morphological profiles</article-title>,&#x201D; <source>Remote Sensing</source><italic>,</italic> vol. <volume>12</volume><italic>,</italic> no. <issue>12</issue><italic>,</italic> pp. <fpage>1973</fpage>, <year>2020</year>.</mixed-citation></ref>
</ref-list>
</back>
</article>