<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">32816</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2023.032816</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Automated Brain Tumor Diagnosis Using Deep Residual U-Net Segmentation&#x00A0;Model</article-title>
<alt-title alt-title-type="left-running-head">Automated Brain Tumor Diagnosis Using Deep Residual U-Net Segmentation Model</alt-title>
<alt-title alt-title-type="right-running-head">Automated Brain Tumor Diagnosis Using Deep Residual U-Net Segmentation Model</alt-title>
</title-group>
<contrib-group content-type="authors">
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Poonguzhali</surname><given-names>R.</given-names>
</name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Ahmad</surname><given-names>Sultan</given-names>
</name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Sivasankar</surname><given-names>P. Thiruvannamalai</given-names>
</name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Anantha Babu</surname><given-names>S.</given-names>
</name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Joshi</surname><given-names>Pranav</given-names>
</name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Joshi</surname><given-names>Gyanendra Prasad</given-names>
</name><xref ref-type="aff" rid="aff-5">5</xref></contrib>
<contrib id="author-7" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Kim</surname><given-names>Sung Won</given-names>
</name><xref ref-type="aff" rid="aff-6">6</xref><email>swon@yu.ac.kr</email></contrib>
<aff id="aff-1"><label>1</label><institution>Department of Computer Science and Engineering, Periyar Maniammai Institute of Science and Technology</institution>, <addr-line>Thanjavur, 613403</addr-line>, <country>India</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Computer Science, College of Computer Engineering and Sciences, Prince Sattam Bin Abdulaziz University</institution>, <addr-line>Alkharj</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Computer Science and Engineering, Jain Deemed to-be University</institution>, <addr-line>Bangalore, 560069</addr-line>, <country>India</country></aff>
<aff id="aff-4"><label>4</label><institution>Department of Neurology, Annapurna Neuro Hospital</institution>, <addr-line>Kathmandu, 44600</addr-line>, <country>Nepal</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Computer Science and Engineering, Sejong University</institution>, <addr-line>Seoul, 05006, Korea</addr-line></aff>
<aff id="aff-6"><label>6</label><institution>Department of Information and Communication Engineering, Yeungnam University</institution>, <addr-line>Gyeongsan-si, Gyeongbuk-do, 38541, Korea</addr-line></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Sung Won Kim. Email: <email>swon@yu.ac.kr</email></corresp>
</author-notes>
<pub-date pub-type="epub" date-type="pub" iso-8601-date="2022-08-16"><day>16</day>
<month>08</month>
<year>2022</year></pub-date>
<volume>74</volume>
<issue>1</issue>
<fpage>2179</fpage>
<lpage>2194</lpage>
<history>
<date date-type="received">
<day>30</day>
<month>5</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>12</day>
<month>7</month>
<year>2022</year>
</date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Poonguzhali et al.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Poonguzhali et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_32816.pdf"></self-uri>
<abstract>
<p>Automated segmentation and classification of biomedical images play a vital part in the diagnosis of brain tumors (BT). Early analysis of a primary brain tumor enables a quicker treatment response, which improves the patient survival rate. Manually locating and classifying BTs in the huge databases of medical images produced by routine clinical practice is costly in both effort and time. An automated detection, localization, and classification process is therefore desirable and useful. This study introduces an Automated Deep Residual U-Net Segmentation with Classification model (ADRU-SCM) for brain tumor diagnosis. The presented ADRU-SCM model mainly focuses on the segmentation and classification of BT. To accomplish this, the presented ADRU-SCM model involves Wiener filtering (WF) based pre-processing to remove the noise present in the input image. In addition, the ADRU-SCM model applies a deep residual U-Net segmentation model to determine the affected brain regions. Moreover, the VGG-19 model is exploited as a feature extractor. Finally, tunicate swarm optimization (TSO) with a gated recurrent unit (GRU) model is applied for classification, where the TSO algorithm effectually tunes the GRU hyperparameters. The performance of the ADRU-SCM model was validated on the FigShare dataset, and the outcomes pointed out the better performance of the ADRU-SCM approach over recent approaches.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Brain tumor diagnosis</kwd>
<kwd>image classification</kwd>
<kwd>biomedical images</kwd>
<kwd>image segmentation</kwd>
<kwd>deep learning</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>Image segmentation and classification are among the most widely used image processing methods for extracting a region of interest (ROI) and dividing it into given classes. They serve a significant role in many applications that extract features from images and understand, interpret, and analyze them [<xref ref-type="bibr" rid="ref-1">1</xref>]. Computed tomography (CT) scans and magnetic resonance imaging (MRI) are utilized to examine abnormalities in the size, shape, or position of brain tissue. A brain tumor (BT) is regarded as a neoplastic, abnormal growth of cells in the brain [<xref ref-type="bibr" rid="ref-2">2</xref>]. Segmentation is the process of separating an image into regions or blocks that share properties such as brightness, color, gray level, and contrast [<xref ref-type="bibr" rid="ref-3">3</xref>,<xref ref-type="bibr" rid="ref-4">4</xref>]. Brain tumor segmentation is used in medical imaging, such as magnetic resonance (MR) images or newer imaging modalities, to separate tumor tissues like necrosis (dead cells) and edema from normal brain tissues, namely white matter (WM), gray matter (GM), and cerebrospinal fluid (CSF) [<xref ref-type="bibr" rid="ref-5">5</xref>]. Segmentation can be used to detect tumor tissues in medical imaging modalities, and based on evaluations obtained with the help of enhanced medical imaging, specialized care is given to patients with BT [<xref ref-type="bibr" rid="ref-6">6</xref>]. Detecting a BT at an early stage is the main challenge in providing enhanced treatment to the patient. Once a BT is suspected clinically, radiological assessment is needed to determine its size, location, and effects on the surrounding regions [<xref ref-type="bibr" rid="ref-7">7</xref>]. It is well established that the survival chances of a patient rise when the cancer is identified at an early stage. Hence, the study of BTs with the help of imaging modalities has gained significance in radiology [<xref ref-type="bibr" rid="ref-8">8</xref>].</p>
<p>Prior studies indicate that conventional methods are highly sensitive to the initial cluster centers and cluster sizes [<xref ref-type="bibr" rid="ref-9">9</xref>]. When the resulting clusters vary with different initial inputs, categorizing pixels becomes problematic. In the common fuzzy c-means approach, the cluster centroid value is chosen randomly, which increases the time required to reach a favorable solution [<xref ref-type="bibr" rid="ref-10">10</xref>]. Manual evaluation and segmentation of MRI brain images by radiologists is tedious, while segmentation performed with classical machine learning (ML) methods suffers from low computation speed and accuracy.</p>
<p>Ilhan&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-11">11</xref>] suggested an effective algorithm for segmenting whole BTs in MRI images based on tumor localization and enhancement methodologies with a deep learning (DL) structure called U-Net. First, a histogram-based nonparametric tumor localization methodology is applied to localize the tumorous zones, and the presented tumor enhancement technique is then utilized to modify the localized zones and increase the visual appearance of low-contrast or indistinct tumors. Raju&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-12">12</xref>] recommended an automated categorization technique that uses Harmony-Crow Search (HCS) optimization to train a multi-Support Vector Neural Network (multi-SVNN). The BT segmentation is done with a Bayesian fuzzy clustering technique, and tumor classification is executed with the suggested HCS optimization based multi-SVNN classifier. Das&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-13">13</xref>] reviewed studies in consideration of 32 attributes, grouped into clusters covering performance evaluation metrics, AI architecture, clinical evaluation, imaging modalities, and hyperparameters. Kapila et al. [<xref ref-type="bibr" rid="ref-14">14</xref>] proposed a potential approach for BT classification and segmentation, classifying and segmenting BT MR images through an artificial neural network (ANN) and modified fuzzy c-means (MFCM); the extracted features are selected optimally by a hybrid fruit fly and artificial bee colony (HFFABC) algorithm. In [<xref ref-type="bibr" rid="ref-15">15</xref>], the researchers addressed fully automated BT segmentation in multimodal MRI; since applying classification over whole-volume data demands a heavy load of both memory and computation, they suggested a two-stage technique.</p>
<p>This study introduces an Automated Deep Residual U-Net Segmentation with Classification model (ADRU-SCM) for brain tumor diagnosis. The presented ADRU-SCM model mainly focuses on the segmentation and classification of BT. The presented ADRU-SCM model involves Wiener filtering (WF) based pre-processing to remove the noise present in the input image. In addition, the ADRU-SCM model applies a deep residual U-Net segmentation model to determine the affected brain regions. Moreover, the VGG-19 model is exploited as a feature extractor. Finally, tunicate swarm optimization (TSO) with a gated recurrent unit (GRU) model is applied for classification, where the TSO algorithm effectually tunes the GRU hyperparameters. The performance of the ADRU-SCM approach was validated using the FigShare dataset.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>The Proposed Model</title>
<p>In this study, a novel ADRU-SCM technique is developed for the segmentation and classification of BT. The presented ADRU-SCM technique primarily applies WF based pre-processing to remove the noise present in the input image. In addition, the ADRU-SCM model applies a deep residual U-Net segmentation model to determine the affected brain regions. Moreover, the VGG-19 model is exploited as a feature extractor. Finally, TSO with a GRU model is applied for classification. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> depicts the overall process of the ADRU-SCM approach.</p>
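<p>As a reading aid, the flow of <xref ref-type="fig" rid="fig-1">Fig. 1</xref> can be summarized in a short Python sketch; the four stage callables below are hypothetical stand-ins for the components detailed in Sections 2.1&#x2013;2.5, not part of the original model definition.</p>
<preformat>def adru_scm_pipeline(mri_image, denoise, segment, extract, classify):
    """End-to-end flow of Fig. 1 (all stage names are illustrative)."""
    denoised = denoise(mri_image)   # 2.1: Wiener-filter pre-processing
    mask = segment(denoised)        # 2.2: deep residual U-Net segmentation
    roi = denoised * mask           # restrict analysis to the affected regions
    feats = extract(roi)            # 2.3: VGG-19 feature extraction
    return classify(feats)          # 2.4/2.5: TSO-tuned GRU classification
</preformat>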
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Overall process of ADRU-SCM method</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-1.png"/>
</fig>
<sec id="s2_1">
<label>2.1</label>
<title>WF Based Pre-Processing</title>
<p>The presented ADRU-SCM approach primarily applies WF based pre-processing to remove the noise present in the input image. Noise removal is an image pre-processing technique in which the features of an image corrupted by noise are enhanced [<xref ref-type="bibr" rid="ref-16">16</xref>]. The adaptive filter is a particular instance in which the denoising procedure depends entirely on the noise content present in the image. Let the corrupted image be <inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mrow><mml:mover><mml:mi>I</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula>, let the noise variance over the complete image be denoted by <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>,</mml:mo></mml:math></inline-formula> let the local mean about a pixel window be represented by <inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:mrow><mml:mover><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>&#x005E;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula>, and let the local variance in the window be denoted by <inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:msubsup><mml:mrow><mml:mover><mml:mi>&#x03C3;</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula>. The denoised image is then estimated by the following expression:</p>
<p><disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:mrow><mml:mover><mml:mrow><mml:mover><mml:mi>I</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mover><mml:mi>I</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:msubsup><mml:mrow><mml:mover><mml:mi>&#x03C3;</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mfrac><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mover><mml:mi>I</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mover><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Note that when the noise variance over the whole image is zero, <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>=&#x003E;</mml:mo><mml:mrow><mml:mover><mml:mrow><mml:mover><mml:mi>I</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mover><mml:mi>I</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula>. When the global noise variance is low while the local variance is large in comparison, the variance ratio approaches zero and the filter leaves the pixel nearly unchanged.</p>
<p>When <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:msubsup><mml:mrow><mml:mover><mml:mi>&#x03C3;</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x226B;</mml:mo><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula>, <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:mrow><mml:mover><mml:mrow><mml:mover><mml:mrow><mml:mtext>I</mml:mtext></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mrow><mml:mover><mml:mrow><mml:mtext>I</mml:mtext></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>. A high local variance indicates the presence of an edge in the image window, which the filter therefore preserves. When the local and global variances match one another, the output reduces to the local mean:</p>
<p><disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:mrow><mml:mover><mml:mrow><mml:mover><mml:mi>I</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mover><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mrow><mml:mi>L</mml:mi></mml:mrow></mml:msub><mml:mo>&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mtext>&#xA0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>s</mml:mi><mml:mtext>&#xA0;</mml:mtext><mml:msubsup><mml:mrow><mml:mover><mml:mi>&#x03C3;</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>&#x2248;</mml:mo><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></disp-formula></p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Image Segmentation</title>
<p>The ADRU-SCM model follows a deep residual U-Net segmentation model to determine the affected brain regions. In designing the segmentation model, the researchers used a DL algorithm, U-Net with residual connections [<xref ref-type="bibr" rid="ref-17">17</xref>]. U-Net can be improved by using residual units rather than plain units; the residual connections increase the capacity and performance of the network. ResUNet incorporates the robustness of residual neural networks into the U-Net architecture. It encompasses three major components: encoder, bridge, and decoder. In the encoder, the input image is encoded into denser representations, and the decoder recovers this representation as a pixel-wise classification. These components are built from residual units comprising two convolutional blocks of size <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn></mml:math></inline-formula> and an identity mapping that connects the input and output of the unit. Each convolutional block contains a batch normalization (BN) layer, a rectified linear unit (ReLU) activation, and a convolutional layer. Instead of applying a pooling operation to downsample the feature maps, the residual units in the encoder use convolutions with a stride of 2, halving the feature map size. Following the last part of the decoder, a convolution of size <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:mn>1</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula> with a sigmoid activation layer is applied to project the multichannel feature map onto the target segmentation. Residual-unit U-Net semantic segmentation is chosen because it performs well with limited training instances and provides efficient outcomes for segmentation tasks; furthermore, U-Net was originally constructed for image segmentation.</p>
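<p>A minimal PyTorch sketch of such a residual U-Net is shown below, assuming single-channel MR input, a three-level encoder, and illustrative channel widths; the actual depth and widths used in the paper are not specified here.</p>
<preformat>import torch
import torch.nn as nn

class ResBlock(nn.Module):
    """Residual unit: two (BN + ReLU + 3x3 Conv) blocks plus an identity mapping."""
    def __init__(self, in_ch, out_ch, stride=1):
        super().__init__()
        self.body = nn.Sequential(
            nn.BatchNorm2d(in_ch), nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, 3, stride=stride, padding=1),
            nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
        )
        self.skip = nn.Conv2d(in_ch, out_ch, 1, stride=stride)  # shape-matching identity

    def forward(self, x):
        return self.body(x) + self.skip(x)

class ResUNet(nn.Module):
    """Encoder, bridge, decoder; stride-2 residual units replace pooling.
    Input height and width are assumed divisible by 4."""
    def __init__(self, in_ch=1, n_classes=1, widths=(64, 128, 256)):
        super().__init__()
        w1, w2, w3 = widths
        self.enc1 = ResBlock(in_ch, w1)           # full resolution
        self.enc2 = ResBlock(w1, w2, stride=2)    # 1/2 resolution
        self.bridge = ResBlock(w2, w3, stride=2)  # 1/4 resolution
        self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
        self.dec2 = ResBlock(w3 + w2, w2)
        self.dec1 = ResBlock(w2 + w1, w1)
        self.head = nn.Sequential(nn.Conv2d(w1, n_classes, 1), nn.Sigmoid())  # 1x1 + sigmoid

    def forward(self, x):
        e1 = self.enc1(x)
        e2 = self.enc2(e1)
        b = self.bridge(e2)
        d2 = self.dec2(torch.cat([self.up(b), e2], dim=1))
        d1 = self.dec1(torch.cat([self.up(d2), e1], dim=1))
        return self.head(d1)  # per-pixel tumor probability map
</preformat>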
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>VGG-19 Feature Extractor</title>
<p>VGG19 is a nineteen-layer variant of the VGG architecture. It involves one softmax layer, 16 convolution layers, 3 fully-connected (FC) layers, and 5 max-pooling layers [<xref ref-type="bibr" rid="ref-18">18</xref>]. Further VGG versions exist, namely VGG16, VGG11, etc. VGG19 has a computing cost of 19.6 billion floating-point operations (FLOPs). A convolutional neural network (CNN) has three major layer types: (i) pooling layers; (ii) fully-connected (FC) layers; and (iii) convolutional layers. Before the FC layers perform the final classification, the input is processed by several convolution and pooling layers. A CNN that has already been trained can be reused as a feature extractor. With a previously trained network as the feature extractor, deep CNNs can be applied to smaller datasets in other fields, because the feature extractor does not need to be trained again. The VGG19 network was originally trained to recognize objects; for instance, when DenseBox, a fully convolutional network architecture for object detection, was introduced, it employed a VGG19 architecture pre-trained on ImageNet.</p>
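<p>A brief sketch of this reuse pattern is given below, assuming the torchvision implementation of VGG19; the 224&#x00A0;&#x00D7;&#x00A0;224 input size and the flattening of the final 512&#x00A0;&#x00D7;&#x00A0;7&#x00A0;&#x00D7;&#x00A0;7 map are standard VGG conventions, not details taken from the paper.</p>
<preformat>import torch
import torchvision.models as models

# Load VGG19 pre-trained on ImageNet and freeze it as a fixed feature extractor.
vgg19 = models.vgg19(weights=models.VGG19_Weights.IMAGENET1K_V1)
vgg19.eval()
for p in vgg19.parameters():
    p.requires_grad = False

# Keep the convolutional trunk and drop the classifier head, so the
# network yields feature vectors instead of class scores.
extractor = torch.nn.Sequential(vgg19.features, vgg19.avgpool, torch.nn.Flatten())

# e.g., segmented brain ROIs resized to 224x224, grayscale replicated to 3 channels
batch = torch.randn(4, 3, 224, 224)
with torch.no_grad():
    feats = extractor(batch)   # shape: (4, 25088) feature vectors
</preformat>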
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>GRU Based Classification</title>
<p>Once the features are derived, the GRU approach is executed as the classification model. The most important shortcoming of the traditional recurrent neural network (RNN) is that, as the number of time steps increases, the network fails to carry context from the time steps of earlier states; this is termed the long-term dependency problem [<xref ref-type="bibr" rid="ref-19">19</xref>]. To resolve this problem, the long short-term memory (LSTM) technique augments the hidden layer with memory cells controlled by multiple gates:
<list list-type="bullet">
<list-item>
<p>The <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> forget gate selects that measure long-term state <inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> need to be neglected;</p></list-item>
<list-item>
<p>The <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:msub><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> input gate control that measures of <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:msub><mml:mrow><mml:mover><mml:mi>c</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> should be additional to long-term form <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>;</mml:mo></mml:math></inline-formula></p></list-item>
<list-item>
<p>The <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:msub><mml:mi>g</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> output gate describes that amount of <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> should be read and output to <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>.</mml:mo></mml:math></inline-formula></p></list-item>
</list></p>
<p>The following equations describe the long- and short-term states of the cell and the output of each layer at each time step:</p>
<p><disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi><mml:mi>f</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p><disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:msub><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>i</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>i</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p><disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>o</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>o</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p><disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:msub><mml:mi>g</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>t</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>h</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>g</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>g</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p><disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2297;</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2297;</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>c</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p><disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>g</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2297;</mml:mo><mml:mi>t</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>h</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p>In these expressions, <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mi>f</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mi>o</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mi>g</mml:mi></mml:math></inline-formula> determine the weight matrices for the short-term state of the previous time step, <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mi>f</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mi>o</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mi>g</mml:mi></mml:math></inline-formula> indicate the weight matrices connecting the input vector, and <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mrow><mml:mtext>&#xA0;and&#xA0;</mml:mtext></mml:mrow><mml:msub><mml:mrow><mml:mtext>b</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>g</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> are bias terms. The GRU realizes the same transfer and collection of information as the LSTM, but with a simpler gating structure:</p>
<p><disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p><disp-formula id="eqn-10"><label>(10)</label><mml:math id="mml-eqn-10" display="block"><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mi>z</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>z</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>z</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p><disp-formula id="eqn-11"><label>(11)</label><mml:math id="mml-eqn-11" display="block"><mml:msub><mml:mrow><mml:mover><mml:mi>o</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>t</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>h</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mrow><mml:mover><mml:mi>o</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mrow><mml:mover><mml:mi>o</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x22C5;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2297;</mml:mo><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mrow><mml:mover><mml:mi>o</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p><disp-formula id="eqn-12"><label>(12)</label><mml:math id="mml-eqn-12" display="block"><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2297;</mml:mo><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2297;</mml:mo><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p>In these equations, <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mi>r</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mi>z</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mrow><mml:mover><mml:mi>o</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> refer to the weight matrices connecting the input vector, <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mi>z</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denote the weight matrices of the preceding time step, and <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>z</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mrow><mml:mover><mml:mi>o</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> are bias terms.</p>
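<p>A compact PyTorch sketch of the resulting classifier is given below; the gating of <xref ref-type="disp-formula" rid="eqn-9">Eq. (9)</xref>&#x2013;<xref ref-type="disp-formula" rid="eqn-12">Eq. (12)</xref> is provided by the built-in GRU cell. The reshaping of the VGG-19 feature vector into a 49-step sequence of 512-dimensional inputs (the 7&#x00A0;&#x00D7;&#x00A0;7 spatial grid of VGG features) is an illustrative assumption, and the hidden width is arbitrary since it is the kind of quantity tuned by TSO in Section 2.5.</p>
<preformat>import torch
import torch.nn as nn

class GRUClassifier(nn.Module):
    """GRU over a sequence view of the VGG-19 feature vector (Eqs. (9)-(12))."""
    def __init__(self, in_dim=512, hidden=128, n_classes=3):
        super().__init__()
        self.gru = nn.GRU(in_dim, hidden, batch_first=True)
        self.fc = nn.Linear(hidden, n_classes)

    def forward(self, feats):                     # feats: (N, 25088) from VGG-19
        seq = feats.view(feats.size(0), 49, 512)  # assumed sequence view: (N, 49, 512)
        _, h_n = self.gru(seq)                    # h_n: final hidden state, (1, N, hidden)
        return self.fc(h_n.squeeze(0))            # logits for MEN / GLI / PIT
</preformat>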
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Hyperparameter Optimization</title>
<p>At the final stage, the TSO algorithm effectually tunes the GRU hyperparameters [<xref ref-type="bibr" rid="ref-20">20</xref>&#x2013;<xref ref-type="bibr" rid="ref-22">22</xref>]. Kaur&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-23">23</xref>] proposed a bio-inspired optimization approach that simulates the natural foraging behavior of the tunicate, a marine invertebrate that discharges bright bioluminescence. The numerical model of jet propulsion is built on three constraints: remaining near the optimal agent, preventing conflicts among the search agents, and following the position of the best-qualified agent. <xref ref-type="fig" rid="fig-2">Fig. 2</xref> shows the flowchart of the TSO technique. To prevent inter-agent conflicts while seeking the optimal position, a new agent position is evaluated as:</p>
<p><disp-formula id="eqn-13"><label>(13)</label><mml:math id="mml-eqn-13" display="block"><mml:mrow><mml:mover><mml:mi>A</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mover><mml:mi>G</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mover><mml:mi>M</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
<p><disp-formula id="eqn-14"><label>(14)</label><mml:math id="mml-eqn-14" display="block"><mml:mrow><mml:mover><mml:mi>G</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mover><mml:mi>F</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:math></disp-formula></p>
<p><disp-formula id="eqn-15"><label>(15)</label><mml:math id="mml-eqn-15" display="block"><mml:mrow><mml:mover><mml:mi>F</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x22C5;</mml:mo><mml:mrow><mml:mover><mml:mi>F</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p>In which <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:mrow><mml:mover><mml:mi>A</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> implies the vector of a new agent places, <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:mrow><mml:mover><mml:mi>G</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> denotes the gravity force, <inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:mrow><mml:mover><mml:mi>F</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> stands for the water flow from the deep ocean, and <inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:math></inline-formula> <inline-formula id="ieqn-30"><mml:math id="mml-ieqn-30"><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-31"><mml:math id="mml-ieqn-31"><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> signifies the three arbitrary amounts. The social force amongst the agents was stored from a new vector <inline-formula id="ieqn-32"><mml:math id="mml-ieqn-32"><mml:mrow><mml:mover><mml:mi>M</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> is represented as:</p>
<p><disp-formula id="eqn-16"><label>(16)</label><mml:math id="mml-eqn-16" display="block"><mml:mrow><mml:mover><mml:mi>M</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mo movablelimits="true" form="prefix">min</mml:mo></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mo movablelimits="true" form="prefix">max</mml:mo></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mo movablelimits="true" form="prefix">min</mml:mo></mml:mrow></mml:msub><mml:mo>]</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p>At this point, <inline-formula id="ieqn-33"><mml:math id="mml-ieqn-33"><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mo movablelimits="true" form="prefix">min</mml:mo></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula> and <inline-formula id="ieqn-34"><mml:math id="mml-ieqn-34"><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mo movablelimits="true" form="prefix">max</mml:mo></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>4</mml:mn></mml:math></inline-formula> define the initial and subordinate speeds for making social interaction, respectively. After jet propulsion, following the optimal agent is essential to reach the optimal solution. Therefore, to ensure that no conflict occurs among neighboring agents in the swarm, the distance between the search agent and the food source is computed as:</p>
<p><disp-formula id="eqn-17"><label>(17)</label><mml:math id="mml-eqn-17" display="block"><mml:mover><mml:mrow><mml:mi>P</mml:mi><mml:mi>D</mml:mi></mml:mrow><mml:mo>&#x2192;</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:mrow><mml:mo>|</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo>&#x22C5;</mml:mo><mml:mover><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>&#x2192;</mml:mo></mml:mover><mml:mo>|</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>In which <inline-formula id="ieqn-35"><mml:math id="mml-ieqn-35"><mml:mover><mml:mrow><mml:mi>P</mml:mi><mml:mi>D</mml:mi></mml:mrow><mml:mo>&#x2192;</mml:mo></mml:mover></mml:math></inline-formula> stored the length amongst the optimal agents and food origin, <inline-formula id="ieqn-36"><mml:math id="mml-ieqn-36"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the optimum place, <inline-formula id="ieqn-37"><mml:math id="mml-ieqn-37"><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates the stochastic value from the range of [0,1], and the vector <inline-formula id="ieqn-38"><mml:math id="mml-ieqn-38"><mml:mover><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x2192;</mml:mo></mml:mover></mml:math></inline-formula> is the place of tunicates at the time of iteration <inline-formula id="ieqn-39"><mml:math id="mml-ieqn-39"><mml:mi>x</mml:mi></mml:math></inline-formula>. For ensuring that search agent is still nearby an optimum agent, their places were computed as:</p>
<p><disp-formula id="eqn-18"><label>(18)</label><mml:math id="mml-eqn-18" display="block"><mml:mrow><mml:mover accent='true'><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mi>p</mml:mi></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mo stretchy='true'>&#x2192;</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo> <mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mi>A</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:mover accent='true'><mml:mi>P</mml:mi><mml:mo>&#x2192;</mml:mo></mml:mover><mml:mi>D</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mrow><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2265;</mml:mo><mml:mn>0.5</mml:mn></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mi>A</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:mover accent='true'><mml:mi>P</mml:mi><mml:mo>&#x2192;</mml:mo></mml:mover><mml:mi>D</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mrow><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo>&#x003C;</mml:mo><mml:mn>0.5</mml:mn></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow> </mml:mrow><mml:mtext>&#x00A0;</mml:mtext></mml:mrow></mml:math></disp-formula></p>
<p>In which <inline-formula id="ieqn-40"><mml:math id="mml-ieqn-40"><mml:mover><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x2192;</mml:mo></mml:mover></mml:math></inline-formula> represents the upgrade places of agents at iteration <inline-formula id="ieqn-41"><mml:math id="mml-ieqn-41"><mml:mi>x</mml:mi></mml:math></inline-formula> in comparison to optimum recorded place <inline-formula id="ieqn-42"><mml:math id="mml-ieqn-42"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. In order to model the swarming performance of tunicates, the places of current agent are upgraded on the fundamental of the places of 2 agents:</p>
<p><disp-formula id="eqn-19"><label>(19)</label><mml:math id="mml-eqn-19" display="block"><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mover><mml:mrow><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x2192;</mml:mo></mml:mover><mml:mo>+</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mo>+</mml:mo><mml:mi>c</mml:mi><mml:mn>1</mml:mn></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Flowchart of TSO technique</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-2.png"/>
</fig>
<p>To clarify TSO, its important steps are given below to depict the flow of the original algorithm thoroughly [<xref ref-type="bibr" rid="ref-24">24</xref>]; a code sketch follows the fitness definition at the end of this subsection.
<list list-type="order">
<list-item>
<p>Initialize the primary population of tunicates <inline-formula id="ieqn-43"><mml:math id="mml-ieqn-43"><mml:msub><mml:mrow><mml:mover><mml:mi>P</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mo>.</mml:mo></mml:math></inline-formula></p></list-item>
<list-item>
<p>Set the initial parameter values and the maximum number of iterations.</p></list-item>
<list-item>
<p>Compute the fitness value of every search agent.</p></list-item>
<list-item>
<p>After estimating the fitness, the optimal agent is selected from the given search space.</p></list-item>
<list-item>
<p>Update the positions of all search agents using <xref ref-type="disp-formula" rid="eqn-19">Eq. (19)</xref>.</p></list-item>
<list-item>
<p>Return newly updated agents that leave the search space to its boundary.</p></list-item>
<list-item>
<p>Compute the fitness of the updated search agents. If a solution better than the preceding ones is found, update <inline-formula id="ieqn-44"><mml:math id="mml-ieqn-44"><mml:msub><mml:mrow><mml:mover><mml:mi>P</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and store the optimal solution in <inline-formula id="ieqn-45"><mml:math id="mml-ieqn-45"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>.</mml:mo></mml:math></inline-formula></p></list-item>
<list-item>
<p>If the termination criterion is met, the process ends; otherwise, repeat Steps 5&#x2013;8.</p></list-item>
<list-item>
<p>Report the optimal solution <inline-formula id="ieqn-46"><mml:math id="mml-ieqn-46"><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula> reached so far.</p></list-item>
</list></p>
<p>The TSO algorithm employs a fitness function to achieve maximal classifier efficiency. It assigns a positive value representing the quality of each candidate solution. In this case, minimizing the classification error rate is regarded as the fitness function (FF), as given in <xref ref-type="disp-formula" rid="eqn-20">Eq. (20)</xref>.</p>
<p><disp-formula id="eqn-20"><label>(20)</label><mml:math id="mml-eqn-20" display="block"><mml:mrow><mml:mi mathvariant="italic">f</mml:mi><mml:mi mathvariant="italic">i</mml:mi><mml:mi mathvariant="italic">t</mml:mi><mml:mi mathvariant="italic">n</mml:mi><mml:mi mathvariant="italic">e</mml:mi><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">s</mml:mi></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mi mathvariant="italic">C</mml:mi><mml:mi mathvariant="italic">l</mml:mi><mml:mi mathvariant="italic">a</mml:mi><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">i</mml:mi><mml:mi mathvariant="italic">f</mml:mi><mml:mi mathvariant="italic">i</mml:mi><mml:mi mathvariant="italic">e</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">E</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">R</mml:mi><mml:mi mathvariant="italic">a</mml:mi><mml:mi mathvariant="italic">t</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mi mathvariant="italic">n</mml:mi><mml:mi mathvariant="italic">u</mml:mi><mml:mi mathvariant="italic">m</mml:mi><mml:mi mathvariant="italic">b</mml:mi><mml:mi mathvariant="italic">e</mml:mi><mml:mi mathvariant="italic">r</mml:mi></mml:mrow><mml:mtext>&#xA0;</mml:mtext><mml:mi>o</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">m</mml:mi><mml:mi mathvariant="italic">i</mml:mi><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">l</mml:mi><mml:mi mathvariant="italic">a</mml:mi><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">i</mml:mi><mml:mi mathvariant="italic">f</mml:mi><mml:mi mathvariant="italic">i</mml:mi><mml:mi mathvariant="italic">e</mml:mi><mml:mi mathvariant="italic">d</mml:mi></mml:mrow><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">a</mml:mi><mml:mi mathvariant="italic">m</mml:mi><mml:mi mathvariant="italic">p</mml:mi><mml:mi mathvariant="italic">l</mml:mi><mml:mi mathvariant="italic">e</mml:mi><mml:mi mathvariant="italic">s</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mi mathvariant="italic">T</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">t</mml:mi><mml:mi mathvariant="italic">a</mml:mi><mml:mi mathvariant="italic">l</mml:mi></mml:mrow><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">n</mml:mi><mml:mi mathvariant="italic">u</mml:mi><mml:mi mathvariant="italic">m</mml:mi><mml:mi mathvariant="italic">b</mml:mi><mml:mi mathvariant="italic">e</mml:mi><mml:mi mathvariant="italic">r</mml:mi></mml:mrow><mml:mtext>&#xA0;</mml:mtext><mml:mi>o</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#xA0;</mml:mtext><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">a</mml:mi><mml:mi mathvariant="italic">m</mml:mi><mml:mi mathvariant="italic">p</mml:mi><mml:mi mathvariant="italic">l</mml:mi><mml:mi mathvariant="italic">e</mml:mi><mml:mi 
mathvariant="italic">s</mml:mi></mml:mrow></mml:mrow></mml:mfrac><mml:mo>&#x2217;</mml:mo><mml:mn>100</mml:mn></mml:math></disp-formula></p>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Results and Discussion</title>
<p>The performance of the ADRU-SCM technique was validated on the Figshare dataset [<xref ref-type="bibr" rid="ref-25">25</xref>]. The dataset comprises three class labels, with 150 images under the Meningioma (MEN), 150 images under the Glioma (GLI), and 150 images under the Pituitary (PIT) class, as demonstrated in <xref ref-type="table" rid="table-1">Tab. 1</xref>.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption>
<title>Dataset details</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Class names</th>
<th>No. of images</th>
</tr>
</thead>
<tbody>
<tr>
<td>Meningioma (MEN)</td>
<td>150</td>
</tr>
<tr>
<td>Glioma (GLI)</td>
<td>150</td>
</tr>
<tr>
<td>Pituitary (PIT)</td>
<td>150</td>
</tr>
<tr>
<td>Total</td>
<td>450</td>
</tr>
</tbody>
</table>
</table-wrap>
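<p>For readers reproducing this setup: the Figshare release of [<xref ref-type="bibr" rid="ref-25">25</xref>] distributes each MRI slice as a MATLAB v7.3 (HDF5) .mat file whose cjdata record holds the image, an integer class label (1 = meningioma, 2 = glioma, 3 = pituitary), and a tumor mask. The loading sketch below rests on that published file layout; how the 450-image subset in Tab. 1 was drawn from the full release is not specified here, so the selection step is left to the reader.</p>
<preformat>
import h5py
import numpy as np

def load_slice(path):
    """Read one slice of the Figshare brain tumor dataset [25].

    Each .mat file (MATLAB v7.3, i.e., HDF5) carries a cjdata record
    with the raw image, an integer class label, and a tumor mask.
    """
    with h5py.File(path, "r") as f:
        image = np.array(f["cjdata/image"], dtype=np.float32)
        label = int(np.array(f["cjdata/label"]).squeeze())  # 1=MEN, 2=GLI, 3=PIT
        mask = np.array(f["cjdata/tumorMask"], dtype=np.uint8)
    return image, label, mask
</preformat>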
<p>The set of confusion matrices produced by the ADRU-SCM model over five distinct runs is given in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>. On run-1, the ADRU-SCM model recognized 136 samples under the MEN class, 132 under the GLI class, and 146 under the PIT class. Likewise, on run-3, the ADRU-SCM approach identified 140 samples under the MEN class, 150 under the GLI class, and 143 under the PIT class. Moreover, on run-5, the ADRU-SCM method recognized 134 samples under the MEN class, 143 under the GLI class, and 143 under the PIT class.</p>
<fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Confusion matrices of ADRU-SCM approach (a) Run1, (b) Run2, (c) Run3, (d) Run4, and (e) Run5</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-3.png"/>
</fig>
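<p>The per-class measures reported below follow directly from such confusion matrices. As a hedged one-vs-rest sketch (illustrative, not the authors' code), each class's accuracy, sensitivity, specificity, and F-score can be derived as follows; applied to the run-1 counts in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>, it reproduces the MEN-class values of 95.78%, 90.67%, 98.33%, and 93.47% in <xref ref-type="table" rid="table-2">Tab. 2</xref>.</p>
<preformat>
import numpy as np

def per_class_metrics(cm):
    """One-vs-rest metrics (%) from a KxK confusion matrix.

    cm[i, j] = number of class-i samples predicted as class j.
    """
    cm = np.asarray(cm, dtype=float)
    total = cm.sum()
    out = {}
    for k in range(cm.shape[0]):
        tp = cm[k, k]
        fn = cm[k, :].sum() - tp          # class-k samples missed
        fp = cm[:, k].sum() - tp          # other samples labeled k
        tn = total - tp - fn - fp
        sens = tp / (tp + fn)
        spec = tn / (tn + fp)
        prec = tp / (tp + fp)
        out[k] = {
            "accuracy": 100 * (tp + tn) / total,
            "sensitivity": 100 * sens,
            "specificity": 100 * spec,
            "f_score": 100 * 2 * prec * sens / (prec + sens),
        }
    return out
</preformat>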
<p><xref ref-type="table" rid="table-2">Tab. 2</xref> offers overall classification outcomes of the ADRU-SCM methodology under five distinct runs. <xref ref-type="fig" rid="fig-4">Fig. 4</xref> portrays brief classifier results of the ADRU-SCM model under run-1. The figure inferred that the ADRU-SCM model has reached effectual classification performance under all classes. For sample, the ADRU-SCM model has classified samples under MEN class with <inline-formula id="ieqn-47"><mml:math id="mml-ieqn-47"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-48"><mml:math id="mml-ieqn-48"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-49"><mml:math id="mml-ieqn-49"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-50"><mml:math id="mml-ieqn-50"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 95.78%, 90.67%, 98.33%, and 93.47% respectively. Also, the ADRU-SCM technique has classified samples under GLI class with <inline-formula id="ieqn-51"><mml:math id="mml-ieqn-51"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-52"><mml:math id="mml-ieqn-52"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-53"><mml:math id="mml-ieqn-53"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-54"><mml:math id="mml-ieqn-54"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 95.11%, 88%, 98.67%, and 92.31% correspondingly. 
Besides, the ADRU-SCM approach has classified samples under PIT class with <inline-formula id="ieqn-55"><mml:math id="mml-ieqn-55"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-56"><mml:math id="mml-ieqn-56"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-57"><mml:math id="mml-ieqn-57"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-58"><mml:math id="mml-ieqn-58"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 93.11%, 97.33%, 91%, and 90.40% correspondingly.</p>
<table-wrap id="table-2">
<label>Table 2</label>
<caption>
<title>Result analysis of ADRU-SCM approach with distinct measures and runs</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Class name</th>
<th>Accuracy</th>
<th>Sensitivity</th>
<th>Specificity</th>
<th>F-score</th>
<th>Kappa score</th>
</tr>
</thead>
<tbody>
<tr>
<td colspan="6" align="center">Run&#x2013;1</td>
</tr>
<tr>
<td>Meningioma</td>
<td>95.78</td>
<td>90.67</td>
<td>98.33</td>
<td>93.47</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Glioma</td>
<td>95.11</td>
<td>88.00</td>
<td>98.67</td>
<td>92.31</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Pituitary</td>
<td>93.11</td>
<td>97.33</td>
<td>91.00</td>
<td>90.40</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Average</td>
<td>94.67</td>
<td>92.00</td>
<td>96.00</td>
<td>92.06</td>
<td>88.00</td>
</tr>
<tr>
<td colspan="6" align="center">Run&#x2013;2</td>
</tr>
<tr>
<td>Meningioma</td>
<td>92.67</td>
<td>100.00</td>
<td>89.00</td>
<td>90.09</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Glioma</td>
<td>95.11</td>
<td>87.33</td>
<td>99.00</td>
<td>92.25</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Pituitary</td>
<td>96.22</td>
<td>88.67</td>
<td>100.00</td>
<td>93.99</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Average</td>
<td>94.67</td>
<td>92.00</td>
<td>96.00</td>
<td>92.11</td>
<td>88.00</td>
</tr>
<tr>
<td colspan="6" align="center">Run&#x2013;3</td>
</tr>
<tr>
<td>Meningioma</td>
<td>97.78</td>
<td>93.33</td>
<td>100.00</td>
<td>96.55</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Glioma</td>
<td>96.22</td>
<td>100.00</td>
<td>94.33</td>
<td>94.64</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Pituitary</td>
<td>98.44</td>
<td>95.33</td>
<td>100.00</td>
<td>97.61</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Average</td>
<td>97.48</td>
<td>96.22</td>
<td>98.11</td>
<td>96.27</td>
<td>94.33</td>
</tr>
<tr>
<td colspan="6" align="center">Run&#x2013;4</td>
</tr>
<tr>
<td>Meningioma</td>
<td>93.11</td>
<td>94.00</td>
<td>92.67</td>
<td>90.10</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Glioma</td>
<td>95.33</td>
<td>90.00</td>
<td>98.00</td>
<td>92.78</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Pituitary</td>
<td>93.33</td>
<td>88.67</td>
<td>95.67</td>
<td>89.86</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Average</td>
<td>93.93</td>
<td>90.89</td>
<td>95.44</td>
<td>90.91</td>
<td>86.33</td>
</tr>
<tr>
<td colspan="6" align="center">Run&#x2013;5</td>
</tr>
<tr>
<td>Meningioma</td>
<td>94.22</td>
<td>89.33</td>
<td>96.67</td>
<td>91.16</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Glioma</td>
<td>96.00</td>
<td>95.33</td>
<td>96.33</td>
<td>94.08</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Pituitary</td>
<td>96.44</td>
<td>95.33</td>
<td>97.00</td>
<td>94.70</td>
<td>&#x2013;</td>
</tr>
<tr>
<td>Average</td>
<td>95.56</td>
<td>93.33</td>
<td>96.67</td>
<td>93.31</td>
<td>90.00</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>Average analysis of ADRU-SCM approach under Run-1</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-4.png"/>
</fig>
<p><xref ref-type="fig" rid="fig-5">Fig. 5</xref> depicts detailed classifier results of the ADRU-SCM methodology under run-2. The figure implied the ADRU-SCM system has reached effectual classification performance under all classes. For example, the ADRU-SCM approach has classified samples under MEN class with <inline-formula id="ieqn-59"><mml:math id="mml-ieqn-59"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-60"><mml:math id="mml-ieqn-60"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-61"><mml:math id="mml-ieqn-61"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-62"><mml:math id="mml-ieqn-62"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 92.67%, 100%, 89%, and 90.09% correspondingly. Moreover, the ADRU-SCM method has classified samples under GLI class with <inline-formula id="ieqn-63"><mml:math id="mml-ieqn-63"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-64"><mml:math id="mml-ieqn-64"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-65"><mml:math id="mml-ieqn-65"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-66"><mml:math id="mml-ieqn-66"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 95.11%, 87.33%, 99%, and 92.25% correspondingly. 
In addition, the ADRU-SCM approach has classified samples under PIT class with <inline-formula id="ieqn-67"><mml:math id="mml-ieqn-67"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-68"><mml:math id="mml-ieqn-68"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-69"><mml:math id="mml-ieqn-69"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-70"><mml:math id="mml-ieqn-70"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 96.22%, 88.67%, 100%, and 93.99% correspondingly.</p>
<fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>Average analysis of ADRU-SCM approach under Run-2</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-5.png"/>
</fig>
<p><xref ref-type="fig" rid="fig-6">Fig. 6</xref> represents brief classifier results of the ADRU-SCM approach under run-3. The figure inferred the ADRU-SCM algorithm has reached effectual classification performance under all classes. For example, the ADRU-SCM method has classified samples under MEN class with <inline-formula id="ieqn-71"><mml:math id="mml-ieqn-71"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-72"><mml:math id="mml-ieqn-72"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-73"><mml:math id="mml-ieqn-73"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-74"><mml:math id="mml-ieqn-74"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 97.78%, 93.33%, 100%, and 96.55% respectively. Similarly, the ADRU-SCM methodology has classified samples under GLI class with <inline-formula id="ieqn-75"><mml:math id="mml-ieqn-75"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-76"><mml:math id="mml-ieqn-76"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-77"><mml:math id="mml-ieqn-77"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-78"><mml:math id="mml-ieqn-78"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 96.22%, 100%, 94.33%, and 94.64% respectively. 
Also, the ADRU-SCM model has classified samples under PIT class with <inline-formula id="ieqn-79"><mml:math id="mml-ieqn-79"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-80"><mml:math id="mml-ieqn-80"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-81"><mml:math id="mml-ieqn-81"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-82"><mml:math id="mml-ieqn-82"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 98.44%, 95.33%, 100%, and 97.61% correspondingly.</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>Average analysis of ADRU-SCM approach under Run-3</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-6.png"/>
</fig>
<p><xref ref-type="fig" rid="fig-7">Fig. 7</xref> shows brief classifier results of the ADRU-SCM method under run-4. The figure inferred that the ADRU-SCM methodology has reached effectual classification performance under all classes. For example, the ADRU-SCM system has classified samples under MEN class with <inline-formula id="ieqn-83"><mml:math id="mml-ieqn-83"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-84"><mml:math id="mml-ieqn-84"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-85"><mml:math id="mml-ieqn-85"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-86"><mml:math id="mml-ieqn-86"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 93.11%, 94%, 92.67%, and 90.10% correspondingly. Along with that, the ADRU-SCM methodology has classified samples under GLI class with <inline-formula id="ieqn-87"><mml:math id="mml-ieqn-87"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-88"><mml:math id="mml-ieqn-88"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-89"><mml:math id="mml-ieqn-89"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-90"><mml:math id="mml-ieqn-90"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 95.33%, 90%, 98%, and 92.78% correspondingly. 
Besides, the ADRU-SCM system has classified samples under PIT class with <inline-formula id="ieqn-91"><mml:math id="mml-ieqn-91"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-92"><mml:math id="mml-ieqn-92"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-93"><mml:math id="mml-ieqn-93"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-94"><mml:math id="mml-ieqn-94"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 93.33%, 88.67%, 95.67%, and 89.86% correspondingly.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>Average analysis of ADRU-SCM approach under Run-4</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-7.png"/>
</fig>
<p><xref ref-type="fig" rid="fig-8">Fig. 8</xref> reveals detailed classifier results of the ADRU-SCM method under run-5. The figure implied the ADRU-SCM system has reached effectual classification performance under all classes. For example, the ADRU-SCM methodology has classified samples under MEN class with <inline-formula id="ieqn-95"><mml:math id="mml-ieqn-95"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-96"><mml:math id="mml-ieqn-96"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-97"><mml:math id="mml-ieqn-97"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-98"><mml:math id="mml-ieqn-98"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 94.22%, 89.33%, 96.67%, and 91.16% correspondingly. Moreover, the ADRU-SCM technique has classified samples under GLI class with <inline-formula id="ieqn-99"><mml:math id="mml-ieqn-99"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-100"><mml:math id="mml-ieqn-100"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-101"><mml:math id="mml-ieqn-101"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-102"><mml:math id="mml-ieqn-102"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 96%, 95.33%, 96.33%, and 94.08% correspondingly. 
Also, the ADRU-SCM system has classified samples under PIT class with <inline-formula id="ieqn-103"><mml:math id="mml-ieqn-103"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-104"><mml:math id="mml-ieqn-104"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-105"><mml:math id="mml-ieqn-105"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-106"><mml:math id="mml-ieqn-106"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mi mathvariant="italic">s</mml:mi><mml:mi mathvariant="italic">c</mml:mi><mml:mi mathvariant="italic">o</mml:mi><mml:mi mathvariant="italic">r</mml:mi><mml:mi mathvariant="italic">e</mml:mi></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 96.44%, 95.33%, 97%, and 94.70% correspondingly.</p>
<fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>Average analysis of ADRU-SCM approach under Run-5</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-8.png"/>
</fig>
<p>The training accuracy (TA) and validation accuracy (VA) obtained by the ADRU-SCM method on BT classification are illustrated in <xref ref-type="fig" rid="fig-9">Fig. 9</xref>. The experimental outcome inferred that the ADRU-SCM technique has reached maximum values of TA and VA. Particularly, the VA appeared higher than the TA.</p>
<fig id="fig-9">
<label>Figure 9</label>
<caption>
<title>TA and VA analysis of ADRU-SCM methodology</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-9.png"/>
</fig>
<p>The training loss (TL) and validation loss (VL) attained by the ADRU-SCM approach on BT classification are shown in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>. The experimental outcome implied that the ADRU-SCM method has accomplished the least values of TL and VL. Specifically, the VL seemed to be lower than the TL.</p>
<fig id="fig-10">
<label>Figure 10</label>
<caption>
<title>TL and VL analysis of ADRU-SCM methodology</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-10.png"/>
</fig>
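<p>Figs. 9 and 10 are standard learning-curve plots. Assuming a Keras-style history dictionary with 'accuracy', 'val_accuracy', 'loss', and 'val_loss' keys (an assumption, since the training framework is not shown in this section), they can be reproduced with a sketch such as:</p>
<preformat>
import matplotlib.pyplot as plt

def plot_learning_curves(history):
    """Plot TA/VA (Fig. 9 style) and TL/VL (Fig. 10 style) curves."""
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    ax1.plot(history["accuracy"], label="Training accuracy (TA)")
    ax1.plot(history["val_accuracy"], label="Validation accuracy (VA)")
    ax1.set_xlabel("Epoch"); ax1.set_ylabel("Accuracy"); ax1.legend()
    ax2.plot(history["loss"], label="Training loss (TL)")
    ax2.plot(history["val_loss"], label="Validation loss (VL)")
    ax2.set_xlabel("Epoch"); ax2.set_ylabel("Loss"); ax2.legend()
    fig.tight_layout()
    plt.show()
</preformat>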
<p>At last, a brief comparative analysis of the ADRU-SCM approach with existing DL models is performed in <xref ref-type="table" rid="table-3">Tab. 3</xref> [<xref ref-type="bibr" rid="ref-26">26</xref>]. <xref ref-type="fig" rid="fig-11">Fig. 11</xref> highlights the comparative <inline-formula id="ieqn-107"><mml:math id="mml-ieqn-107"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> investigation of the ADRU-SCM method with existing models. The figure indicated that the MobileNetV2 approach has shown ineffective outcomes with least <inline-formula id="ieqn-108"><mml:math id="mml-ieqn-108"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 92.78%. Similarly, the Inception v3 and ResNet50 models have obtained slightly increased <inline-formula id="ieqn-109"><mml:math id="mml-ieqn-109"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 93.34% and 93.10% respectively. Though the hybrid gravitational search optimization (HGSO) and DenseNet201 models have resulted in reasonable <inline-formula id="ieqn-110"><mml:math id="mml-ieqn-110"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 96.66% and 94.63%, the ADRU-SCM model has shown effectual outcomes with higher <inline-formula id="ieqn-111"><mml:math id="mml-ieqn-111"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 97.48%.</p>
<table-wrap id="table-3">
<label>Table 3</label>
<caption>
<title>Comparative analysis of ADRU-SCM approach with existing methodologies</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Methods</th>
<th>Accuracy</th>
<th>Kappa score</th>
</tr>
</thead>
<tbody>
<tr>
<td>ADRU-SCM</td>
<td>97.48</td>
<td>94.33</td>
</tr>
<tr>
<td>HGSO model</td>
<td>96.66</td>
<td>91.87</td>
</tr>
<tr>
<td>Inception V3 model</td>
<td>93.34</td>
<td>88.52</td>
</tr>
<tr>
<td>MobileNet V2 model</td>
<td>92.78</td>
<td>86.75</td>
</tr>
<tr>
<td>DenseNet201 model</td>
<td>94.63</td>
<td>89.87</td>
</tr>
<tr>
<td>ResNet50 model</td>
<td>93.10</td>
<td>90.61</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="fig-11">
<label>Figure 11</label>
<caption>
<title><inline-formula id="ieqn-112"><mml:math id="mml-ieqn-112"><mml:mi>A</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> analysis of ADRU-SCM method with existing algorithms</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-11.png"/>
</fig>
<p><xref ref-type="fig" rid="fig-12">Fig. 12</xref> highlights the comparative kappa examination of the ADRU-SCM method with recent models. The figure denoted the MobileNetV2 technique has shown an ineffective outcome with least kappa of 86.75%. Meanwhile, the Inception v3 and ResNet50 techniques have gained slightly increased kappa of 88.52% and 86.75% correspondingly. Though the HGSO and DenseNet201 approaches have resulted in reasonable kappa of 91.87% and 89.87%, the ADRU-SCM system has shown effectual outcome with higher kappa of 94.33%.</p>
<fig id="fig-12">
<label>Figure 12</label>
<caption>
<title>Kappa analysis of ADRU-SCM approach with existing algorithms</title>
</caption>
<graphic mimetype="image" mime-subtype="png" xlink:href="CMC_32816-fig-12.png"/>
</fig>
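<p>The kappa values compared here are Cohen's kappa, which corrects raw agreement for the agreement expected by chance. A compact sketch of the computation from a confusion matrix is given below; with the run-1 matrix of <xref ref-type="fig" rid="fig-3">Fig. 3</xref> (414 of 450 samples on the diagonal, 150 true samples per class) it reproduces the kappa of 88.00 reported in <xref ref-type="table" rid="table-2">Tab. 2</xref>.</p>
<preformat>
import numpy as np

def cohen_kappa(cm):
    """Cohen's kappa (%) from a KxK confusion matrix.

    kappa = (p_o - p_e) / (1 - p_e), where p_o is the observed
    agreement and p_e the chance agreement from the marginals.
    """
    cm = np.asarray(cm, dtype=float)
    n = cm.sum()
    p_o = np.trace(cm) / n
    p_e = np.sum(cm.sum(axis=0) * cm.sum(axis=1)) / (n * n)
    return 100 * (p_o - p_e) / (1 - p_e)
</preformat>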
<p>These results clearly point out the better performance of the ADRU-SCM model over recent approaches.</p>
</sec>
<sec id="s4">
<label>4</label>
<title>Conclusion</title>
<p>In this study, a novel ADRU-SCM model was developed for the segmentation and classification of BT. The presented ADRU-SCM approach initially applies WF-based pre-processing to eradicate the noise that exists in the input images. In addition, the ADRU-SCM model follows the deep residual U-Net segmentation model to determine the affected brain regions. Moreover, the VGG-19 model is exploited as a feature extractor. Finally, the TSO with GRU model is applied as a classification model, and the TSO algorithm effectually tunes the GRU hyperparameters. The performance of the ADRU-SCM model was validated on the Figshare dataset, and the results pointed out its better performance over recent approaches. Thus, the ADRU-SCM model can be applied to carry out the BT classification procedure. In the future, the performance of the ADRU-SCM approach can be enhanced by the use of metaheuristic-based deep instance segmentation models.</p>
</sec>
</body>
<back>
<fn-group>
<fn fn-type="other"><p><bold>Funding Statement:</bold> This work was supported by the 2022 Yeungnam University Research Grant.</p>
</fn>
<fn fn-type="conflict"><p><bold>Conflicts of Interest:</bold> The authors declare that they have no conflicts of interest to report regarding the present study.</p>
</fn>
</fn-group>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Tiwari</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Srivastava</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Pant</surname></string-name></person-group>, &#x201C;<article-title>Brain tumor segmentation and classification from magnetic resonance images: Review of selected methods from 2014 to 2019</article-title>,&#x201D; <source>Pattern Recognition Letters</source>, vol. <volume>131</volume>, no. <issue>9</issue>, pp. <fpage>244</fpage>&#x2013;<lpage>260</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>G.</given-names> <surname>Neelima</surname></string-name>, <string-name><given-names>D. R.</given-names> <surname>Chigurukota</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Maram</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Girirajan</surname></string-name></person-group>, &#x201C;<article-title>Optimal DeepMRSeg based tumor segmentation with GAN for brain tumor classification</article-title>,&#x201D; <source>Biomedical Signal Processing and Control</source>, vol. <volume>74</volume>, no. <issue>1</issue>, pp. <fpage>103537</fpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Kumar</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Boda</surname></string-name></person-group>, &#x201C;<article-title>A multi-objective randomly updated beetle swarm and multi-verse optimization for brain tumor segmentation and classification</article-title>,&#x201D; <source>The Computer Journal</source>, vol. <volume>65</volume>, no. <issue>4</issue>, pp. <fpage>1029</fpage>&#x2013;<lpage>1052</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>B.</given-names> <surname>Tahir</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Iqbal</surname></string-name>, <string-name><given-names>M. U. G.</given-names> <surname>Khan</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Saba</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Mehmood</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Feature enhancement framework for brain tumor segmentation and classification</article-title>,&#x201D; <source>Microscopy Research and Technique</source>, vol. <volume>82</volume>, no. <issue>6</issue>, pp. <fpage>803</fpage>&#x2013;<lpage>811</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E. S.</given-names> <surname>Biratu</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Schwenker</surname></string-name>, <string-name><given-names>Y. M.</given-names> <surname>Ayano</surname></string-name> and <string-name><given-names>T. G.</given-names> <surname>Debelee</surname></string-name></person-group>, &#x201C;<article-title>A survey of brain tumor segmentation and classification algorithms</article-title>,&#x201D; <source>Journal of Imaging</source>, vol. <volume>7</volume>, no. <issue>9</issue>, pp. <fpage>179</fpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Amin</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Sharif</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Yasmin</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Saba</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Anjum</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>A new approach for brain tumor segmentation and classification based on score level fusion using transfer learning</article-title>,&#x201D; <source>Journal of Medical Systems</source>, vol. <volume>43</volume>, no. <issue>11</issue>, pp. <fpage>326</fpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Sharif</surname></string-name>, <string-name><given-names>U.</given-names> <surname>Tanvir</surname></string-name>, <string-name><given-names>E. U.</given-names> <surname>Munir</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Khan</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Yasmin</surname></string-name></person-group>, &#x201C;<article-title>Brain tumor segmentation and classification by improved binomial thresholding and multi-features selection</article-title>,&#x201D; <source>Journal of Ambient Intelligence and Humanized Computing</source>, vol. <volume>219</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>20</lpage>, <year>2018</year>. <uri>http://dx.doi.org/10.1007/s12652-018-1075-x</uri>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. Y. B.</given-names> <surname>Murthy</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Koteswararao</surname></string-name> and <string-name><given-names>M. S.</given-names> <surname>Babu</surname></string-name></person-group>, &#x201C;<article-title>Adaptive fuzzy deformable fusion and optimized CNN with ensemble classification for automated brain tumor diagnosis</article-title>,&#x201D; <source>Biomedical Engineering Letters</source>, vol. <volume>12</volume>, no. <issue>1</issue>, pp. <fpage>37</fpage>&#x2013;<lpage>58</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Dasanayaka</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Silva</surname></string-name>, <string-name><given-names>V.</given-names> <surname>Shantha</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Meedeniya</surname></string-name> and <string-name><given-names>T.</given-names> <surname>Ambegoda</surname></string-name></person-group>, &#x201C;<article-title>Interpretable machine learning for brain tumor analysis using MRI</article-title>,&#x201D; in <conf-name>2022 2nd Int. Conf. on Advanced Research in Computing (ICARC)</conf-name>, <publisher-loc>Belihuloya, Sri Lanka</publisher-loc>, pp. <fpage>212</fpage>&#x2013;<lpage>217</lpage>, <year>2022</year>. </mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Krishnakumar</surname></string-name> and <string-name><given-names>K.</given-names> <surname>Manivannan</surname></string-name></person-group>, &#x201C;<article-title>Effective segmentation and classification of brain tumor using rough K means algorithm and multi kernel SVM in MR images</article-title>,&#x201D; <source>Journal of Ambient Intelligence and Humanized Computing</source>, vol. <volume>12</volume>, no. <issue>6</issue>, pp. <fpage>6751</fpage>&#x2013;<lpage>6760</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Ilhan</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Sekeroglu</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Abiyev</surname></string-name></person-group>, &#x201C;<article-title>Brain tumor segmentation in MRI images using nonparametric localization and enhancement methods with U-net</article-title>,&#x201D; <source>International Journal of Computer Assisted Radiology and Surgery</source>, vol. <volume>17</volume>, no. <issue>3</issue>, pp. <fpage>589</fpage>&#x2013;<lpage>600</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. R.</given-names> <surname>Raju</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Suresh</surname></string-name> and <string-name><given-names>R. R.</given-names> <surname>Rao</surname></string-name></person-group>, &#x201C;<article-title>Bayesian HCS-based multi-SVNN: A classification approach for brain tumor segmentation and classification using bayesian fuzzy clustering</article-title>,&#x201D; <source>Biocybernetics and Biomedical Engineering</source>, vol. <volume>38</volume>, no. <issue>3</issue>, pp. <fpage>646</fpage>&#x2013;<lpage>660</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Das</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Nayak</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Saba</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Kalra</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Suri</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>An artificial intelligence framework and its bias for brain tumor segmentation: A narrative review</article-title>,&#x201D; <source>Computers in Biology and Medicine</source>, vol. <volume>143</volume>, no. <issue>3</issue>, pp. <fpage>105273</fpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Kapila</surname></string-name> and <string-name><given-names>N.</given-names> <surname>Bhagat</surname></string-name></person-group>, &#x201C;<article-title>Efficient feature selection technique for brain tumor classification utilizing hybrid fruit fly based ABC and ANN algorithm</article-title>,&#x201D; <source>Materials Today: Proceedings</source>, vol. <volume>51</volume>, pp. <fpage>12</fpage>&#x2013;<lpage>20</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>B.</given-names> <surname>Song</surname></string-name>, <string-name><given-names>C. R.</given-names> <surname>Chou</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Huang</surname></string-name> and <string-name><given-names>M. C.</given-names> <surname>Liu</surname></string-name></person-group>, &#x201C;<article-title>Anatomy-guided brain tumor segmentation and classification</article-title>,&#x201D; in <conf-name>Int. Workshop on Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries, BrainLes 2016: Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries, Lecture Notes in Computer Science Book Series</conf-name>, Athens, Greece, vol. <volume>10154</volume>, pp. <fpage>162</fpage>&#x2013;<lpage>170</lpage>, <year>2016</year>. </mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Shankar</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Elhoseny</surname></string-name>, <string-name><given-names>S. K.</given-names> <surname>Lakshmanaprabu</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Ilayaraja</surname></string-name>, <string-name><given-names>R. M.</given-names> <surname>Vidhyavathi</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Optimal feature level fusion based ANFIS classifier for brain MRI image classification</article-title>,&#x201D; <source>Concurrency and Computation: Practice and Experience</source>, vol. <volume>32</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>12</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. Z.</given-names> <surname>Alom</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Yakopcic</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Hasan</surname></string-name>, <string-name><given-names>T. M.</given-names> <surname>Taha</surname></string-name> and <string-name><given-names>V. K.</given-names> <surname>Asari</surname></string-name></person-group>, &#x201C;<article-title>Recurrent residual U-Net for medical image segmentation</article-title>,&#x201D; <source>Journal of Medical Imaging</source>, vol. <volume>6</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Bhowmik</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Abdullah</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Bin</surname></string-name> and <string-name><given-names>M. T.</given-names> <surname>Islam</surname></string-name></person-group>, &#x201C;<article-title>A deep face-mask detection model using DenseNet169 and image processing techniques</article-title>,&#x201D; <comment>Doctoral dissertation, Brac University</comment>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. M.</given-names> <surname>Lynn</surname></string-name>, <string-name><given-names>S. B.</given-names> <surname>Pan</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Kim</surname></string-name></person-group>, &#x201C;<article-title>A deep bidirectional GRU network model for biometric electrocardiogram classification based on recurrent neural networks</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>7</volume>, pp. <fpage>145395</fpage>&#x2013;<lpage>145405</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Sowmyalakshmi</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Jayasankar</surname></string-name>, <string-name><given-names>V. A.</given-names> <surname>PiIllai</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Subramaniyan</surname></string-name>, <string-name><given-names>I. V.</given-names> <surname>Pustokhina</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>An optimal classification model for rice plant disease detection</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>68</volume>, no. <issue>2</issue>, pp. <fpage>1751</fpage>&#x2013;<lpage>1767</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D. N.</given-names> <surname>Le</surname></string-name>, <string-name><given-names>V. S.</given-names> <surname>Parvathy</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Gupta</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Khanna</surname></string-name>, <string-name><given-names>J. J. P. C.</given-names> <surname>Rodrigues</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>IoT enabled depthwise separable convolution neural network with deep support vector machine for COVID-19 diagnosis and classification</article-title>,&#x201D; <source>International Journal of Machine Learning and Cybernetics</source>, vol. <volume>12</volume>, no. <issue>11</issue>, pp. <fpage>3235</fpage>&#x2013;<lpage>3248</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Venugopal</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Jayasankar</surname></string-name>, <string-name><given-names>M. Y.</given-names> <surname>Sikkandar</surname></string-name>, <string-name><given-names>M. I.</given-names> <surname>Waly</surname></string-name>, <string-name><given-names>I. V.</given-names> <surname>Pustokhina</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>A novel deep neural network for intracranial haemorrhage detection and classification</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>68</volume>, no. <issue>3</issue>, pp. <fpage>2877</fpage>&#x2013;<lpage>2893</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Kaur</surname></string-name>, <string-name><given-names>L. K.</given-names> <surname>Awasthi</surname></string-name>, <string-name><given-names>A. L.</given-names> <surname>Sangal</surname></string-name> and <string-name><given-names>G.</given-names> <surname>Dhiman</surname></string-name></person-group>, &#x201C;<article-title>Tunicate Swarm Algorithm: A new bio-inspired based metaheuristic paradigm for global optimization</article-title>,&#x201D; <source>Engineering Applications of Artificial Intelligence</source>, vol. <volume>90</volume>, no. <issue>2</issue>, pp. <fpage>103541</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Obayya</surname></string-name>, <string-name><given-names>A. M.</given-names> <surname>Hilal</surname></string-name>, <string-name><given-names>O.</given-names> <surname>Castillo</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Gupta</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Multi-objective quantum tunicate swarm optimization with deep learning model for intelligent dystrophinopathies diagnosis</article-title>,&#x201D; <source>Soft Computing</source>, vol. <volume>318</volume>, no. <issue>3</issue>, pp. <fpage>2199</fpage>, <year>2022</year>. <uri>http://dx.doi.org/10.1007/s00500-021-06620-5</uri>.</mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Cheng</surname></string-name></person-group>, &#x201C;<article-title>Brain tumor dataset</article-title>,&#x201D; <comment>Figshare</comment>, <year>2017</year>. [Online]. Available: <uri>http://dx.doi.org/10.6084/m9.figshare.1512427.v5</uri>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Sadad</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Rehman</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Munir</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Saba</surname></string-name>, <string-name><given-names>U.</given-names> <surname>Tariq</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Brain tumor detection and multi-classification using advanced deep learning techniques</article-title>,&#x201D; <source>Microscopy Research and Technique</source>, vol. <volume>84</volume>, no. <issue>6</issue>, pp. <fpage>1296</fpage>&#x2013;<lpage>1308</lpage>, <year>2021</year>.</mixed-citation></ref>
</ref-list>
</back>
</article>
