<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">33005</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2023.033005</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Arithmetic Optimization with Ensemble Deep Transfer Learning Based&#x00A0;Melanoma Classification</article-title>
<alt-title alt-title-type="left-running-head">Arithmetic Optimization with Ensemble Deep Transfer Learning Based Melanoma Classification</alt-title>
<alt-title alt-title-type="right-running-head">Arithmetic Optimization with Ensemble Deep Transfer Learning Based Melanoma Classification</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Kalyani</surname><given-names>K.</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Althubiti</surname><given-names>Sara A</given-names></name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Ahmed</surname><given-names>Mohammed Altaf</given-names></name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Laxmi Lydia</surname><given-names>E.</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Kadry</surname><given-names>Seifedine</given-names></name><xref ref-type="aff" rid="aff-5">5</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Han</surname><given-names>Neunggyu</given-names></name><xref ref-type="aff" rid="aff-6">6</xref></contrib>
<contrib id="author-7" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Nam</surname><given-names>Yunyoung</given-names></name><xref ref-type="aff" rid="aff-6">6</xref><email>ynam@sch.ac.kr</email></contrib>
<aff id="aff-1"><label>1</label><institution>Department of Computer Science, Dr. Nalli Kuppusamy Arts College (Affiliated to Bharathidasan University, Tiruchirappalli)</institution>, <addr-line>Thanjavur, 613003</addr-line>, <country>India</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Computer Science, College of Computer and Information Sciences, Majmaah University, Al-Majmaah</institution>, <addr-line>11952</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Computer Engineering, College of Computer Engineering &#x0026; Sciences, Prince Sattam Bin Abdulaziz University</institution>, <addr-line>Al-Kharj, 11942</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-4"><label>4</label><institution>Department of Computer Science and Engineering, Vignan&#x2019;s Institute of Information Technology</institution>, <addr-line>Visakhapatnam, 530049</addr-line>, <country>India</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Applied Data Science, Noroff University College</institution>, <addr-line>Kristiansand</addr-line>, <country>Norway</country></aff>
<aff id="aff-6"><label>6</label><institution>Department of ICT Convergence, Soonchunhyang University</institution>, <country>Korea</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Yunyoung Nam. Email: <email>ynam@sch.ac.kr</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic"><year>2023</year></pub-date>
<pub-date date-type="pub" publication-format="electronic"><day>24</day><month>1</month><year>2023</year></pub-date>
<volume>75</volume>
<issue>1</issue>
<fpage>149</fpage>
<lpage>164</lpage>
<history>
<date date-type="received"><day>04</day><month>6</month><year>2022</year></date>
<date date-type="accepted"><day>05</day><month>7</month><year>2022</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Kalyani et al.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Kalyani et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_33005.pdf"></self-uri>
<abstract><p>Melanoma is a skin disease with high mortality rate while early diagnoses of the disease can increase the survival chances of patients. It is challenging to automatically diagnose melanoma from dermoscopic skin samples. Computer-Aided Diagnostic (CAD) tool saves time and effort in diagnosing melanoma compared to existing medical approaches. In this background, there is a need to design an automated classification model for melanoma that can utilize deep and rich feature datasets of an image for disease classification. The current study develops an Intelligent Arithmetic Optimization with Ensemble Deep Transfer Learning Based Melanoma Classification (IAOEDTT-MC) model. The proposed IAOEDTT-MC model focuses on identification and classification of melanoma from dermoscopic images. To accomplish this, IAOEDTT-MC model applies image preprocessing at the initial stage in which Gabor Filtering (GF) technique is utilized. In addition, U-Net segmentation approach is employed to segment the lesion regions in dermoscopic images. Besides, an ensemble of DL models including ResNet50 and ElasticNet models is applied in this study. Moreover, AO algorithm with Gated Recurrent Unit (GRU) method is utilized for identification and classification of melanoma. The proposed IAOEDTT-MC method was experimentally validated with the help of benchmark datasets and the proposed model attained maximum accuracy of 92.09&#x0025; on ISIC 2017 dataset.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Skin cancer</kwd>
<kwd>deep learning</kwd>
<kwd>melanoma classification</kwd>
<kwd>dermoscopy</kwd>
<kwd>computer aided diagnosis</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1"><label>1</label><title>Introduction</title>
<p>Melanoma has been classified as one of the serious skin cancer types and is ranked at 9<sup>th</sup> position amongst the common cancer types. Every year, more than 132,000 cases are diagnosed across the globe [<xref ref-type="bibr" rid="ref-1">1</xref>]. According to the report by The American Cancer institution in the year 2019, 192,310 patients are affected with melanoma in U.S. In the last few years, the prevalence rate of melanoma is increasing with more patients getting infected with the disease, like other cancer types. A minor operation may increase the possibility of recovery after early detection of melanoma [<xref ref-type="bibr" rid="ref-2">2</xref>]. Dermoscopy is the commonly used imaging technique used for diagnosing melanoma. It expands the skin surface affected by cancer and its structure can be easily seen by dermatologists for investigation [<xref ref-type="bibr" rid="ref-3">3</xref>]. But discrimination of skin lesions, by dermatologists, from dermoscopic images is subjective and a time-consuming process. Further, the accuracy of the diagnoses primarily depends upon the expertise. Therefore, non-experienced dermatologists cannot make judgments with accuracy [<xref ref-type="bibr" rid="ref-4">4</xref>].</p>
<p>On the other hand, numerous problems arise in case of automatic detection of melanoma. At first, skin lesion has great intra-class variations and inter-class similarities in texture, color, and shape; different types of skin cancers possess high visual similarity [<xref ref-type="bibr" rid="ref-5">5</xref>]. Secondly, the region of skin lesions largely differ in dermoscopic images while the borders between normal skin and skin lesions remain unclear in certain images. Next, artifacts like rulers, texture, and hair are highly challenging to recognize in dermoscopic images in case of changes in melanoma. Likewise, many other factors increase the challenges involved in automated detection of melanoma from dermoscopic images [<xref ref-type="bibr" rid="ref-6">6</xref>]. So, there is an urgent need to develop an automated and non-subjective detection technique. This might help dermatologists to make accurate diagnoses. Such challenges inspire the researchers to propose new algorithms for diagnosis and visualization of melanoma. Computer-Aided Diagnosis (CAD) schemes assist in the diagnosis of melanoma. CAD system offers a user-friendly environment for inexperienced dermatologists [<xref ref-type="bibr" rid="ref-7">7</xref>]. The evidence produced by CAD diagnosis mechanism is utilized as a second opinion for melanoma diagnosis. In order to detect a skin lesion at early stages and resolve the abovementioned complexity, extensive studies have been conducted earlier with the help of Computer Vision (CV) algorithm [<xref ref-type="bibr" rid="ref-8">8</xref>]. The classification method varies such as Decision Trees (DT), Support Vector Machines (SVM), and Artificial Neural Networks (ANN). Conventional techniques are parametric in nature and need trainable datasets to achieve normal distribution. However, skin cancer dataset is uncontrivable [<xref ref-type="bibr" rid="ref-9">9</xref>]. Every lesion contains a distinct pattern; therefore, this technique is not sufficient. 
For this reason, DL technique is powerful in skin image classification as it can help the dermatologists to diagnose lesions with high accuracy [<xref ref-type="bibr" rid="ref-10">10</xref>].</p>
<p>The current study develops an Intelligent Arithmetic Optimization with Ensemble Deep Transfer Learning Based Melanoma Classification (IAOEDTT-MC) model. Initially, the proposed IAOEDTT-MC model applies image preprocessing at initial stage with the help of Gabor Filtering (GF) technique. In addition, U-Net segmentation approach is employed to segment the lesion regions in dermoscopic images. Besides, an ensemble of DL models including ResNet50 and ElasticNet models is applied. Moreover, AO algorithm with Gated Recurrent Unit (GRU) method is employed for both identification and classification of melanoma. The proposed IAOEDTT-MC approach was experimentally validated with the help of benchmark datasets and the results were examined under several measures.</p>
</sec>
<sec id="s2"><label>2</label><title>Literature Review</title>
<p>In literature [<xref ref-type="bibr" rid="ref-11">11</xref>], the authors proposed an automatic skin lesion classification technique. In this technique, Transfer Learning (TL) and pretrained Deep Learning (DL) networks were used. Apart from data augmentation and fine-tuning, TL model is also employed for AlexNet by replacing the final layer with softmax function to categorize three distinct types of lesions such as atypical nevus, common nevus, and melanoma. In Li et al. [<xref ref-type="bibr" rid="ref-12">12</xref>], two DL techniques were proposed to address three major tasks that evolve in the region of skin cancer image processing such as lesion segmentation, lesion dermoscopic feature extraction, and lesion classification. A DL architecture, comprised of two Fully Convolutional Residual Network (FCRN), was presented to concurrently produce coarse classification and segmentation results. Lesion Index Calculation Unit (LICU) was designed earlier to refine the coarse classification result by evaluating distance heat map. A direct CNN was introduced in this study for dermoscopic feature extraction tasks.</p>
<p>The authors in the study conducted earlier [<xref ref-type="bibr" rid="ref-13">13</xref>] developed a DL-based technique that overcomes the limitation in automated detection and segmentation of melanoma from dermoscopic images. An encoder-decoder network, with decoder and encoder subnetworks, was developed in this study. This network is interconnected with a sequence of skip pathways that carries the semantic level of encoder feature map close to the decoder feature map for feature extraction and effectual learning. The scheme applies multi-stage-and-scale method and uses softmax classification for pixel-wise classification of melanoma lesions. Kaur&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-14">14</xref>] developed an automated melanoma classification method based on Deep CNN (DCNN) for accurate classification of malignant <italic>vs.</italic> benign melanoma. DCNN architecture can be well planned by organizing more than one layer that is accountable for deriving lower to higher-level features of the skin image in an exclusive manner. Other vital conditions in DCNN structure include the selection of sizes and multiple filters that employ optimizing hyperparameters, proper DL layers, and selection of in-depth network.</p>
<p>Thapar&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-15">15</xref>] suggested a reliable technique for skin lesion detection with the help of dermoscopic images. The aim of this technique is to enhance the visual observation and diagnostic ability of healthcare professionals in terms of differentiating benign from malignant lesions. Swarm Intelligence (SI) algorithm was utilized in this study for segmentation of Region of Interest (RoI) from dermoscopic images. Further, Speeded-Up Robust Feature (SURF) was applied for feature extraction of RoI which is determined as a better segmentation outcome, attained by Grasshopper Optimization Algorithm (GOA). Banerjee&#x00A0;et&#x00A0;al.&#x00A0;[<xref ref-type="bibr" rid="ref-16">16</xref>] proposed a DL-based &#x2018;You Only Look Once (YOLO)&#x2019; technique based on the application of DCNN to diagnose melanoma from digital and dermoscopic images. This technique also aimed at providing fast and accurate output compared to traditional CNN. With respect to position of the recognized object in a cell, this method forecasted the class confidence score and bounding box of the detected object. In literature [<xref ref-type="bibr" rid="ref-17">17</xref>], the authors addressed the problem of efficient usage of feature set extracted from DL model which is pre-trained on ImageNet.</p>
</sec>
<sec id="s3"><label>3</label><title>The Proposed Model</title>
<p>In this study, a novel IAOEDTT-MC technique has been developed for identification and classification of melanoma on dermoscopic images. Initially, the proposed IAOEDTT-MC model applies image pre-processing using GF technique. Followed by, U-Net segmentation approach is employed to segment the lesion regions in dermoscopic images. Furthermore, an ensemble of DL models including ResNet50 and ElasticNet models is applied. At last, AO algorithm with GRU approach is employed for the identification and classification of melanoma. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> depicts the overall process involved in IAOEDTT-MC approach.</p>
<fig id="fig-1"><label>Figure 1</label><caption><title>Block diagram of IAOEDTT-MC approach</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-1.tif"/></fig>
<sec id="s3_1"><label>3.1</label><title>Image Pre-Processing</title>
<p>GF is a band-pass filter that is effectively executed for CV application and variations in image processing. Two-dimensional Gabor function is an orientated complex in which the sinusoidal grating gets decreased with the help of two-dimensional Gaussian envelope. In two-dimensional co-ordinate <inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mrow><mml:mo>(</mml:mo><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> system, GF comprises of a real component and imaginary one and are represented as follows&#x00A0;[<xref ref-type="bibr" rid="ref-18">18</xref>]:
<disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:msub><mml:mi>G</mml:mi><mml:mrow><mml:mi>&#x03B4;</mml:mi><mml:mo>,</mml:mo><mml:mi>&#x03B8;</mml:mi><mml:mo>,</mml:mo><mml:mi>&#x03C8;</mml:mi><mml:mo>,</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo>,</mml:mo><mml:mi>&#x03B3;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>a</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext>exp</mml:mtext></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mrow><mml:msup><mml:mi>a</mml:mi><mml:mrow><mml:mrow><mml:mo>&#x2032;</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>+</mml:mo><mml:msup><mml:mi>&#x03B3;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msup><mml:mi>b</mml:mi><mml:mrow><mml:mrow><mml:mo>&#x2032;</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mtext>exp</mml:mtext></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mn>2</mml:mn><mml:mi>&#x03C0;</mml:mi><mml:mfrac><mml:msup><mml:mi>a</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mi>&#x03B4;</mml:mi></mml:mfrac><mml:mo>+</mml:mo><mml:mi>&#x03C8;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>where
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:msup><mml:mi>a</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mi>a</mml:mi><mml:mrow><mml:mtext>cos</mml:mtext></mml:mrow><mml:mi>&#x03B8;</mml:mi><mml:mo>+</mml:mo><mml:mi>b</mml:mi><mml:mrow><mml:mtext>sin</mml:mtext></mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:math></disp-formula>
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:msup><mml:mi>b</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mi>a</mml:mi><mml:mrow><mml:mtext>sin</mml:mtext></mml:mrow><mml:mi>&#x03B8;</mml:mi><mml:mo>+</mml:mo><mml:mi>b</mml:mi><mml:mrow><mml:mtext>cos</mml:mtext></mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:math></disp-formula></p>
<p>In the above equation, <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:mi>&#x03B4;</mml:mi></mml:math></inline-formula> indicates the wavelength of sinusoidal factor and <inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:mi>&#x03B8;</mml:mi></mml:math></inline-formula> denotes the orientated separation angle of Gabor kernel. Assume <inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:mi>&#x03B8;</mml:mi></mml:math></inline-formula> in the range of <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:mrow><mml:mo>[</mml:mo><mml:msup><mml:mn>0</mml:mn><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msup><mml:mo>,</mml:mo><mml:msup><mml:mn>180</mml:mn><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msup><mml:mo>]</mml:mo></mml:mrow></mml:math></inline-formula> as symmetry which makes other directions redundant. <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:mi>&#x03C8;</mml:mi></mml:math></inline-formula> determines phase offset, <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:mi>&#x03C3;</mml:mi></mml:math></inline-formula> defines the Standard Deviation (SD) of Gaussian envelope and <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:mi>&#x03B3;</mml:mi></mml:math></inline-formula> represents spatial feature (default value is 0.5) to identify the ellipticity of Gabor function support. The variable <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:mn>0</mml:mn></mml:math></inline-formula> is determined to be 6 and spatial frequency bandwidth <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:mi>b</mml:mi><mml:mi>w</mml:mi></mml:math></inline-formula> is as follows.
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:mi>&#x03C3;</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mi>&#x03B4;</mml:mi><mml:mrow><mml:mi>p</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:mfrac><mml:msqrt><mml:mfrac><mml:mrow><mml:mi>l</mml:mi><mml:mi>n</mml:mi><mml:mn>2</mml:mn></mml:mrow><mml:mn>2</mml:mn></mml:mfrac></mml:msqrt><mml:mfrac><mml:mrow><mml:msup><mml:mn>2</mml:mn><mml:mrow><mml:mi>b</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:msup><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:msup><mml:mn>2</mml:mn><mml:mrow><mml:mi>b</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:msup><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
</sec>
<sec id="s3_2"><label>3.2</label><title>U-Net Segmentation</title>
<p>In this study, U-Net segmentation technique is utilized for the segmentation of lesion region in dermoscopic image. U-net network is classified as follows: initially, the contracting path uses a standard CNN structure [<xref ref-type="bibr" rid="ref-19">19</xref>]. All the blocks in contracting path comprise of two consecutive 3&#x2009;&#x00D7;&#x2009;3 convolutions along with max-pooling layer and ReLU activation unit. This procedure is repeated iteratively. U-net is innovated in the upcoming section and is named as expansive path in which all the phases upsample the feature maps using 2&#x2009;&#x00D7;&#x2009;2 up-convolutions. Next, the feature map from respective layer in the contracting path, can be cropped and concatenated onto up-sampled feature maps. It is followed by ReLU activation and two consecutive 3&#x2009;&#x00D7;&#x2009;3 convolutions. Finally, a 1&#x2009;&#x00D7;&#x2009;1 convolution is employed in the reduction of feature maps to necessary amount of channels and generation of the segmented images. Cropping is essential, because the pixel features in the edges have minimum amount of context data due to which it should be dismissed. This results in a u-shaped network and propagates the context dataset alongside the network. This scenario enables the user to segment objects in a region with the help of context from a large overlapping region.</p>
</sec>
<sec id="s3_3"><label>3.3</label><title>Ensemble Learning Process</title>
<p>For feature extraction, an ensemble of DL models including ResNet50 and ElasticNet models is applied. Ensemble learning is a fusion method in which two or three models are fused together to gain benefits in terms of performance and computation. The outcomes of an ensemble of DNNs are always better compared to the outcomes achieved from a single model. In this study, average ensemble learning is utilized by a similar weight assigned to all the models.
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:mi>P</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mo>&#x2211;</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>N</mml:mi></mml:mfrac></mml:math></disp-formula></p>
<p>In this expression, <inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> refers to the probability of model <italic>i</italic>, and <italic>N</italic> stands for overall number of models. DL model is complex in nature and possesses different architectures; they do not offer a similar outcome. Thus, it is advantageous to assign additional weights to the model for better implementation. Hereby, the maximal output is extracted from other models. The problem is to identify the accurate combination of model weights. Then, the grid search method is utilized to resolve the problem. In this study, the authors employed a total of 1000 weight combinations. The search process continues until each variety is checked. Finally, the algorithm is implemented by means of correct weight combination for the maximum of the presented assessment metrics.</p>
<sec id="s3_3_1"><label>3.3.1</label><title>ResNet-50 Model</title>
<p>ResNet is commonly used for improving the problem of vanishing or exploding gradients [<xref ref-type="bibr" rid="ref-20">20</xref>]. ResNet is a collection of residual blocks while each residual block is in turn a collection of different layers such as Batch Normalization (BN), convolution, and ReLU layers. In addition to all the residual blocks, the input is directly processed as the output using identity whereas a short connection permits the user to perform residual learning; this is important to resolve gradient problems in training deep network. The residual block is expressed as follows.
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Here, <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula> signify the output and input of <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:msup><mml:mi>l</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> residual block correspondingly. <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> denotes the residual mapping function of stack layers. It is apparent that the dimensions such as <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula> and <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> are equivalent. But, the convolutional function generally has dimensional variations. Thus, the linear projection <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is utilized to match the dimension. So, <xref ref-type="disp-formula" rid="eqn-6">Eq. (6)</xref> is transformed as follows.
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>l</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Thus, ResNet-50 is attained by stacking the residual block so as to count the last network layer to&#x00A0;50.</p>
</sec>
<sec id="s3_3_2"><label>3.3.2</label><title>ElasticNet Model</title>
<p>ElasticNet is basically a linear regression mechanism developed on the basis of Lasso and ridge regressions [<xref ref-type="bibr" rid="ref-21">21</xref>]. Assume that the sample size is <italic>N</italic> and the number of prediction parameters is <italic>p</italic>.
<disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mo>&#x22EF;</mml:mo><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>p</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="script">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mi mathvariant="script">E</mml:mi></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mi>N</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x22EF;</mml:mo><mml:mo>,</mml:mo><mml:mi>N</mml:mi></mml:math></disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-8">Eq. (8)</xref>, the regression coefficient is represented as <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:mi>&#x03B2;</mml:mi></mml:math></inline-formula> whereas the constant term is characterized by <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:math></inline-formula> denotes a disturbance of target value around real value. The definition of Ridge regression is as follows.
<disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:msup><mml:mrow><mml:mover><mml:mi>&#x03B2;</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">ridge</mml:mtext></mml:mrow></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mtext>argmin</mml:mtext></mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:munder><mml:mrow><mml:mo>{</mml:mo><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:msubsup><mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>}</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Lasso regression is defined as follows.
<disp-formula id="eqn-10"><label>(10)</label><mml:math id="mml-eqn-10" display="block"><mml:msup><mml:mrow><mml:mover><mml:mi>&#x03B2;</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">ridge</mml:mtext></mml:mrow></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mtext>argmin</mml:mtext></mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:munder><mml:mrow><mml:mo>{</mml:mo><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>i</mml:mi><mml:mrow><mml:mi>J</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>|</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>To overcome the limitations of these two techniques, the elastic net method is developed. The regression coefficient is represented as follows.
<disp-formula id="eqn-11"><label>(11)</label><mml:math id="mml-eqn-11" display="block"><mml:msup><mml:mrow><mml:mover><mml:mi>&#x03B2;</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">ridge</mml:mtext></mml:mrow></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mtext>argmin</mml:mtext></mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:munder><mml:mrow><mml:mo>{</mml:mo><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>J</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:msubsup><mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1<
/mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>|</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>If <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:mi>&#x03BB;</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>&#x03B1;</mml:mi><mml:mo>=</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mfrac><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:mstyle></mml:math></inline-formula>:
<disp-formula id="eqn-12"><label>(12)</label><mml:math id="mml-eqn-12" display="block"><mml:msup><mml:mrow><mml:mover><mml:mi>&#x03B2;</mml:mi><mml:mo stretchy="false">&#x005E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">ridge</mml:mtext></mml:mrow></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mtext>argmin</mml:mtext></mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:munder><mml:mrow><mml:mo>{</mml:mo><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>+</mml:mo><mml:mi>&#x03BB;</mml:mi><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>&#x03B1;</mml:mi><mml:mrow><mml:mo>|</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03B1;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:msubsup><mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mm
l:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>)</mml:mo></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>The penalty function of the elastic net is <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>&#x03B1;</mml:mi><mml:mrow><mml:mo>|</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03B1;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:msup><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> i.e., a convex linear integration of the penalty function <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>|</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>|</mml:mo></mml:mrow></mml:math></inline-formula> of Lasso regression and <inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msubsup><mml:mrow><mml:mo>&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msubsup><mml:msubsup><mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> of ridge regression. 
If <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:mi>&#x03B1;</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula>, the elastic net is Lasso regression. If <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:mi>&#x03B1;</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:math></inline-formula>, the elastic net is ridge regression. Thus, the elastic net has the advantages of both ridge and Lasso regressions. It can determine which parameters to be selected so that a better group effect can be achieved.</p>
</sec>
</sec>
<sec id="s3_4"><label>3.4</label><title>Melanoma Classification</title>
<p>Finally, the AO algorithm with the GRU model is employed for identification and classification of melanoma. RNN is a well-known neural network that can process sequential datasets. This characteristic of RNN makes it suitable for learning algorithmic tasks [<xref ref-type="bibr" rid="ref-22">22</xref>]. Though RNN is utilized for different NLP applications, it has a primary constraint, i.e., it suffers from gradient vanishing in deep networks. For sequential datasets <inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>, the hidden state <inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of the RNN is evaluated by the formula given below.
<disp-formula id="eqn-13"><label>(13)</label><mml:math id="mml-eqn-13" display="block"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-13">Eq. (13)</xref>, <italic>f</italic> refers to the activation function. GRU is a kind of RNN model with less number of gates than LSTM. In GRU cell units, both forget gate and input gate can be controlled by a single gate. Therefore, both input and forget gates are integrated into a single gate which makes the GRU a simple technique than LSTM. For instance, when <inline-formula id="ieqn-30"><mml:math id="mml-ieqn-30"><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula>, the entry of the novel dataset for forget gate is opened, and the input gate is closed, if <inline-formula id="ieqn-31"><mml:math id="mml-ieqn-31"><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:math></inline-formula>. The reset gate defines that the novel input should be integrated with the preceding memory in order to determine the novel state as given below.
<disp-formula id="eqn-14"><label>(14)</label><mml:math id="mml-eqn-14" display="block"><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msubsup><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi><mml:mi>r</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msubsup><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>h</mml:mi><mml:msup><mml:mi>r</mml:mi><mml:mrow><mml:mi>O</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="eqn-15"><label>(15)</label><mml:math id="mml-eqn-15" display="block"><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msubsup><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>x</mml:mi><mml:mi>z</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msubsup><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mi>o</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup><mml:mi>z</mml:mi><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>z</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="eqn-16"><label>(16)</label><mml:math id="mml-eqn-16" display="block"><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2297;</mml:mo><mml:msub><mml:mn>0</mml:mn><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2297;</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>O</mml:mi><mml:mo>&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula></p>
<p>In these expressions, <inline-formula id="ieqn-32"><mml:math id="mml-ieqn-32"><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> refers to the reset gate; <inline-formula id="ieqn-33"><mml:math id="mml-ieqn-33"><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates the update gate; and <inline-formula id="ieqn-34"><mml:math id="mml-ieqn-34"><mml:msub><mml:mi>o</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents the output gate. <inline-formula id="ieqn-35"><mml:math id="mml-ieqn-35"><mml:mo>&#x2297;</mml:mo></mml:math></inline-formula> represents component-wise multiplication; <italic>t</italic> denotes the time step; <italic>T</italic> refers to the length of the window; <italic>w</italic> indicates the layer weight corresponding to input <italic>x</italic>; and <italic>b</italic> signifies the threshold of the output gate.</p>
<p>AOA comprises exploration and exploitation stages, inspired by arithmetic operations such as <inline-formula id="ieqn-36"><mml:math id="mml-ieqn-36"><mml:mo>&#x2212;</mml:mo><mml:mo>,</mml:mo><mml:mo>+</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:mtext>&#xA0;&#x002A;</mml:mtext></mml:mrow><mml:mo>,</mml:mo></mml:math></inline-formula> and <inline-formula id="ieqn-37"><mml:math id="mml-ieqn-37"><mml:mrow><mml:mo>/</mml:mo></mml:mrow></mml:math></inline-formula>. Initially, AOA produces a set of <italic>N</italic> solutions [<xref ref-type="bibr" rid="ref-23">23</xref>]. Thus, each agent or solution constitutes the population <italic>X</italic> in the following equation.
<disp-formula id="ueqn-1">
<mml:math id="mml-ueqn-1" display="block"><mml:mi>X</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mo stretchy="false">[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mo 
stretchy="false">]</mml:mo></mml:math></disp-formula>
<disp-formula id="eqn-17"><label>(17)</label><mml:math id="mml-eqn-17" display="block"><mml:mi>X</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mtable columnalign="center center center center center" rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>,</m
ml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>N</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Followed by, the fitness function of the solution is evaluated to identify the finest one <inline-formula id="ieqn-38"><mml:math id="mml-ieqn-38"><mml:mo>,</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. Then, AOA executes exploration or exploitation method according to Math Optimizer Accelerated <inline-formula id="ieqn-39"><mml:math id="mml-ieqn-39"><mml:mrow><mml:mo>(</mml:mo><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:mi>A</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> values. Subsequently, <inline-formula id="ieqn-40"><mml:math id="mml-ieqn-40"><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:mi>A</mml:mi></mml:math></inline-formula> is upgraded as given below.
<disp-formula id="eqn-18"><label>(18)</label><mml:math id="mml-eqn-18" display="block"><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:mi>A</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext>Min</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mtext>Max</mml:mtext></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:mi>A</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mrow><mml:mtext>Min</mml:mtext></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:mi>A</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mfrac><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Here, <inline-formula id="ieqn-41"><mml:math id="mml-ieqn-41"><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates the total number of iterations. <inline-formula id="ieqn-42"><mml:math id="mml-ieqn-42"><mml:msub><mml:mrow><mml:mtext>Max</mml:mtext></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:mi>A</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-43"><mml:math id="mml-ieqn-43"><mml:msub><mml:mrow><mml:mtext>Min</mml:mtext></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:mi>A</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicate the maximal and minimal values of the accelerated operation, while multiplication (M) and division (D) are exploited in the exploration process of AOA:
<disp-formula id="eqn-19"><label>(19)</label><mml:math id="mml-eqn-19" display="block"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mtable columnalign="left left" rowspacing=".2em" columnspacing="1em" displaystyle="false"><mml:mtr><mml:mtd><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00F7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>O</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mo>&#x2208;</mml:mo><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>U</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>+</mml:mo><mml:mi>L</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>&#x003C;</mml:mo><mml:mn>0.5</mml:mn></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>O</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>U</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi><
/mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>+</mml:mo><mml:mi>L</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd><mml:mtd><mml:mrow><mml:mtext mathvariant="italic">otherwise</mml:mtext></mml:mrow></mml:mtd></mml:mtr></mml:mtable><mml:mo fence="true" stretchy="true" symmetric="true"></mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Then, <italic>e</italic> denotes the small integer value,<inline-formula id="ieqn-44"><mml:math id="mml-ieqn-44"><mml:mi>U</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mtext>and</mml:mtext></mml:mrow><mml:mi>L</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates upper and lower bounds of the search space at <inline-formula id="ieqn-45"><mml:math id="mml-ieqn-45"><mml:mi>j</mml:mi></mml:math></inline-formula>-<inline-formula id="ieqn-46"><mml:math id="mml-ieqn-46"><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math></inline-formula> variable. <inline-formula id="ieqn-47"><mml:math id="mml-ieqn-47"><mml:mi>&#x03BC;</mml:mi><mml:mo>=</mml:mo><mml:mn>0.5</mml:mn></mml:math></inline-formula> represents the control function. Moreover, Math Optimizer <inline-formula id="ieqn-48"><mml:math id="mml-ieqn-48"><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>O</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is defined as follows:
<disp-formula id="eqn-20"><label>(20)</label><mml:math id="mml-eqn-20" display="block"><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>O</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:msup><mml:mi>t</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mi>&#x03B1;</mml:mi></mml:mrow></mml:msup><mml:msubsup><mml:mrow><mml:mi>M</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mi>&#x03B1;</mml:mi></mml:mrow></mml:msubsup></mml:mfrac></mml:math></disp-formula></p>
<p><inline-formula id="ieqn-49"><mml:math id="mml-ieqn-49"><mml:mi>&#x03B1;</mml:mi><mml:mo>=</mml:mo><mml:mn>5</mml:mn></mml:math></inline-formula> indicates the dynamic parameter that determines the performance of exploitation phase. <xref ref-type="fig" rid="fig-2">Fig. 2</xref> depicts the flowchart of AOA.</p>
<fig id="fig-2"><label>Figure 2</label><caption><title>Flowchart of AOA</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-2.tif"/></fig>
<p>Furthermore, subtraction (S) and addition (A) operators are also applied in the implementation of the AOA exploitation process as given below.
<disp-formula id="eqn-21"><label>(21)</label><mml:math id="mml-eqn-21" display="block"><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mtable columnalign="left left" rowspacing=".2em" columnspacing="1em" displaystyle="false"><mml:mtr><mml:mtd><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>O</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>U</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>+</mml:mo><mml:mi>L</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub><mml:mo>&#x003C;</mml:mo><mml:mn>0.5</mml:mn></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mi>O</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>U</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>+</mml:mo><mml:mi>
L</mml:mi><mml:msub><mml:mi>B</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext mathvariant="italic">otherwise</mml:mtext></mml:mrow></mml:mtd></mml:mtr></mml:mtable><mml:mo fence="true" stretchy="true" symmetric="true"></mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Here, <inline-formula id="ieqn-50"><mml:math id="mml-ieqn-50"><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> indicates an arbitrary value that lies in the interval&#x00A0;[0, 1]. Later, the agent update process is also implemented using the AOA operator. Algorithm 1 illustrates the steps involved in AOA.
</p>
<fig id="fig-9">
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-9.tif"/>
</fig>
</sec>
</sec>
<sec id="s4"><label>4</label><title>Performance Validation</title>
<p>The proposed EAOEDTT-MC model was experimentally validated using two benchmark datasets, namely ISIC 2017 [<xref ref-type="bibr" rid="ref-24">24</xref>] and ISIC 2020 [<xref ref-type="bibr" rid="ref-25">25</xref>]. The proposed EAOEDTT-MC approach was simulated with the help of the Python 3.6.5 tool. Both datasets have two class labels, namely melanoma (MEL) and benign (BEN), as shown in <xref ref-type="table" rid="table-1">Table 1</xref>. A few sample images are portrayed in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>.</p>
<table-wrap id="table-1"><label>Table 1</label><caption><title>Datasets details</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Class name</th>
<th align="center" colspan="2">No. of samples</th>
</tr>
<tr>
<th/>
<th align="left">ISIC 2017 dataset</th>
<th align="left">ISIC 2020 dataset</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">MEL</td>
<td align="left">1732</td>
<td align="left">4970</td>
</tr>
<tr>
<td align="left">BEN</td>
<td align="left">2440</td>
<td align="left">5100</td>
</tr>
<tr>
<td align="left">Total no. of samples</td>
<td align="left">4172</td>
<td align="left">10070</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-3"><label>Figure 3</label><caption><title>Sample images</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-3.tif"/></fig>
<p><xref ref-type="fig" rid="fig-4">Fig. 4</xref> highlights the confusion matrices generated by the proposed EAOEDTT-MC model on test ISIC 2017 dataset. On entire dataset, the proposed EAOEDTT-MC model classified 1,452 samples under MEL class and 2,389 samples under BEN class. Eventually, on 70&#x0025; of TR dataset, EAOEDTT-MC approach categorized 1,010 samples under MEL class and 1,679 samples under BEN class. Concurrently, on 30&#x0025; of TS dataset, the proposed EAOEDTT-MC system recognized 442 samples under MEL class and 710 samples under BEN class.</p>
<fig id="fig-4"><label>Figure 4</label><caption><title>Confusion matrices of EAOEDTT-MC approach under ISIC 2017 dataset (a) entire dataset, (b) 70&#x0025; of TR data, and (c) 30&#x0025; of TS data</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-4a.tif"/><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-4b.tif"/></fig>
<p><xref ref-type="table" rid="table-2">Table 2</xref> and <xref ref-type="fig" rid="fig-5">Fig. 5</xref> shows a brief melanoma classification results achieved by the proposed EAOEDTT-MC model on test ISIC 2017 dataset. The experimental results imply that the proposed EAOEDTT-MC model achieved effectual outcomes in every aspect. For instance, with entire dataset, EAOEDTT-MC model attained an average <inline-formula id="ieqn-68"><mml:math id="mml-ieqn-68"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 92.07&#x0025;, <inline-formula id="ieqn-69"><mml:math id="mml-ieqn-69"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 93.06&#x0025;, <inline-formula id="ieqn-70"><mml:math id="mml-ieqn-70"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 90.87&#x0025;, <inline-formula id="ieqn-71"><mml:math id="mml-ieqn-71"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 91.64&#x0025;, <inline-formula id="ieqn-72"><mml:math id="mml-ieqn-72"><mml:mi>A</mml:mi><mml:mi>U</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 90.87&#x0025;, and an MCC of 83.90&#x0025;. 
In addition, with 70&#x0025; of TR data, the proposed EAOEDTT-MC technique obtained an average <inline-formula id="ieqn-73"><mml:math id="mml-ieqn-73"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 95.12&#x0025;, <inline-formula id="ieqn-74"><mml:math id="mml-ieqn-74"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 95.23&#x0025;, <inline-formula id="ieqn-75"><mml:math id="mml-ieqn-75"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 95.16&#x0025;, <inline-formula id="ieqn-76"><mml:math id="mml-ieqn-76"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 95.12&#x0025;, <inline-formula id="ieqn-77"><mml:math id="mml-ieqn-77"><mml:mi>A</mml:mi><mml:mi>U</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 95.16&#x0025;, and an MCC of 90.39&#x0025;. 
Also, with 30&#x0025; of TS data, EAOEDTT-MC system accomplished an average <inline-formula id="ieqn-78"><mml:math id="mml-ieqn-78"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 92.09&#x0025;, <inline-formula id="ieqn-79"><mml:math id="mml-ieqn-79"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 93.08&#x0025;, <inline-formula id="ieqn-80"><mml:math id="mml-ieqn-80"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 90.86&#x0025;, <inline-formula id="ieqn-81"><mml:math id="mml-ieqn-81"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 91.65&#x0025;, <inline-formula id="ieqn-82"><mml:math id="mml-ieqn-82"><mml:mi>A</mml:mi><mml:mi>U</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 90.86&#x0025;, and an MCC of 83.91&#x0025;.</p>
<table-wrap id="table-2"><label>Table 2</label><caption><title>Results of the analysis of EAOEDTT-MC method upon ISIC 2017 dataset under different measures</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="center" colspan="7">ISIC 2017 dataset</th>
</tr>
<tr>
<th align="left">Class labels</th>
<th align="left">Accuracy</th>
<th align="left">Precision</th>
<th align="left">Recall</th>
<th align="left">F-Score</th>
<th align="left">AUC Score</th>
<th align="left">MCC</th>
</tr>
<tr>
<th align="center" colspan="7">Entire dataset</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">MEL</td>
<td align="left">92.07</td>
<td align="left">96.61</td>
<td align="left">83.83</td>
<td align="left">89.77</td>
<td align="left">90.87</td>
<td align="left">83.90</td>
</tr>
<tr>
<td align="left">BEN</td>
<td align="left">92.07</td>
<td align="left">89.51</td>
<td align="left">97.91</td>
<td align="left">93.52</td>
<td align="left">90.87</td>
<td align="left">83.90</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">92.07</td>
<td align="left">93.06</td>
<td align="left">90.87</td>
<td align="left">91.64</td>
<td align="left">90.87</td>
<td align="left">83.90</td>
</tr>
<tr>
<td align="center" colspan="7">Training phase (70&#x0025;)</td>
</tr>
<tr>
<td align="left">MEL</td>
<td align="left">95.12</td>
<td align="left">92.67</td>
<td align="left">97.87</td>
<td align="left">95.20</td>
<td align="left">95.16</td>
<td align="left">90.39</td>
</tr>
<tr>
<td align="left">BEN</td>
<td align="left">95.12</td>
<td align="left">97.80</td>
<td align="left">92.45</td>
<td align="left">95.05</td>
<td align="left">95.16</td>
<td align="left">90.39</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">95.12</td>
<td align="left">95.23</td>
<td align="left">95.16</td>
<td align="left">95.12</td>
<td align="left">95.16</td>
<td align="left">90.39</td>
</tr>
<tr>
<td align="center" colspan="7">Testing phase (30&#x0025;)</td>
</tr>
<tr>
<td align="left">MEL</td>
<td align="left">92.09</td>
<td align="left">96.56</td>
<td align="left">83.82</td>
<td align="left">89.74</td>
<td align="left">90.86</td>
<td align="left">83.91</td>
</tr>
<tr>
<td align="left">BEN</td>
<td align="left">92.09</td>
<td align="left">89.59</td>
<td align="left">97.90</td>
<td align="left">93.56</td>
<td align="left">90.86</td>
<td align="left">83.91</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">92.09</td>
<td align="left">93.08</td>
<td align="left">90.86</td>
<td align="left">91.65</td>
<td align="left">90.86</td>
<td align="left">83.91</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-5"><label>Figure 5</label><caption><title>Average analysis of EAOEDTT-MC method under ISIC 2017 dataset</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-5.tif"/></fig>
<p>Training Accuracy (TA) and Validation Accuracy (VA) values, attained by the proposed EAOEDTT-MC approach on ISIC 2017 dataset, are demonstrated in <xref ref-type="fig" rid="fig-6">Fig. 6</xref>. The experimental outcomes reveal that the proposed EAOEDTT-MC algorithm gained maximum TA and VA values. To be specific, VA seemed to be higher than TA. Next, <xref ref-type="fig" rid="fig-7">Fig. 7</xref> shows the training loss (TL) and validation loss (VL) analysis of the proposed model. The results indicated that the proposed model has offered minimal loss with an increase in epochs.</p>
<fig id="fig-6"><label>Figure 6</label><caption><title>TA and VA analysis of EAOEDTT-MC method under ISIC 2017 dataset</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-6.tif"/></fig><fig id="fig-7"><label>Figure 7</label><caption><title>TL and VL analysis results of EAOEDTT-MC method under ISIC 2017 dataset</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-7.tif"/></fig>
<p><xref ref-type="table" rid="table-3">Table 3</xref> and <xref ref-type="fig" rid="fig-8">Fig. 8</xref> shows the comparative study results accomplished by the proposed EAOEDTT-MC method and other existing techniques on ISIC 2017 dataset [<xref ref-type="bibr" rid="ref-26">26</xref>]. The experimental values indicate that ResNet18, Inception v3, and AlexNet models obtained the least classification performance over other models. At the same time, LCNet model reached a moderately improved classification performance while it <inline-formula id="ieqn-83"><mml:math id="mml-ieqn-83"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-84"><mml:math id="mml-ieqn-84"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-85"><mml:math id="mml-ieqn-85"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> values were 88.20&#x0025;, 78.50&#x0025;, and 87.80&#x0025; respectively.</p>
<table-wrap id="table-3"><label>Table 3</label><caption><title>Comparative analysis results of EAOEDTT-MC and other recent methods under ISIC 2017 dataset</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="center" colspan="4">ISIC 2017 dataset</th>
</tr>
<tr>
<th align="left">Methods</th>
<th align="left">Accuracy</th>
<th align="left">Precision</th>
<th align="left">Recall</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">EAOEDTT-MC</td>
<td align="left">92.09</td>
<td align="left">93.08</td>
<td align="left">90.86</td>
</tr>
<tr>
<td align="left">ResNet18</td>
<td align="left">75.00</td>
<td align="left">64.00</td>
<td align="left">57.10</td>
</tr>
<tr>
<td align="left">Inceptionv3</td>
<td align="left">77.40</td>
<td align="left">69.10</td>
<td align="left">61.20</td>
</tr>
<tr>
<td align="left">AlexNet</td>
<td align="left">74.00</td>
<td align="left">67.00</td>
<td align="left">66.00</td>
</tr>
<tr>
<td align="left">LCNet</td>
<td align="left">88.20</td>
<td align="left">78.50</td>
<td align="left">87.80</td>
</tr>
<tr>
<td align="left">Ensemble-Two-Stage DNN</td>
<td align="left">90.90</td>
<td align="left">85.90</td>
<td align="left">80.80</td>
</tr>
<tr>
<td align="left">MB-DCNN</td>
<td align="left">90.40</td>
<td align="left">88.13</td>
<td align="left">78.60</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-8"><label>Figure 8</label><caption><title>Comparative analysis results of EAOEDTT-MC method under ISIC 2017 dataset</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_33005-fig-8.tif"/></fig>
</sec>
<sec id="s5"><label>5</label><title>Conclusion</title>
<p>In this study, a novel EAOEDTT-MC approach has been developed for the identification and classification of melanoma on dermoscopic images. Initially, the proposed EAOEDTT-MC model applies image pre-processing using the GF technique. Next, the U-Net segmentation approach is applied to segment the lesion regions in the dermoscopic image. Furthermore, an ensemble of DL models including ResNet50 and ElasticNet models is applied. At last, the AO algorithm with GRU algorithm is utilized for the identification and classification of melanoma. The design of the AO algorithm involves the integration of the LAHC concept with the traditional AOA in order to enhance the quality of the solution. The proposed EAOEDTT-MC system was experimentally validated utilizing benchmark datasets and the outcomes were inspected under distinct measures. The extensive comparative analysis outcomes highlighted the enhanced performance of the EAOEDTT-MC model over recent algorithms. In the future, the performance of the EAOEDTT-MC model can be enhanced with the help of hybrid metaheuristic algorithms with deep instance segmentation models.</p>
</sec>
</body>
<back>
<sec><title>Funding Statement</title>
<p>This research was supported by the <funding-source>MSIT (Ministry of Science and ICT)</funding-source>, Korea, under the ICAN (ICT Challenge and Advanced Network of HRD) program (<award-id>IITP-2022-2020-0-01832</award-id>) supervised by the <funding-source>IITP (Institute of Information &#x0026; Communications Technology Planning &#x0026; Evaluation)</funding-source> and the <funding-source>Soonchunhyang University Research Fund</funding-source>.</p></sec>
<sec sec-type="COI-statement"><title>Conflicts of Interest</title>
<p>The authors declare that they have no conflicts of interest to report regarding the present study.</p></sec>
<ref-list content-type="authoryear"><title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Manne</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Kantheti</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Kantheti</surname></string-name></person-group>, &#x201C;<article-title>Classification of skin cancer using deep learning, convolutional neural networks-opportunities and vulnerabilities-A systematic review</article-title>,&#x201D; <source>International Journal for Modern Trends in Science and Technology</source>, vol. <volume>6</volume>, no. <issue>11</issue>, pp. <fpage>101</fpage>&#x2013;<lpage>108</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T. J.</given-names> <surname>Brinker</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Hekler</surname></string-name>, <string-name><given-names>A. H.</given-names> <surname>Enk</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Klode</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Hauschild</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Deep learning outperformed 136 of 157 dermatologists in a head-to-head dermoscopic melanoma image classification task</article-title>,&#x201D; <source>European Journal of Cancer</source>, vol. <volume>113</volume>, pp. <fpage>47</fpage>&#x2013;<lpage>54</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Naeem</surname></string-name>, <string-name><given-names>M. S.</given-names> <surname>Farooq</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Khelifi</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Abid</surname></string-name></person-group>, &#x201C;<article-title>Malignant melanoma classification using deep learning: Datasets, performance measurements, challenges and opportunities</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, pp. <fpage>110575</fpage>&#x2013;<lpage>110597</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Lai</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Zhou</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Wessely</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Heppt</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Maier</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>A disease network-based deep learning approach for characterizing melanoma</article-title>,&#x201D; <source>International Journal of Cancer</source>, vol. <volume>150</volume>, no. <issue>6</issue>, pp. <fpage>1029</fpage>&#x2013;<lpage>1044</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. H.</given-names> <surname>Kassani</surname></string-name> and <string-name><given-names>P. H.</given-names> <surname>Kassani</surname></string-name></person-group>, &#x201C;<article-title>A comparative study of deep learning architectures on melanoma detection</article-title>,&#x201D; <source>Tissue and Cell</source>, vol. <volume>58</volume>, pp. <fpage>76</fpage>&#x2013;<lpage>83</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="book"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Mukherjee</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Adhikari</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Roy</surname></string-name></person-group>, &#x201C;<chapter-title>Malignant melanoma classification using cross-platform dataset with deep learning CNN architecture</chapter-title>,&#x201D; in <source>Recent Trends in Signal and Image Processing, Advances in Intelligent Systems and Computing Book Series</source>, <publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer</publisher-name>, vol. <volume>922</volume>, pp. <fpage>31</fpage>&#x2013;<lpage>41</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Korfiati</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Grafanaki</surname></string-name>, <string-name><given-names>G. C.</given-names> <surname>Kyriakopoulos</surname></string-name>, <string-name><given-names>I.</given-names> <surname>Skeparnias</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Georgiou</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Revisiting miRNA association with melanoma recurrence and metastasis from a machine learning point of view</article-title>,&#x201D; <source>International Journal of Molecular Sciences</source>, vol. <volume>23</volume>, no. <issue>3</issue>, pp. <fpage>1299</fpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Adegun</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Viriri</surname></string-name></person-group>, &#x201C;<article-title>Deep learning techniques for skin lesion analysis and melanoma cancer detection: A survey of state-of-the-art</article-title>,&#x201D; <source>Artificial Intelligence Review</source>, vol. <volume>54</volume>, no. <issue>2</issue>, pp. <fpage>811</fpage>&#x2013;<lpage>841</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J. A. A.</given-names> <surname>Damian</surname></string-name>, <string-name><given-names>V.</given-names> <surname>Ponomaryov</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Sadovnychiy</surname></string-name> and <string-name><given-names>H. C.</given-names> <surname>Fernandez</surname></string-name></person-group>, &#x201C;<article-title>Melanoma and nevus skin lesion classification using handcraft and deep learning feature fusion via mutual information measures</article-title>,&#x201D; <source>Entropy</source>, vol. <volume>22</volume>, no. <issue>4</issue>, pp. <fpage>484</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Maiti</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Chatterjee</surname></string-name></person-group>, &#x201C;<article-title>Improving detection of melanoma and naevus with deep neural networks</article-title>,&#x201D; <source>Multimedia Tools and Applications</source>, vol. <volume>79</volume>, no. <issue>21&#x2013;22</issue>, pp. <fpage>15635</fpage>&#x2013;<lpage>15654</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>K. M.</given-names> <surname>Hosny</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Kassem</surname></string-name> and <string-name><given-names>M. M.</given-names> <surname>Foaud</surname></string-name></person-group>, &#x201C;<article-title>Skin cancer classification using deep learning and transfer learning</article-title>,&#x201D; in <conf-name>2018 9th Cairo Int. Biomedical Engineering Conf. (CIBEC)</conf-name>, <conf-loc>Cairo, Egypt</conf-loc>, pp. <fpage>90</fpage>&#x2013;<lpage>93</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Li</surname></string-name> and <string-name><given-names>L.</given-names> <surname>Shen</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion analysis towards melanoma detection using deep learning network</article-title>,&#x201D; <source>Sensors</source>, vol. <volume>18</volume>, no. <issue>2</issue>, pp. <fpage>556</fpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. A.</given-names> <surname>Adegun</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Viriri</surname></string-name></person-group>, &#x201C;<article-title>Deep learning-based system for automatic melanoma detection</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, pp. <fpage>7160</fpage>&#x2013;<lpage>7172</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Kaur</surname></string-name>, <string-name><given-names>H. G.</given-names> <surname>Hosseini</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Sinha</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Lind&#x00E9;n</surname></string-name></person-group>, &#x201C;<article-title>Melanoma classification using a novel deep convolutional neural network with dermoscopic images</article-title>,&#x201D; <source>Sensors</source>, vol. <volume>22</volume>, no. <issue>3</issue>, pp. <fpage>1134</fpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Thapar</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Rakhra</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Cazzato</surname></string-name> and <string-name><given-names>M. S.</given-names> <surname>Hossain</surname></string-name></person-group>, &#x201C;<article-title>A novel hybrid deep learning approach for skin lesion segmentation and classification</article-title>,&#x201D; <source>Journal of Healthcare Engineering</source>, vol. <volume>2022</volume>, pp. <fpage>1</fpage>&#x2013;<lpage>21</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Banerjee</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Singh</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Chakraborty</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Das</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Bag</surname></string-name></person-group>, &#x201C;<article-title>Melanoma diagnosis using deep learning and fuzzy logic</article-title>,&#x201D; <source>Diagnostics</source>, vol. <volume>10</volume>, no. <issue>8</issue>, pp. <fpage>577</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Majtner</surname></string-name>, <string-name><given-names>S. Y.</given-names> <surname>Yayilgan</surname></string-name> and <string-name><given-names>J. Y.</given-names> <surname>Hardeberg</surname></string-name></person-group>, &#x201C;<article-title>Optimised deep learning features for improved melanoma detection</article-title>,&#x201D; <source>Multimedia Tools and Applications</source>, vol. <volume>78</volume>, no. <issue>9</issue>, pp. <fpage>11883</fpage>&#x2013;<lpage>11903</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Su</surname></string-name> and <string-name><given-names>K.</given-names> <surname>Liu</surname></string-name></person-group>, &#x201C;<article-title>Spectral-spatial classification of hyperspectral image based on kernel extreme learning machine</article-title>,&#x201D; <source>Remote Sensing</source>, vol. <volume>6</volume>, no. <issue>6</issue>, pp. <fpage>5795</fpage>&#x2013;<lpage>5814</lpage>, <year>2014</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Bao</surname></string-name> and <string-name><given-names>Y.</given-names> <surname>Song</surname></string-name></person-group>, &#x201C;<article-title>Human activity recognition based on time series analysis using U-net</article-title>,&#x201D; arXiv: 1809.08113 [cs, stat], <year>2018</year>, Accessed: Jun. 03, 2022.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Ye</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Lu</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Bai</surname></string-name> and <string-name><given-names>J.</given-names> <surname>Gu</surname></string-name></person-group>, &#x201C;<article-title>ResNet-Locust-bn network-based automatic identification of east asian migratory locust species and instars from rgb images</article-title>,&#x201D; <source>Insects</source>, vol. <volume>11</volume>, no. <issue>8</issue>, pp. <fpage>458</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P. G.</given-names> <surname>Nieto</surname></string-name>, <string-name><given-names>E. G.</given-names> <surname>Gonzalo</surname></string-name> and <string-name><given-names>J. P.</given-names> <surname>S&#x00E1;nchez</surname></string-name></person-group>, &#x201C;<article-title>Prediction of the critical temperature of a superconductor by using the WOA/MARS, ridge, Lasso and elastic-net machine learning techniques</article-title>,&#x201D; <source>Neural Computing and Applications</source>, vol. <volume>33</volume>, no. <issue>24</issue>, pp. <fpage>17131</fpage>&#x2013;<lpage>17145</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. M.</given-names> <surname>Lynn</surname></string-name>, <string-name><given-names>S. B.</given-names> <surname>Pan</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Kim</surname></string-name></person-group>, &#x201C;<article-title>A deep bidirectional gru network model for biometric electrocardiogram classification based on recurrent neural networks</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>7</volume>, pp. <fpage>145395</fpage>&#x2013;<lpage>145405</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Abualigah</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Diabat</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Sumari</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Gandomi</surname></string-name></person-group>, &#x201C;<article-title>A novel evolutionary arithmetic optimization algorithm for multilevel thresholding segmentation of COVID-19 CT images</article-title>,&#x201D; <source>Processes</source>, vol. <volume>9</volume>, no. <issue>7</issue>, pp. <fpage>1155</fpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>N. C. F.</given-names> <surname>Codella</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Gutman</surname></string-name>, <string-name><given-names>M. E.</given-names> <surname>Celebi</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Helba</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Marchetti</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Skin lesion analysis toward melanoma detection: A challenge at the 2017 international symposium on biomedical imaging (ISBI), hosted by the international skin imaging collaboration (ISIC)</article-title>,&#x201D; in <conf-name>2018 IEEE 15th Int. Symp. on Biomedical Imaging (ISBI 2018)</conf-name>, <conf-loc>Washington, DC, USA</conf-loc>, pp. <fpage>168</fpage>&#x2013;<lpage>172</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V.</given-names> <surname>Rotemberg</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Kurtansky</surname></string-name>, <string-name><given-names>B. B.</given-names> <surname>Stablein</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Caffery</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Chousakos</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>A patient-centric dataset of images and metadata for identifying melanomas using clinical context</article-title>,&#x201D; <source>Scientific Data</source>, vol. <volume>8</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>8</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Ding</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Song</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Tang</surname></string-name> and <string-name><given-names>F.</given-names> <surname>Guo</surname></string-name></person-group>, &#x201C;<article-title>Two-stage deep neural network via ensemble learning for melanoma classification</article-title>,&#x201D; <source>Frontiers in Bioengineering and Biotechnology</source>, vol. <volume>9</volume>, pp. <fpage>1</fpage>&#x2013;<lpage>12</lpage>, <year>2022</year>.</mixed-citation></ref>
</ref-list>
</back>
</article>