<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">63851</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2025.063851</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>A Multi-Layers Information Fused Deep Architecture for Skin Cancer Classification in Smart Healthcare</article-title>
<alt-title alt-title-type="left-running-head">A Multi-Layers Information Fused Deep Architecture for Skin Cancer Classification in Smart Healthcare</alt-title>
<alt-title alt-title-type="right-running-head">A Multi-Layers Information Fused Deep Architecture for Skin Cancer Classification in Smart Healthcare</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Dillshad</surname><given-names>Veena</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-2" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Khan</surname><given-names>Muhammad Attique</given-names></name><xref ref-type="aff" rid="aff-2">2</xref><xref rid="cor1" ref-type="corresp">&#x002A;</xref><email>attique@ciitwah.edu.pk</email></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Nazir</surname><given-names>Muhammad</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Ahmad</surname><given-names>Jawad</given-names></name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>AlHammadi</surname><given-names>Dina Abdulaziz</given-names></name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Houda</surname><given-names>Taha</given-names></name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western"><surname>Cho</surname><given-names>Hee-Chan</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-8" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Chang</surname><given-names>Byoungchol</given-names></name><xref ref-type="aff" rid="aff-5">5</xref><xref rid="cor1" ref-type="corresp">&#x002A;</xref><email>bcchang@hanyang.ac.kr</email></contrib>
<aff id="aff-1"><label>1</label><institution>Department of Computer Science, HITEC University</institution>, <addr-line>Taxila, 47080</addr-line>, <country>Pakistan</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Artificial Intelligence, College of Computer Engineering and Science, Prince Mohammad Bin Fahd University</institution>, <addr-line>Al-Khobar, 31952</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Information Systems, College of Computer and Information Sciences, Princess Nourah bint Abdulrahman University</institution>, <addr-line>Riyadh, 11671</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-4"><label>4</label><institution>Center for Computational Social Science, Hanyang University</institution>, <addr-line>Seoul, 01000</addr-line>, <country>Republic of Korea</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Computer Science, Hanyang University</institution>, <addr-line>Seoul, 01000</addr-line>, <country>Republic of Korea</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Authors: Muhammad Attique Khan. Email: <email>attique@ciitwah.edu.pk</email>; Byoungchol Chang. Email: <email>bcchang@hanyang.ac.kr</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic">
<year>2025</year>
</pub-date>
<pub-date date-type="pub" publication-format="electronic">
<day>19</day><month>05</month><year>2025</year>
</pub-date>
<volume>83</volume>
<issue>3</issue>
<fpage>5299</fpage>
<lpage>5321</lpage>
<history>
<date date-type="received">
<day>25</day>
<month>1</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>28</day>
<month>2</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2025 The Authors.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Published by Tech Science Press.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_63851.pdf"></self-uri>
<abstract>
<p>Globally, skin cancer is a prevalent form of malignancy, and its early and accurate diagnosis is critical for patient survival. Clinical evaluation of skin lesions is essential, but several challenges, such as long waiting times and subjective interpretations, make this task difficult. The recent advancement of deep learning in healthcare has shown much success in diagnosing and classifying skin cancer and has assisted dermatologists in clinics. Deep learning improves the speed and precision of skin cancer diagnosis, leading to earlier prediction and treatment. In this work, we proposed a novel deep architecture for skin cancer classification in innovative healthcare. The proposed framework performed data augmentation at the first step to resolve the imbalance issue in the selected dataset. The proposed architecture is based on two customized, innovative Convolutional neural network (CNN) models based on small depth and filter sizes. In the first model, four residual blocks are added in a squeezed fashion with a small filter size. In the second model, five residual blocks are added with smaller depth and more useful weight information of the lesion region. To make models more useful, we selected the hyperparameters through Bayesian Optimization, in which the learning rate is selected. After training the proposed models, deep features are extracted and fused using a novel information entropy-controlled Euclidean Distance technique. The final features are passed on to the classifiers, and classification results are obtained. Also, the proposed trained model is interpreted through LIME-based localization on the HAM10000 dataset. The experimental process of the proposed architecture is performed on two dermoscopic datasets, HAM10000 and ISIC2019. We obtained an improved accuracy of 90.8% and 99.3% on these datasets, respectively. Also, the proposed architecture returned 91.6% for the cancer localization. 
In conclusion, the proposed architecture accuracy is compared with several pre-trained and state-of-the-art (SOTA) techniques and shows improved performance.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Smart health</kwd>
<kwd>skin cancer</kwd>
<kwd>internet of things</kwd>
<kwd>deep learning</kwd>
<kwd>residual blocks</kwd>
<kwd>fusion</kwd>
<kwd>optimization</kwd>
</kwd-group>
<funding-group>
<award-group id="awg1">
<funding-source>National Research Foundation of Korea (NRF)</funding-source>
<award-id>2018R1A5A7059549</award-id>
</award-group>
<award-group id="awg2">
<funding-source>Princess Nourah bint Abdulrahman University</funding-source>
<award-id>PNURSP2025R508</award-id>
</award-group>
<award-group id="awg3">
<funding-source>Princess Nourah bint Abdulrahman University</funding-source>
</award-group>
</funding-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>The skin is the most significant body part by surface area in the human body. It shields the inner organs and is sensitive to external factors [<xref ref-type="bibr" rid="ref-1">1</xref>]. UV exposure is a critical external factor that adversely impacts the skin. Excessive exposure and lack of protection mechanisms lead to fatal skin diseases such as melanoma [<xref ref-type="bibr" rid="ref-2">2</xref>]. Every year, there is a significant rise in skin diseases. The largest organ is needed to enhance understanding of skin diseases [<xref ref-type="bibr" rid="ref-3">3</xref>]. Skin cancer is the term for the unusual growth of new skin cells and is a fatal skin disease [<xref ref-type="bibr" rid="ref-4">4</xref>]. Skin cancer is generally categorized into benign and malignant forms, distinguishing whether the condition is inactive or actively progressing. However, it has seven significant categorizations: Actinic keratosis (akiec), Vascular lesions (vasc), Basal cell carcinoma (bcc), Melanocytic nevi (nv), Dermatofibroma (df), Melanoma (mel), Benign keratosis-like lesions (bkl).</p>
<p>It is the most common malignancy in the United States. In the US, skin cancer claims the lives of about two people every hour. In 2023, there is an estimated 4.4% rise in the number of melanoma deaths. In the United States, an estimated 97,610 persons, among which 58,120 men and 39,490 women, will receive a diagnosis of invasive skin cancer in 2023. In 2020, the diagnosed melanoma cases were 324,635, whereas 57,043 deaths were reported. Also, white people are 20 times more likely than people with dark skin to develop melanoma. The diagnostic age is 65 on average. Compared to men, women receive more melanoma diagnoses before they reach the age of 50, whereas men have a higher prevalence after the age of 50. Melanoma predominantly manifests with age, yet its occurrence extends to younger individuals, encompassing those under 30 [<xref ref-type="bibr" rid="ref-5">5</xref>]. Notably, it stands among the prevalent cancers identified in young adults, mainly among women. In 2020, approximately 2400 instances of melanoma were projected to be detected in individuals aged 15 to 29.</p>
<p>The incidence of melanoma witnessed a significant upward trend for several decades. However, in the early 2000s, the annual diagnosis rates for individuals under 50 stabilized in women and exhibited a decline of approximately 1% per year in men [<xref ref-type="bibr" rid="ref-4">4</xref>]. Although only making a small fraction, e.g., 1%, of all skin cancer diagnoses in the United States, melanoma is a primary reason for deaths caused by skin cancer. On the other hand, between 2011 and 2020, the annual rate of mortality from melanoma declined by around 3% for individuals over 50 and 5% for persons under 50. Medical advancements have led to this progress.</p>
<p>Traditionally, many invasive and noninvasive techniques were utilized for skin cancer diagnosis. These techniques include biopsy, sonography, fluorescence spectroscopy, and dermoscopy [<xref ref-type="bibr" rid="ref-6">6</xref>]. Among all traditional clinical methods, the use of dermoscopy is comparatively high due to its noninvasive nature. Dermoscopy is a digital device that involves using handheld devices to illuminate subsurface structures of the skin [<xref ref-type="bibr" rid="ref-7">7</xref>]. These devices facilitate the optical penetration of light rays beyond the skin surface, reducing surface reflection [<xref ref-type="bibr" rid="ref-8">8</xref>]. It has higher classifying power than the naked eye clinical analysis, which gives a maximum of 60% accuracy [<xref ref-type="bibr" rid="ref-7">7</xref>], but still, the correctness depends on the dermatologist&#x2019;s practice. The ABCD rule was developed for dermoscopy to diagnose skin lesions in clinical trials [<xref ref-type="bibr" rid="ref-9">9</xref>]. The parameters of this rule include asymmetry, border irregularity, color, and differential structures. These parameters graded the lesions in benign or malignant [<xref ref-type="bibr" rid="ref-10">10</xref>]. With the use of computerized techniques, the diagnostics performance has been improved for the experts and clinicians with limited experience in dermoscopy [<xref ref-type="bibr" rid="ref-11">11</xref>].</p>
<p>Significant contributions are made to automated skin lesion diagnosis to counter traditional approaches&#x2019; limitations [<xref ref-type="bibr" rid="ref-12">12</xref>]. The application of computational intelligence has remarkably increased the diagnostic accuracy of skin lesions [<xref ref-type="bibr" rid="ref-13">13</xref>]. The endurance rate of skin cancer patients is determined by multiple factors. One of the prime factors in this regard is the premature detection of skin cancer. The involvement of Computer-Aided Diagnostic (CAD) systems has significantly facilitated the timely detection of skin lesions [<xref ref-type="bibr" rid="ref-14">14</xref>]. The gradual decline in the mortality rate of skin cancer and recent studies validate the impact of computer-aided diagnostic systems [<xref ref-type="bibr" rid="ref-15">15</xref>].</p>
<p>CAD systems follow predefined steps to detect and classify lesions [<xref ref-type="bibr" rid="ref-16">16</xref>]. These steps generally follow the sequence: preprocessing followed by separation of an object from the background, feature extraction, feature selection, and finally, classification [<xref ref-type="bibr" rid="ref-15">15</xref>]. This sequence can be modified according to the methodology followed by CAD systems. The core step for automated diagnosis in these CAD systems is extracting features. These features are defining parameters to distinguish between classes of skin lesions. Initially, CAD systems were based on handcrafted image feature extraction [<xref ref-type="bibr" rid="ref-11">11</xref>]. These handcrafted features are inadequate to solve the challenges of publicly available datasets, including imbalanced datasets, complicated images, interclass similarity, and intra-class differences. Most recently, deep feature extraction has gained attention in intelligent CAD systems. It has provided promising results in different domains and the field of automated medical diagnosis [<xref ref-type="bibr" rid="ref-17">17</xref>,<xref ref-type="bibr" rid="ref-18">18</xref>].</p>
<p>With the emergence of intelligent techniques, automatic diagnosis is improving daily. However, along with advancements in tools and techniques, new challenges keep surfacing in the medical field [<xref ref-type="bibr" rid="ref-16">16</xref>,<xref ref-type="bibr" rid="ref-19">19</xref>]. Big and complicated datasets are becoming publicly available, posing new research challenges [<xref ref-type="bibr" rid="ref-20">20</xref>]. Particularly for skin cancer, much of the research is based on the binary classification of lesions; however, skin cancer is categorized into seven major classes. The datasets of skin lesions are highly imbalanced, along with complications like inter-class similarity and intra-class differences. Researchers have contributed to addressing these problems, but improvements are required to develop a robust and efficient multiclass skin lesion diagnosis system. With the emergence of deep neural networks, concerns like computational complexity and resource consumption emerged. Therefore, there is a dire need for a framework that considers all these challenges and provides an accurate, robust, computationally efficient solution. This research presents an automated framework for efficiently classifying skin lesions into multiple classes. Significant contributions of this work are:
<list list-type="simple">
<list-item><label>-</label><p>We proposed two deep learning architectures, QuadRes-Net and PentRes-Net, inspired by the ResNet architecture. Both models have fewer parameters and are more efficient than the ResNet50 and ResNet101 architectures.</p></list-item>
<list-item><label>-</label><p>The proposed models&#x2019; hyperparameters have been initialized using Bayesian Optimization. Usually, they are initialized using a hit-and-trial method.</p></list-item>
<list-item><label>-</label><p>A novel technique, Information Entropy Controlled Distance, is proposed for the fusion of higher entropy value features for improved accuracy and less computational time.</p></list-item>
<list-item><label>-</label><p>GradCAM-based interpretability is performed on the original images to capture the critical prediction information. A detailed ablation study was performed, and the proposed work was compared with some recent methods.</p></list-item>
</list></p>
<p>The manuscript unfolds in the subsequent sequence. Initially, <xref ref-type="sec" rid="s2">Section 2</xref> provides a summary of related work, encompassing an overview of existing techniques. Following that, in <xref ref-type="sec" rid="s3">Section 3</xref>, the proposed methodology is delineated, encompassing the explanation of datasets, proposed deep learning models, and complete framework. A description of the findings is covered in <xref ref-type="sec" rid="s4">Section 4</xref>. Lastly, <xref ref-type="sec" rid="s5">Section 5</xref> concludes the manuscript.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Literature Review</title>
<p>Significant advancements have been made in automated medical diagnosis in the past decade [<xref ref-type="bibr" rid="ref-16">16</xref>]. Several computerized techniques have been introduced in the literature for the classification and localization of skin lesions [<xref ref-type="bibr" rid="ref-21">21</xref>,<xref ref-type="bibr" rid="ref-22">22</xref>]. Skin lesion segmentation is pivotal in automated dermatological image analysis, presenting substantial opportunities for enhancing diagnosis, treatment planning, and disease monitoring. Despite the hurdles, progress in segmentation methodologies, notably those rooted in deep learning, shows potential for achieving more precise and dependable lesion delineation in clinical settings. Skin lesion segmentation involves identifying and isolating areas of interest (lesions) within medical images, such as dermoscopic or clinical photographs. Its primary objective is to precisely differentiate the lesion area from the surrounding healthy skin, enabling further analysis and diagnosis [<xref ref-type="bibr" rid="ref-23">23</xref>].</p>
<p>Skin lesion classification entails grouping lesions into distinct categories determined by their visual attributes, including color, shape, texture, and irregularity of borders. By analyzing these traits, classification algorithms strive to discern between benign and malignant lesions, thereby assisting in precise diagnosis and formulation of treatment strategies. Skin lesion classification is a crucial aspect of automated dermatological image analysis, holding considerable promise for improving early detection, decision-making, and patient care. Addressing data variability, class imbalance, and model interpretability challenges is paramount for advancing and integrating classification algorithms effectively into clinical practice. Before deep learning approaches, segmentation was based on basic classic image processing. These traditional segmentation techniques can be broadly classified as follows: It segments images into numerous regions based on pixel intensity, grouping pixels with similar grey values. In [<xref ref-type="bibr" rid="ref-24">24</xref>], the authors tackle the challenges of skin lesion images by presenting a new contrast enhancement technique and segmenting the lesion area by a novel OCF (optimized color feature)-based technique for lesion segmentation, utilizing the YCbCr color space for feature extraction, optimized by a Genetic Algorithm (GA). A multilevel probability-based threshold function is developed to categorize the optimized features into their respective colors, which are then converted into the binary form using maximum probability-based thresholding. An existing saliency-based method is also applied, and its information is integrated to refine the lesion.</p>
<p>Authors in [<xref ref-type="bibr" rid="ref-25">25</xref>] presented a two-step system consisting of a preprocessing algorithm and a lesion segmentation network. The hairline removal algorithm uses morphological operators and is designed to eliminate noise artifacts. The processed images are then input into a convolutional neural network (CNN) for lesion segmentation. This novel CNN framework is built from scratch, following an encoder-decoder architecture. The layers are uniquely sequenced to perform both downsampling and upsampling, resulting in a high-resolution segmentation map. In [<xref ref-type="bibr" rid="ref-26">26</xref>], authors extracted lesion area by using the U-Net model that subsequently enhanced the classification accuracy. To address the segmentation challenges, the authors in [<xref ref-type="bibr" rid="ref-27">27</xref>] aimed to implement a novel Sailfish-based Gradient Boosting Framework (SbGBF) for accurately recognizing and segmenting the SL region. The boosting mechanism, rich with noise removal features, optimizes the segmentation process.</p>
<p>The boosting parameters are activated to eliminate noise variables in the trained SL data. Subsequently, the sailfish fitness function is applied to trace region features in the preprocessed SL images, leading to the final segmentation. In [<xref ref-type="bibr" rid="ref-28">28</xref>], authors gave a collaborative learning deep convolutional neural networks (CL-DCNN) model based on the teacher-student learning method for dermatological segmentation and classification. The self-training method was introduced to generate high-quality pseudo-labels. The segmentation network is selectively retrained through a classification network that screens the pseudo-labels. Specifically, high-quality pseudo-labels were obtained for the segmentation network using a reliability measure method.</p>
<p>Class activation maps were also employed to enhance the segmentation network&#x2019;s localization capability. Furthermore, the classification network&#x2019;s recognition ability was improved by providing lesion contour information through lesion segmentation masks. In [<xref ref-type="bibr" rid="ref-2">2</xref>], the authors suggested an end-to-end deep learning framework based on the segmentation method. The author presented a novel S-MobileNet and used a mish activation instead of ReLU. The framework was implemented in HAM10000 and ISIC datasets, and they achieved 98.15% accuracy. The drawback of this work was the authors did not address the data unbalancing problem. Ref. [<xref ref-type="bibr" rid="ref-29">29</xref>] presented an inherent learning using the deep learning models for the classification of skin cancer. The authors integrate an explainable AI algorithm with the proposed model. They evaluated the framework using the HAM10000 dataset, gained the highest accuracy of 92.89%, and visualized the learned feature. In [<xref ref-type="bibr" rid="ref-30">30</xref>], the authors presented a novel generative AI model for preventing the data imbalance problem in skin cancer. The authors presented an ST-GAN network that generated skin cancer images and employed the proposed classification model. They used the HAM10000 dataset for the evaluation and improved accuracy by 16% from the SOTA techniques.</p>
<p>This work achieved a classification accuracy of 82.1%. In [<xref ref-type="bibr" rid="ref-31">31</xref>], a unified CAD model that involves preprocessing, a novel architecture for segmentation and transfer learning, is presented, followed by feature extraction, fusion, and selection. Finally, the features are classified using SVM. For the ISIC2019 dataset, this approach achieved 93.47% accuracy. In [<xref ref-type="bibr" rid="ref-32">32</xref>], authors investigated the performance of 17 different CNNs for skin lesion classification and established that DenseNet201 with Cubic SVM/ Fine KNN gained a top accuracy of 92.34%. In [<xref ref-type="bibr" rid="ref-33">33</xref>], authors took advantage of and combined transformers and CNNs to build an efficient skin lesion classification system. This system was assessed using the ISIC2019 dataset and achieved an accuracy of 97.2%. In [<xref ref-type="bibr" rid="ref-34">34</xref>], a new segmentation framework is given, and classification is performed using extracted features from pre-trained DenseNet201. It achieved the accuracy of 91.7%. In [<xref ref-type="bibr" rid="ref-35">35</xref>], authors presented a novel contrast enhancement technique and then used modified DarkNet-53 and DenseNet-201 for transfer learning. Extracted features are combined and then optimized using the modified marine predator optimizer. The ISIC2019 dataset was utilized for evaluation, and an accuracy of 98.8% was attained. In [<xref ref-type="bibr" rid="ref-36">36</xref>], a new model for segmentation was presented, after which the segmented images&#x2019; local and global characteristics were extracted and classified using EfficientNetB1. This work achieved an accuracy of 91.73% for the ISIC2019 dataset.</p>
<p>In [<xref ref-type="bibr" rid="ref-37">37</xref>], authors presented a system for skin lesion classification focusing on transfer learning and feature optimization. In [<xref ref-type="bibr" rid="ref-38">38</xref>], authors presented a semantic segmentation model, then some essential features were nominated by the Binary Dragonfly Algorithm (BDA), and finally, grouping was done using SqueezeNet. This work achieved an accuracy of 98.77%. In [<xref ref-type="bibr" rid="ref-39">39</xref>], ShuffleNet is modified using SqueezeNet excitation blocks, and a lightweight CNN is presented. The accuracy achieved by this work is 98.1% for the ISIC2019 dataset. In [<xref ref-type="bibr" rid="ref-40">40</xref>], the authors presented an AI-based method for skin lesion classification. A residual deep convolution neural network was utilized for this work, and an accuracy of 94.65% was achieved for the ISIC2019 dataset.</p>
<p>In [<xref ref-type="bibr" rid="ref-41">41</xref>], authors used deep learning to present AI-driven skin cancer classification. They employed pertained vision transformers and conducted a comparative analysis among the deep learning models. They utilized the ISIC2019 dataset for the experimental process. They achieved better results from the state-of-the-art methods. The primary limitation of this work was the fixed hyperparameters that affect the generalization of the proposed framework. In [<xref ref-type="bibr" rid="ref-42">42</xref>], authors suggested a lightweight deep-learning model for the classification of skin cancer disease. They performed experiments on publicly available datasets with two classes, achieving 92% accuracy. The limitation of the proposed work was that the number of training samples was small for the efficient learning of the proposed model. In [<xref ref-type="bibr" rid="ref-43">43</xref>], the authors suggested an optimized CNN for dermatological lesion classification. They created a CNN from scratch and a novel data augmentation method. The authors selected the HAM10000 dataset for the experimental process and gained 97.78% accuracy. The limitation of this work was the extensive utilization of the pooling layer that removes the valuable features from the feature maps.</p>
<p><bold>Challenges:</bold> Despite all these research advancements, many difficulties in skin lesion segmentation and accurate classification require the research community&#x2019;s attention for effective and efficient solutions. Most of these past research studies for skin lesion classification have focused on segmentation and then used transfer learning for classification tasks. Most of the previous works have not focused on the design of new models from scratch. In addition, hyperparameter optimization was rarely focused on. The preprocessing and segmentation steps are eluded in the proposed work, and two novel deep-learning architectures are proposed to efficiently classify skin lesions into several classes.</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Proposed Methodology</title>
<p>The detailed theoretical and mathematical justification for the suggested methodology is provided in this section. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> depicts the proposed method for skin lesion classification and segmentation. Data augmentation was performed using the proposed method, and two models were trained. The proposed CNN models in this work are QuadRes-Net and PentRes-Net. Hyperparameters of both models have been initialized using the Bayesian Optimization algorithm. After that, we trained both models and extracted testing features. The testing features are fused using a serial approach, reducing the amount of irrelevant information using information entropy. The final resultant vector is further passed to classifiers for the final classification. An explainable AI technique named LIME is also applied to analyze the interpretability of a proposed model. Also, the LIME output is concatenated with the mean-saliency technique for the lesion segmentation. The details of each step in the proposed method are given below.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Proposed framework for multiclass skin lesion classification</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_63851-fig-1.tif"/>
</fig>
<sec id="s3_1">
<label>3.1</label>
<title>Dataset and Augmentation</title>
<p>This study employs two well-known datasets, HAM10000 (Human Against Machine) [<xref ref-type="bibr" rid="ref-44">44</xref>] and ISIC2019 [<xref ref-type="bibr" rid="ref-45">45</xref>], to assess the proposed framework. The HAM10000 dataset included 10,015 dermoscopic images. The images in this dataset are in RGB format. The number of images in each class includes 327 (AKIEC), 1099 (BKL), 514 (BCC), 115 (DF), 1113 (MEL), 6705 (NV), and 142 (VASC), respectively.</p>
<p>The 25,331 images in the ISIC2019 dataset are divided into eight classes. The number of images in the melanocytic nevi class is 12,875; for the melanoma class, it is 4522; for the BKL class, it is 2624 images; for the BCC class, it contains 3323 images; the SCC class includes 253 images; the VL class has 628 images; the DF class has 239 images; and AK includes 867 images, respectively. The sample images of each lesion type in these datasets are shown in <xref ref-type="fig" rid="fig-1">Fig. 1</xref> (second half).</p>
<p>The class distributions of both datasets indicate an imbalanced problem. To resolve this problem, a simple flip-and-rotate approach to data augmentation is adopted. The resultant balanced datasets contain both the original and augmented images. <xref ref-type="table" rid="table-1">Table 1</xref> displays the class distributions of each dataset before and after augmentation.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption>
<title>Class distributions of HAM10000 and ISIC2019 before and after augmentation</title>
</caption>
<table>
<colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th><italic>Class/Dataset</italic></th>
<th colspan="2"><italic>HAM</italic>10000</th>
<th colspan="2"><italic>ISIC</italic>2019</th>
</tr>
<tr>
<th></th>
<th><italic>Raw data</italic></th>
<th><italic>Augmented</italic></th>
<th><italic>Raw data</italic></th>
<th><italic>Augmented</italic></th>
</tr>
</thead>
<tbody>
<tr>
<td><italic>AKIEC</italic></td>
<td>327</td>
<td>5242</td>
<td>867</td>
<td>3469</td>
</tr>
<tr>
<td><italic>BCC</italic></td>
<td>514</td>
<td>5225</td>
<td>3323</td>
<td>3232</td>
</tr>
<tr>
<td><italic>BKL</italic></td>
<td>1099</td>
<td>5852</td>
<td>2624</td>
<td>3200</td>
</tr>
<tr>
<td><italic>DF</italic></td>
<td>115</td>
<td>3680</td>
<td>239</td>
<td>3232</td>
</tr>
<tr>
<td><italic>MEL</italic></td>
<td>1113</td>
<td>5423</td>
<td>4522</td>
<td>3072</td>
</tr>
<tr>
<td><italic>NV</italic></td>
<td>6705</td>
<td>6705</td>
<td>12,875</td>
<td>2112</td>
</tr>
<tr>
<td><italic>VASC</italic></td>
<td>142</td>
<td>4544</td>
<td>628</td>
<td>2240</td>
</tr>
<tr>
<td><italic>SCC</italic></td>
<td>&#x2013;</td>
<td>&#x2013;</td>
<td>253</td>
<td>3200</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Proposed QuadRes-Net Model</title>
<p>The proposed model, named QuadRes-Net, is visually illustrated in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>. The motivation behind implementing the QuadRes model is as follows: the quad structure allows the network to highlight malignant regions&#x2019; global patterns, shapes, and irregularities and identify the irregular borders and color distribution essential to skin cancer. The model has an input layer that accepts the input image size of 299 &#x00D7; 299 &#x00D7; 3. It is followed by a convolutional layer named conv comprising 32 filters of dimension 7 &#x00D7; 7 &#x00D7; 3 with a step value of 2. Applying the ReLU activation layer resulted in obtaining a feature map of size 150 &#x00D7; 150 &#x00D7; 32. The output of the first ReLU activation is forwarded to a first residual block. The first block contains conv_1 comprising 64 kernels of size 3 &#x00D7; 3 &#x00D7; 32 with a step size of 1, a batch norm, relu_1, and conv_2 with 32 filters of dimension 7 &#x00D7; 7 &#x00D7; 64 with a step of 1. The outcome of this block of size 150 &#x00D7; 150 &#x00D7; 32 is added to the identity mapping, and the results are convolved with the following conv_3 weight layer comprising 64 filters of size 3 &#x00D7; 3 &#x00D7; 32 with a step of 2. A max pooling layer of size 3 &#x00D7; 3 with a stride of 1 is placed next, playing its vital role in the reduction of spatial dimensions.</p>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Proposed QuadRes-Net model for skin lesion classification</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_63851-fig-2.tif"/>
</fig>
<p>An output of size 75 &#x00D7; 75 &#x00D7; 64 enters the second residual block from the max pooling layer. The second block encompasses conv_5, which has 128 filters of dimension <inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>64</mml:mn></mml:math></inline-formula> with a step size of 1, batchnorm_1, relu_2, and conv_4, which have 64 kernels of dimension <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></inline-formula>. Conv_6, which has 128 filters of size <inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:mn>7</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>7</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>64</mml:mn><mml:mo>,</mml:mo></mml:math></inline-formula> follows the shortcut connection and is continued by relu_3.</p>
<p>The third block first convolves the input of size <inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:mn>38</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>38</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula> with conv_8, which has 256 filters of size <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></inline-formula> and step size 1. The convolution is followed by batchnorm_2, relu_4, and conv_7, which has 128 filters of size <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula> with stride 1. A feature map of size <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:mn>38</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>38</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></inline-formula> exits the block and, following the skip connection, passes through a convolutional layer of 256 filters of size <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:mn>7</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>7</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn><mml:mo>,</mml:mo></mml:math></inline-formula> stride two, and relu_6. Proceeding this, an input of <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:mn>19</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>19</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula> enters the final residual block.</p>
<p>The final block comprises the first weight layer with 256 filters of dimension <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula>, stride 1, batchnorm_3, relu_5, and a second weight layer with 256 filters of <inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula> size and stride 1. This block generates feature map of size <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:mn>19</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>19</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula> which passes through conv_12 having 512 filters of <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula> size, stride 2, relu_7, maxpool_1 of size 5 &#x00D7; 5 with stride 1, conv_13 having 1024 filters of size <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>512</mml:mn></mml:math></inline-formula> with step size 1, relu_8, conv_14 having 1024 filters of size <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1024</mml:mn></mml:math></inline-formula> with stride of 2 and relu_9. 
Finally, the feature map of size <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:mn>5</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>5</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1024</mml:mn></mml:math></inline-formula> proceeds to the global average pooling layer that flattens this to provide a one-dimensional feature vector of size <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:mn>1</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1024</mml:mn></mml:math></inline-formula>. For the final classification, the output is fed into a dense NewFc layer, followed by a softmax layer.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Proposed PentRes-Net Model</title>
<p>The second proposed deep learning model, PentRes-Net, consists of a 64-layered CNN architecture. The proposed architecture is shown in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>. Its parts are convolutional, max pooling, batch normalization, flattening, dense, ReLU activation, and input and output layers. The proposed model accepts the input of size <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:mn>227</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>227</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn></mml:math></inline-formula>. The aim behind employing the PentRes network is to learn fine-grained details and contextual features. The skin lesion has different textures at different resolutions; the PentRes captures the coarse and wide lesion features and can learn the lesion and the context around the skin. It helps in considering whether a spot is an isolated feature or part of an extensive pattern.</p>
<fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Proposed PentRes-Net model for skin lesion classification</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_63851-fig-3.tif"/>
</fig>
<p>The accepted input is then convolved by the conv layer with 32 filters of dimension 3 &#x00D7; 3 &#x00D7; 3 with a step size of 2. After convolution, the output of size <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:mn>114</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>114</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>32</mml:mn></mml:math></inline-formula> enters the first residual block. The first residual block contains two convolutional layers named conv_1 and conv_2, having 64 filters of <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>32</mml:mn></mml:math></inline-formula> size and 128 filters of <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>64</mml:mn></mml:math></inline-formula> size with a step size of 1, respectively. Following these two convolutional layers, a ReLU layer and batch normalization layers are added. Finally, a set of convolutional layers, named conv_3, having 32 filters of dimension <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></inline-formula> with step size 1, relu_1, and batchnorm_1 concludes the first residual block and generates the output of size <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:mn>114</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>114</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>32</mml:mn></mml:math></inline-formula>. 
The output of the first block is added to the identity mapping and is down-sampled by the following maxpool layer of size <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:mn>5</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>5</mml:mn></mml:math></inline-formula>, then convolved by the conv_4 having 64 filters of <inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>32</mml:mn></mml:math></inline-formula> size and step size of 2, and then relu_2 is applied to the convolved output.</p>
<p>After applying nonlinear activation, an output of size <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:mn>57</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>57</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>64</mml:mn></mml:math></inline-formula> enters the second residual block comprising conv_ 5 having 64 filters of dimension <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>64</mml:mn></mml:math></inline-formula>, conv_6 having 128 kernels of dimensions <inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>64</mml:mn></mml:math></inline-formula> with stride 1, relu_3, batchnorm_2 followed by conv_7 having 64 kernels of size <inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></inline-formula>, relu_4 and batchnorm_3. The second block produces an output of size <inline-formula id="ieqn-30"><mml:math id="mml-ieqn-30"><mml:mn>57</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>57</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>64</mml:mn></mml:math></inline-formula> which is down-sampled by the maxpool_1 of size <inline-formula id="ieqn-31"><mml:math id="mml-ieqn-31"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn></mml:math></inline-formula>, then convolved by conv_8 having 128 filters of dimension <inline-formula id="ieqn-32"><mml:math id="mml-ieqn-32"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>64</mml:mn></mml:math></inline-formula> with step size of 2 followed by relu_5.</p>
<p>The input of size <inline-formula id="ieqn-33"><mml:math id="mml-ieqn-33"><mml:mn>29</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>29</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></inline-formula> enters the third residual block and is processed by conv_9 and conv_10, followed by relu_6 activation and batchnorm_4. Further, it is processed by conv_11, relu_7, and batchnorm_5, and the output of size <inline-formula id="ieqn-34"><mml:math id="mml-ieqn-34"><mml:mn>29</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>29</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></inline-formula> is then added to identity mapping and is down-sampled by maxpool_2 to size <inline-formula id="ieqn-35"><mml:math id="mml-ieqn-35"><mml:mn>15</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>15</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></inline-formula> and is then convolved by conv_12 having 256 filters of size <inline-formula id="ieqn-36"><mml:math id="mml-ieqn-36"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>128</mml:mn></mml:math></inline-formula> which are followed by relu_8. This generates the output of size <inline-formula id="ieqn-37"><mml:math id="mml-ieqn-37"><mml:mn>15</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>15</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>256</mml:mn></mml:math></inline-formula>, which is further processed in block four by two convolutional layers, conv_13 and conv_14, followed by relu_9, batchnorm_6, conv_15, relu_10, and batchnorm_7. Proceeding this block is a shortcut connection followed by maxpool_3 of size <inline-formula id="ieqn-38"><mml:math id="mml-ieqn-38"><mml:mn>5</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>5</mml:mn></mml:math></inline-formula>, conv_16, and relu_11. 
After the processing from these layers, an input of size <inline-formula id="ieqn-39"><mml:math id="mml-ieqn-39"><mml:mn>15</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>15</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>512</mml:mn></mml:math></inline-formula> enters the final residual block with the same configuration as previous blocks. After processing through the final block, the output is down-sampled by maxpool_4 of size <inline-formula id="ieqn-40"><mml:math id="mml-ieqn-40"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn></mml:math></inline-formula> with stride two and generates the feature map of size <inline-formula id="ieqn-41"><mml:math id="mml-ieqn-41"><mml:mn>8</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>8</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>512</mml:mn></mml:math></inline-formula>. It is further convolved by conv_20 having 1024 filters of size <inline-formula id="ieqn-42"><mml:math id="mml-ieqn-42"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>512</mml:mn></mml:math></inline-formula> with stride one followed by relu_14. Maxpool_5 of size <inline-formula id="ieqn-43"><mml:math id="mml-ieqn-43"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn></mml:math></inline-formula> with a stride of 2 further down samples the output to <inline-formula id="ieqn-44"><mml:math id="mml-ieqn-44"><mml:mn>4</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>4</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1024</mml:mn></mml:math></inline-formula> which is then convolved by conv_21 having 2048 filters of size <inline-formula id="ieqn-45"><mml:math id="mml-ieqn-45"><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1024</mml:mn></mml:math></inline-formula>, after this relu_15 is placed. 
Feature map of size <inline-formula id="ieqn-46"><mml:math id="mml-ieqn-46"><mml:mn>4</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>4</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>2048</mml:mn></mml:math></inline-formula> is fed to gapool, a global average pooling layer to flatten it to <inline-formula id="ieqn-47"><mml:math id="mml-ieqn-47"><mml:mn>1</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mn>2048</mml:mn></mml:math></inline-formula> feature vector. It is followed by a dense, after which a softmax activation is employed to allocate likelihoods to different classes.</p>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Training and Features Extraction</title>
<p><xref ref-type="fig" rid="fig-4">Fig. 4</xref> illustrates the training process of the proposed deep learning model on augmented skin lesion datasets. All parameters were trained for both models: 19.4 M (million) for QuadRes-Net and 28.2 M for PentRes-Net. The balanced HAM10000 and ISIC 2019 datasets are employed to train the models. The Global Average Pooling (GAP) layer is chosen and activated for the QuadRes-Net model. The entropy loss function is employed. Deep features are extracted on this layer, and a feature vector of dimension N &#x00D7; 1024 is obtained.</p>
<fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>Proposed deep learning model learning using training data for skin lesion classification task</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_63851-fig-4.tif"/>
</fig>
<p>Similarly, in the model PentRes-Net, the Global Average Pooling (GAP) layer is selected, and feature extraction is activated. The entropy loss function was employed, and a feature vector of dimension N &#x00D7; 2048 is obtained.</p>
</sec>
<sec id="s3_5">
<label>3.5</label>
<title>Proposed Feature Fusion</title>
<p>The process of merging several feature vectors into one feature vector is termed feature fusion. In this work, we aim to fuse the features of the proposed QuadRes-Net and PentRes-Net into a single feature vector for better accuracy. However, the simple fusion process increased the computational time and added redundant features, which increased the uncertainty of the extracted features. Therefore, we proposed a Serial controlled Information Entropy fusion technique that improved the accuracy and reduced the computational time.</p>
<p>Given two extracted feature vectors known as QuadRes-Net and PentRes-Net, denoted by <inline-formula id="ieqn-48"><mml:math id="mml-ieqn-48"><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-49"><mml:math id="mml-ieqn-49"><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> of dimensions <inline-formula id="ieqn-50"><mml:math id="mml-ieqn-50"><mml:mi>N</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mn>1024</mml:mn></mml:math></inline-formula> and <inline-formula id="ieqn-51"><mml:math id="mml-ieqn-51"><mml:mi>N</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mn>2048</mml:mn></mml:math></inline-formula>, respectively. Suppose &#x0394; denotes the vector <inline-formula id="ieqn-52"><mml:math id="mml-ieqn-52"><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>u</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> obtained by serial fusion, of dimension <inline-formula id="ieqn-53"><mml:math id="mml-ieqn-53"><mml:mi>N</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mn>3072</mml:mn></mml:math></inline-formula>. Mathematically, this process is defined as follows:
<disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:mtable columnalign="right left right left right left right left right left right left" rowspacing="3pt" columnspacing="0em 2em 0em 2em 0em 2em 0em 2em 0em 2em 0em" displaystyle="true"><mml:mtr><mml:mtd /><mml:mtd><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>A</mml:mi><mml:mi>C</mml:mi><mml:mi>T</mml:mi><mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>M</mml:mi><mml:mi>o</mml:mi><mml:mi>d</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mn>1024</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:mtable columnalign="right left right left right left right left right left right left" rowspacing="3pt" columnspacing="0em 2em 0em 2em 0em 2em 0em 2em 0em 2em 0em" displaystyle="true"><mml:mtr><mml:mtd /><mml:mtd><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>A</mml:mi><mml:mi>C</mml:mi><mml:mi>T</mml:mi><mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>M</mml:mi><mml:mi>o</mml:mi><mml:mi>d</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi><mml:mn>2</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mn>2048</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:mtable columnalign="right left right left right left right left right left right left" rowspacing="3pt" columnspacing="0em 2em 0em 2em 0em 2em 0em 2em 0em 2em 0em" displaystyle="true"><mml:mtr><mml:mtd /><mml:mtd><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>u</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mtable rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mn>3072</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula></p>
<p>The obtained fused vector of dimension <inline-formula id="ieqn-54"><mml:math id="mml-ieqn-54"><mml:mi>N</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mn>3072</mml:mn></mml:math></inline-formula> contains several redundant and uncertain information observed during the initial classification; therefore, we proposed a new technique that improved the fusion process named information entropy controlled minimum distance. To measure the uncertain information in the fused vector, the following probability distribution function has been employed [<xref ref-type="bibr" rid="ref-46">46</xref>]:
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:mi>U</mml:mi><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula>where <inline-formula id="ieqn-55"><mml:math id="mml-ieqn-55"><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the occurrence probability of one sample, and the logarithmic gain function is defined by <inline-formula id="ieqn-56"><mml:math id="mml-ieqn-56"><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. The variable <inline-formula id="ieqn-57"><mml:math id="mml-ieqn-57"><mml:mi>U</mml:mi></mml:math></inline-formula> denotes the information entropy of the entire feature vector. The smaller the entropy value, the more certain the information is opted. After that, the Euclidean distance has been computed among each sample as follows:
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:msub><mml:mrow><mml:mover><mml:mi>D</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mo symmetric="true">&#x2016;</mml:mo><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>w</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>w</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo symmetric="true">&#x2016;</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:math></disp-formula>
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:mo>=</mml:mo><mml:mrow><mml:mover><mml:mi>K</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>.</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mover><mml:mi>K</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>.</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mn>2</mml:mn><mml:mrow><mml:mover><mml:mi>K</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>.</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>where <inline-formula id="ieqn-58"><mml:math id="mml-ieqn-58"><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-59"><mml:math id="mml-ieqn-59"><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> are two samples in a testing set and <inline-formula id="ieqn-60"><mml:math id="mml-ieqn-60"><mml:msub><mml:mrow><mml:mover><mml:mi>D</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the ED among <inline-formula id="ieqn-61"><mml:math id="mml-ieqn-61"><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:math></inline-formula>. 
The new feature space <inline-formula id="ieqn-62"><mml:math id="mml-ieqn-62"><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>w</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is mapped, and the kernel <inline-formula id="ieqn-63"><mml:math id="mml-ieqn-63"><mml:mrow><mml:mover><mml:mi>K</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> should be considered positive definite. Based on the distance <inline-formula id="ieqn-64"><mml:math id="mml-ieqn-64"><mml:msub><mml:mrow><mml:mover><mml:mi>D</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, the uncertainty is computed as follows:
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:msub><mml:mrow><mml:mover><mml:mi>U</mml:mi><mml:mo stretchy="false">&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:msub><mml:mrow><mml:mover><mml:mi>D</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:munder><mml:msub><mml:mrow><mml:mover><mml:mi>D</mml:mi><mml:mo stretchy="false">&#x2192;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
<p>Finally, the entropy value is computed for the final fused feature vector. The maximum-entropy features are selected in this work for the final fused feature vector. The dimension of the fused vector for this work is <inline-formula id="ieqn-65"><mml:math id="mml-ieqn-65"><mml:mi>N</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mn>1752</mml:mn></mml:math></inline-formula>. The final fused feature vector is passed to several machine-learning classifiers for the final classification results.</p>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Results and Analysis</title>
<p>The findings of the experiments that are conducted to assess the proposed framework are elaborated in this section. The framework is tested using two publicly accessible datasets, HAM10000 and ISIC2019. The training and testing results have been determined through an equal distribution strategy of 50:50. The hyperparameters of this work are stochastic gradient descent, epoch&#x2019;s value of 50, momentum of 0.706, learning rate of 0.0010, and mini-batch size of 64. These hyperparameters are initialized using a Bayesian optimization (BO) technique. 5-fold cross-validation is employed to prevent overfitting. For the classification results, several classifiers have been employed, including Linear SVM, Fine KNN, Medium Quadratic SVM, Cosine KNN, Gaussian SVM, Cubic KNN, Coarse Gaussian SVM, and Weighted KNN. The 10-fold cross-validation approach is implemented for testing results. The performance of each classifier is computed based on the following measures: accuracy, sensitivity, precision, F1-Score, and classification time. The entire framework is simulated using MATLAB2023b on a PC with 128 GB of RAM and 12 GB graphics card RTX3060.</p>
<sec id="s4_1">
<label>4.1</label>
<title>Qualitative Analysis</title>
<p>Multiple experiments were conducted to test the proposed framework on both selected datasets. Initially, the results are computed for each model separately, such as QuadRes-Net and PentRes-Net. After that, the fusion results are computed and compared to each single step. Further, a detailed ablation study was also performed to validate the proposed framework performance.</p>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Results of HAM10000 Dataset</title>
<p>Classification outcomes for the model QuadRes-Net using the HAM10000 dataset are presented in <xref ref-type="table" rid="table-2">Table 2</xref>. Results of the different classifiers used in the experiment are presented. Fine KNN has accomplished the maximum accuracy of 84.2% and attained the sensitivity, precision, and F1-Score of 85.82%, 85.15%, and 85.49%, respectively. The rest of the classifiers have achieved an accuracy of 78.3%, 82.6%, 84.1%, 78.3%, 76.6%, 70.2%, 76.9%, 71.6%, 77.1%, 74.9%, and 82.7%, respectively. Cubic SVM has achieved the second-highest accuracy of 84.1%, and its classification time is much lower than Fine KNN&#x2019;s. Overall, the computational time of the classification process is noted for each classifier and given in this table.</p>
<table-wrap id="table-2">
<label>Table 2</label>
<caption>
<title>Results of the HAM10000 dataset using the proposed QuadRes-Net and PentRes-Net deep architectures. Bold entries indicate the most significant values</title>
</caption>
<table>
<colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="6">Results of the HAM10000 dataset using the proposed QuadRes-Net deep architecture</th>
</tr>
<tr>
<th>Classifier</th>
<th>Accuracy (%)</th>
<th>Sensitivity (%)</th>
<th>Precision (%)</th>
<th>F1-Score (%)</th>
<th>Time (sec)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Linear SVM</td>
<td>78.3</td>
<td>79.57</td>
<td>79.61</td>
<td>79.57</td>
<td>190.3</td>
</tr>
<tr>
<td>Quadratic SVM</td>
<td>82.6</td>
<td>83.95</td>
<td>83.6</td>
<td>83.79</td>
<td>140.9</td>
</tr>
<tr>
<td>Cubic SVM</td>
<td>84.1</td>
<td>85.5</td>
<td>85.04</td>
<td>85.28</td>
<td><bold>136.2</bold></td>
</tr>
<tr>
<td>Fine Gaussian SVM</td>
<td>78.3</td>
<td>79.24</td>
<td>81.85</td>
<td>80.52</td>
<td>185.2</td>
</tr>
<tr>
<td>Medium Gaussian SVM</td>
<td>76.6</td>
<td>77.68</td>
<td>78.08</td>
<td>77.88</td>
<td>176.8</td>
</tr>
<tr>
<td>Coarse Gaussian SVM</td>
<td>70.2</td>
<td>70.64</td>
<td>74.25</td>
<td>72.433</td>
<td>173.2</td>
</tr>
<tr>
<td>Fine KNN</td>
<td><bold>84.2</bold></td>
<td>85.82</td>
<td>85.15</td>
<td>85.49</td>
<td>194.0</td>
</tr>
<tr>
<td>Medium KNN</td>
<td>76.9</td>
<td>78.57</td>
<td>78.05</td>
<td>78.31</td>
<td>274.3</td>
</tr>
<tr>
<td>Coarse KNN</td>
<td>71.6</td>
<td>72.57</td>
<td>74.12</td>
<td>73.34</td>
<td>177.6</td>
</tr>
<tr>
<td>Cosine KNN</td>
<td>77.1</td>
<td>78.74</td>
<td>78.14</td>
<td>78.44</td>
<td>166.8</td>
</tr>
<tr>
<td>Cubic KNN</td>
<td>74.9</td>
<td>76.44</td>
<td>76.22</td>
<td>76.33</td>
<td>199.2</td>
</tr>
<tr>
<td>Weighted KNN</td>
<td>82.7</td>
<td>84.51</td>
<td>83.47</td>
<td>83.98</td>
<td>166.0</td>
</tr>
<tr>
<td colspan="6"><bold>Results of HAM10000 using the proposed deep learning Model PentRes-Net</bold></td>
</tr>
<tr>
<td>Linear SVM</td>
<td>79.5</td>
<td>80.77</td>
<td>80.28</td>
<td>80.56</td>
<td>170.2</td>
</tr>
<tr>
<td>Quadratic SVM</td>
<td>82.3</td>
<td>83.4</td>
<td>83.0</td>
<td>83.2</td>
<td>131.4</td>
</tr>
<tr>
<td>Cubic SVM</td>
<td><bold>83.9</bold></td>
<td>85.11</td>
<td>84.6</td>
<td>84.8</td>
<td>122.0</td>
</tr>
<tr>
<td>Fine Gaussian SVM</td>
<td>80.44</td>
<td>81.58</td>
<td>81.257</td>
<td>81.42</td>
<td>171.3</td>
</tr>
<tr>
<td>Medium Gaussian SVM</td>
<td>77.4</td>
<td>78.5</td>
<td>78.3</td>
<td>78.4</td>
<td>142.6</td>
</tr>
<tr>
<td>Coarse Gaussian SVM</td>
<td>73.7</td>
<td>74.4</td>
<td>76.22</td>
<td>75.32</td>
<td>140.8</td>
</tr>
<tr>
<td>Fine KNN</td>
<td>82.7</td>
<td>84.2</td>
<td>83.6</td>
<td>83.9</td>
<td>144.6</td>
</tr>
<tr>
<td>Medium KNN</td>
<td>78.4</td>
<td>80.05</td>
<td>79.2</td>
<td>79.6</td>
<td>151.4</td>
</tr>
<tr>
<td>Coarse KNN</td>
<td>75.9</td>
<td>77.31</td>
<td>76.7</td>
<td>77.0</td>
<td>157.1</td>
</tr>
<tr>
<td>Cosine KNN</td>
<td>78.6</td>
<td>80.22</td>
<td>79.35</td>
<td>79.7</td>
<td>175.7</td>
</tr>
<tr>
<td>Cubic KNN</td>
<td>78.4</td>
<td>79.98</td>
<td>79.22</td>
<td>78.81</td>
<td>147.1</td>
</tr>
<tr>
<td>Weighted KNN</td>
<td>83.2</td>
<td>84.7</td>
<td>83.8</td>
<td>84.2</td>
<td>152.3</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="table-2">Table 2</xref> (second half) summarizes the classification results of the model PentRes-Net for the HAM10000 dataset. Among all the twelve classifiers, the Cubic SVM achieved the maximum accuracy of 83.9% and the maximum sensitivity, precision, and F1-Score of 85.11%, 84.6%, and 84.8%, respectively. Weighted KNN achieved the second-best accuracy of 83.2%. The classification accuracy and computation times for the different classifiers are comparable with the results of the proposed model QuadRes-Net. There is no considerable spike in the accuracies; however, the computation times for Cubic SVM and the other classifiers are lower for model PentRes-Net than for model QuadRes-Net.</p>

<p><xref ref-type="table" rid="table-3">Table 3</xref> presents the classification results of the proposed fusion technique. The proposed fusion was performed on feature vectors from QuadRes-Net and PentRes-Net deep neural networks on the HAM10000 dataset. The fusion step has remarkably increased the classification accuracy, as shown in this table. The classification time is also significantly reduced compared to experiments 1 and 2 (QuadRes-Net and PentRes-Net). Quadratic SVM has gained the maximum accuracy of 90.8%, and the sensitivity, precision, and F1-Score values are 91.38%, 91.27%, and 91.32%, respectively. The rest of the classifiers also obtained improved accuracy of 90.0%, 90.7%, 80.4%, 88.5%, 84.8%, 87.4%, 85.7%, 82.8%, 85.7%, 84.3%, and 87.4%, respectively. The confusion matrix of Quadratic SVM is illustrated in <xref ref-type="fig" rid="fig-5">Fig. 5</xref>. This figure shows that the BKL and MEL classes have higher false positive rates, whereas the others show better prediction performance.</p>
<table-wrap id="table-3">
<label>Table 3</label>
<caption>
<title>Results of the proposed features fusion on the HAM10000 dataset. Bold entries indicate the most significant values</title>
</caption>
<table>
<colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Classifier</th>
<th>Accuracy (%)</th>
<th>Sensitivity (%)</th>
<th>Precision (%)</th>
<th>F1-Score (%)</th>
<th>Time (sec)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Linear SVM</td>
<td>90.0</td>
<td>90.75</td>
<td>90.64</td>
<td>90.42</td>
<td>90.1</td>
</tr>
<tr>
<td><bold>Quadratic SVM</bold></td>
<td><bold>90.8</bold></td>
<td>91.38</td>
<td>91.27</td>
<td>91.32</td>
<td>94.6</td>
</tr>
<tr>
<td>Cubic SVM</td>
<td>90.7</td>
<td>91.44</td>
<td>91.28</td>
<td>91.35</td>
<td>85.2</td>
</tr>
<tr>
<td>Fine Gaussian SVM</td>
<td>80.4</td>
<td>80.44</td>
<td>85.44</td>
<td>82.86</td>
<td>110.9</td>
</tr>
<tr>
<td>Medium Gaussian SVM</td>
<td>88.5</td>
<td>89.21</td>
<td>89.17</td>
<td>89.19</td>
<td>117.3</td>
</tr>
<tr>
<td>Coarse Gaussian SVM</td>
<td>84.8</td>
<td>85.27</td>
<td>86.18</td>
<td>85.72</td>
<td>121.3</td>
</tr>
<tr>
<td>Fine KNN</td>
<td>87.4</td>
<td>88.54</td>
<td>88.11</td>
<td>88.32</td>
<td>112.4</td>
</tr>
<tr>
<td>Medium KNN</td>
<td>85.7</td>
<td>86.87</td>
<td>86.42</td>
<td>86.65</td>
<td>111.8</td>
</tr>
<tr>
<td>Coarse KNN</td>
<td>82.8</td>
<td>83.88</td>
<td>83.81</td>
<td>83.84</td>
<td>125.8</td>
</tr>
<tr>
<td>Cosine KNN</td>
<td>85.7</td>
<td>86.91</td>
<td>86.42</td>
<td>86.67</td>
<td>121.6</td>
</tr>
<tr>
<td>Cubic KNN</td>
<td>84.3</td>
<td>85.51</td>
<td>85.15</td>
<td>85.33</td>
<td>131.2</td>
</tr>
<tr>
<td>Weighted KNN</td>
<td>87.4</td>
<td>88.48</td>
<td>87.98</td>
<td>88.22</td>
<td>120.8</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>Confusion Matrix for Quadratic SVM for proposed methodology using HAM10000 dataset</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_63851-fig-5.tif"/>
</fig>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Results for ISIC2019 Dataset</title>
<p>For the ISIC2019 dataset, classification experiments were conducted using the proposed QuadRes-Net and PentRes-Net models. <xref ref-type="table" rid="table-4">Table 4</xref> presents the classification results of model QuadRes-Net for the ISIC2019 dataset. The Fine KNN topped accuracy with 98.5% among the twelve classifiers. Further, the sensitivity achieved was 98.45%, the precision value was 98.525%, and the F1-Score was 98.48%. Weighted KNN achieved the second-best accuracy of 98.0%. The classification accuracy and computation times for the different classifiers are given in <xref ref-type="table" rid="table-4">Table 4</xref>. The computational time of each classifier is noted for this experiment, and Cubic SVM shows less time than the other classifiers.</p>
<table-wrap id="table-4">
<label>Table 4</label>
<caption>
<title>Proposed classification results of the ISIC2019 dataset using the proposed deep learning models named QuadRes-Net and PentRes-Net. Bold entries indicate the most significant values</title>
</caption>
<table>
<colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="6">Results of ISIC2019 dataset using proposed deep learning model named QuadRes-Net</th>
</tr>
<tr>
<th>Classifier</th>
<th>Accuracy (%)</th>
<th>Sensitivity (%)</th>
<th>Precision (%)</th>
<th>F1-Score (%)</th>
<th>Time (sec)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Linear SVM</td>
<td>94.3</td>
<td>94.212</td>
<td>94.51</td>
<td>94.363</td>
<td>230.4</td>
</tr>
<tr>
<td>Quadratic SVM</td>
<td>95.9</td>
<td>95.83</td>
<td>96.1</td>
<td>95.964</td>
<td>239.8</td>
</tr>
<tr>
<td>Cubic SVM</td>
<td><bold>96.6</bold></td>
<td>96.57</td>
<td>96.75</td>
<td>96.659</td>
<td>225.7</td>
</tr>
<tr>
<td>Fine Gaussian SVM</td>
<td>94.2</td>
<td>94.02</td>
<td>95.087</td>
<td>94.55</td>
<td>330.9</td>
</tr>
<tr>
<td>Medium Gaussian SVM</td>
<td>94.0</td>
<td>93.97</td>
<td>94.18</td>
<td>94.08</td>
<td>392.2</td>
</tr>
<tr>
<td>Coarse Gaussian SVM</td>
<td>88.3</td>
<td>88.06</td>
<td>89.075</td>
<td>88.56</td>
<td>340.2</td>
</tr>
<tr>
<td>Fine KNN</td>
<td><bold>98.5</bold></td>
<td>98.45</td>
<td>98.525</td>
<td>98.48</td>
<td>350.9</td>
</tr>
<tr>
<td>Medium KNN</td>
<td>93.1</td>
<td>92.81</td>
<td>93.35</td>
<td>93.08</td>
<td>327.0</td>
</tr>
<tr>
<td>Coarse KNN</td>
<td>84.9</td>
<td>84.87</td>
<td>85.65</td>
<td>85.26</td>
<td>333.7</td>
</tr>
<tr>
<td>Cosine KNN</td>
<td>93.4</td>
<td>93.25</td>
<td>93.67</td>
<td>93.46</td>
<td>354.8</td>
</tr>
<tr>
<td>Cubic KNN</td>
<td>92.0</td>
<td>91.787</td>
<td>92.225</td>
<td>92.0</td>
<td>390.0</td>
</tr>
<tr>
<td>Weighted KNN</td>
<td>98.0</td>
<td>97.88</td>
<td>98.16</td>
<td>98.02</td>
<td>355.4</td>
</tr>
<tr>
<td colspan="6"><bold>Results of ISIC2019 using the proposed deep learning model named PentRes-Net</bold></td>
</tr>
<tr>
<td>Linear SVM</td>
<td>95.0</td>
<td>94.97</td>
<td>95.03</td>
<td>95.00</td>
<td>302.5</td>
</tr>
<tr>
<td>Quadratic SVM</td>
<td>96.2</td>
<td>96.10</td>
<td>96.23</td>
<td>96.20</td>
<td>360.2</td>
</tr>
<tr>
<td>Cubic SVM</td>
<td>97.1</td>
<td>97.07</td>
<td>97.13</td>
<td>97.10</td>
<td>338.8</td>
</tr>
<tr>
<td>Fine Gaussian SVM</td>
<td>95.6</td>
<td>95.30</td>
<td>96.23</td>
<td>95.79</td>
<td>340.2</td>
</tr>
<tr>
<td>Medium Gaussian SVM</td>
<td>94.8</td>
<td>94.75</td>
<td>94.85</td>
<td>94.79</td>
<td>357.3</td>
</tr>
<tr>
<td>Coarse Gaussian SVM</td>
<td>92.7</td>
<td>92.46</td>
<td>92.92</td>
<td>92.69</td>
<td>339</td>
</tr>
<tr>
<td>Fine KNN</td>
<td><bold>98.51</bold></td>
<td>98.47</td>
<td>98.57</td>
<td>98.52</td>
<td>393.2</td>
</tr>
<tr>
<td>Medium KNN</td>
<td>94.8</td>
<td>94.66</td>
<td>95.03</td>
<td>94.84</td>
<td>301.5</td>
</tr>
<tr>
<td>Coarse KNN</td>
<td>91.9</td>
<td>91.80</td>
<td>91.90</td>
<td>91.9</td>
<td>300.4</td>
</tr>
<tr>
<td>Cosine KNN</td>
<td>94.9</td>
<td>94.80</td>
<td>95.00</td>
<td>94.8</td>
<td>397.6</td>
</tr>
<tr>
<td>Cubic KNN</td>
<td>94.4</td>
<td>94.31</td>
<td>94.53</td>
<td>94.42</td>
<td>389.2</td>
</tr>
<tr>
<td>Weighted KNN</td>
<td>98.41</td>
<td>98.25</td>
<td>98.48</td>
<td>98.36</td>
<td>390.7</td>
</tr>
</tbody>
</table>
</table-wrap><table-wrap id="table-5">
<label>Table 5</label>
<caption>
<title>Results of the proposed features fusion technique using ISIC2019 dataset</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th align="center">Classifier</th>
<th>Accuracy (%)</th>
<th>Sensitivity (%)</th>
<th>Precision (%)</th>
<th>F1-Score (%)</th>
<th>Time (sec)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Linear SVM</td>
<td>98.0</td>
<td>97.93</td>
<td>98.0</td>
<td>97.93</td>
<td>109.0</td>
</tr>
<tr>
<td>Quadratic SVM</td>
<td>98.6</td>
<td>98.53</td>
<td>98.58</td>
<td>98.55</td>
<td>115.0</td>
</tr>
<tr>
<td>Cubic SVM</td>
<td>98.9</td>
<td>98.9</td>
<td>98.9</td>
<td>98.90</td>
<td>130.5</td>
</tr>
<tr>
<td>Fine Gaussian SVM</td>
<td>90.3</td>
<td>89.16</td>
<td>93.73</td>
<td>91.36</td>
<td>179.5</td>
</tr>
<tr>
<td>Medium Gaussian SVM</td>
<td>98.0</td>
<td>97.92</td>
<td>98.06</td>
<td>97.99</td>
<td>178.1</td>
</tr>
<tr>
<td>Coarse Gaussian SVM</td>
<td>96.5</td>
<td>96.28</td>
<td>96.61</td>
<td>96.44</td>
<td>177.2</td>
</tr>
<tr>
<td>Fine KNN</td>
<td>99.3</td>
<td>99.2</td>
<td>99.28</td>
<td>99.22</td>
<td>160.3</td>
</tr>
<tr>
<td>Medium KNN</td>
<td>97.6</td>
<td>96.61</td>
<td>97.7</td>
<td>97.15</td>
<td>169.6</td>
</tr>
<tr>
<td>Coarse KNN</td>
<td>94.7</td>
<td>94.47</td>
<td>94.78</td>
<td>94.62</td>
<td>171.3</td>
</tr>
<tr>
<td>Cosine KNN</td>
<td>97.6</td>
<td>97.36</td>
<td>97.66</td>
<td>97.50</td>
<td>190.3</td>
</tr>
<tr>
<td>Cubic KNN</td>
<td>97.0</td>
<td>96.85</td>
<td>97.125</td>
<td>96.98</td>
<td>182.4</td>
</tr>
<tr>
<td>Weighted KNN</td>
<td>98.8</td>
<td>98.67</td>
<td>98.82</td>
<td>98.74</td>
<td>178.7</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="table-4">Table 4</xref> summarizes the classification results of the model PentRes-Net for the ISIC2019 dataset. Fine KNN outperformed the rest of the classifiers for this experiment with 98.51% accuracy. Weighted KNN gave the second-best accuracy with 98.41%. The highest sensitivity, precision, and F1-Score were also achieved by Fine KNN with the values of 98.47%, 98.57%, and 98.52%, respectively. Furthermore, it is evident that the suggested PentRes-Net model required a little longer computing time than the QuadRes-Net model; however, an improvement has occurred in the accuracy, precision, sensitivity, and F1-Score.</p>

<p>In the last stage, extracted features are fused using a novel fusion technique as presented in the proposed <xref ref-type="sec" rid="s3">Section 3</xref>. <xref ref-type="table" rid="table-5">Table 5</xref> presents the overview of classification results achieved by utilizing the fused feature vector for the ISIC2019 dataset. The fusion step has impacted the classification results and computation time positively. Fine KNN outperformed the other classifiers by gaining 99.3% accuracy and a sensitivity value of 99.2%, a precision of 99.28%, and an F1-Score achieved of 99.22%, respectively. <xref ref-type="fig" rid="fig-6">Fig. 6</xref> shows the Fine KNN classifier&#x2019;s confusion matrix that can be utilized to confirm the computed performance measures. This figure shows that each class&#x2019;s correct prediction rate is above 98%, which is a strength of the proposed fusion process.</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>Confusion matrix for Fine KNN for proposed methodology using ISIC2019 dataset</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_63851-fig-6.tif"/>
</fig>
</sec>
<sec id="s4_4">
<label>4.4</label>
<title>Discussion</title>
<p>A detailed discussion of the proposed framework is presented in this subsection. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> shows the importance of deep learning techniques for skin lesion classification and diagnosis. The proposed deep learning fusion framework is illustrated in <xref ref-type="fig" rid="fig-1">Fig. 1</xref>, which includes several middle steps, such as designing new models and feature fusion. The dataset imbalance issue is resolved in the initial step and is later utilized for training deep learning models. Two novel deep learning models, QuadRes-Net and PentRes-Net, are proposed in this work (models can be seen in <xref ref-type="fig" rid="fig-3">Figs. 3</xref> and <xref ref-type="fig" rid="fig-4">4</xref>). The total learnable parameters of the suggested models are comparable with the state-of-the-art models, as shown in <xref ref-type="fig" rid="fig-7">Fig. 7</xref> (upper part). This figure shows that the parameters of the proposed models are less than those of other pre-trained models except for four networks. The proposed PentRes-Net architecture contains fewer parameters than all the other networks.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>Comparison among several pre-trained and proposed deep architectures in the form of parameters and accuracy</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_63851-fig-7.tif"/>
</fig>
<p><xref ref-type="table" rid="table-2">Tables 2</xref> and <xref ref-type="table" rid="table-3">3</xref> present the results of the HAM10000 dataset, whereas <xref ref-type="table" rid="table-4">Tables 4</xref> and <xref ref-type="table" rid="table-5">5</xref> present the classification results of the ISIC2019 dataset, respectively. In these tables, it is noted that the classification accuracy has been improved after employing the proposed fusion technique. The accuracy improved by almost 5% after the fusion technique. Confusion matrices are illustrated in <xref ref-type="fig" rid="fig-5">Figs. 5</xref> and <xref ref-type="fig" rid="fig-6">6</xref>, which can be used to verify the highest classifier&#x2019;s computed performance measures.</p>
<table-wrap id="table-6">
<label>Table 6</label>
<caption>
<title>Proposed model analysis before and after the augmentation process</title>
</caption>
<table>
<colgroup>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="3">HAM10000</th>
</tr>
<tr>
<th>Models</th>
<th>Before augmentation</th>
<th>After augmentation</th>
</tr>
</thead>
<tbody>
<tr>
<td>QuadRes-Net</td>
<td>84.1</td>
<td>89.1</td>
</tr>
<tr>
<td>PentRes-Net</td>
<td>88.2</td>
<td>93.2</td>
</tr>
<tr>
<td colspan="3"><bold>ISIC 2019</bold></td>
</tr>
<tr>
<td>QuadRes-Net</td>
<td>90.2</td>
<td>95.2</td>
</tr>
<tr>
<td>PentRes-Net</td>
<td>94.5</td>
<td>99.5</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="fig" rid="fig-7">Fig. 7</xref> (second-half) presents a comparison among proposed models and pre-trained neural networks accuracy. In this figure, the accuracy performance of proposed QuadRes-Net, PentRes-Net, and fusion process has been significantly improved. The computational time of each classifier is also noted during the classification process and the fusion process reduced the overall testing time (see <xref ref-type="table" rid="table-2">Tables 2</xref>&#x2013;<xref ref-type="table" rid="table-5">5</xref>).</p>

<p><bold>Ablation Study:</bold> Initially, the publicly accessible datasets are used to train the proposed models, and feature vectors are extracted from them. The classification results of individual features and the computation time are calculated. The proposed framework has performed well for ISIC2019 regarding accuracy and computation time using individual features from models QuadRes-Net and PentRes-Net. However, the classification accuracy for HAM10000 had room for improvement. After that, the individual feature vectors are fused using simple serial-based fusion to evaluate the impact of merged features on the proposed framework.</p>
<p>Finally, the fused feature vector is used to evaluate the framework. The fusion step has remarkably enhanced the classification accuracy of HAM10000 and improved the classification results of the ISIC2019 dataset. It is also observed that generally, the fusion step impacts accuracy positively but adds to the computation time; however, in the proposed framework, it is evident from the results that the fusion step decreased the computation time for the experiment using HAM10000 and ISIC2019 datasets. The classification and segmentation performance can be further enhanced by adopting color constancy, as multiple research works have shown improvement in segmentation and classification accuracy by using color constancy. Further, a GradCAM visualization is performed on the proposed models, and the results are illustrated in <xref ref-type="fig" rid="fig-8">Fig. 8</xref>.</p>
<fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>GradCAM-based visualization of the proposed PentRes-Net Model</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_63851-fig-8.tif"/>
</fig>
<p>Another ablation study is conducted based on the before-and-after data augmentation process, as shown in <xref ref-type="table" rid="table-6">Table 6</xref>. From this table, it is observed that the proposed model significantly improved the accuracy by &#x007E;5% after the augmentation step and is more generalized after the augmentation step.</p>

<p>Lastly, the proposed framework&#x2019;s performance is compared with that of state-of-the-art techniques. <xref ref-type="table" rid="table-7">Table 7</xref> gives a performance comparison of the proposed work using the HAM10000 dataset and the ISIC2019 dataset. It is evident that the proposed framework has outpaced the state-of-the-art in classification performance.</p>
<table-wrap id="table-7">
<label>Table 7</label>
<caption>
<title>Comparison of the proposed accuracy with the state-of-the-art techniques. Bold entries indicate the most significant values</title>
</caption>
<table>
<colgroup>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="4">Comparison of the proposed methodology with the state-of-the-art HAM10000 dataset</th>
</tr>
<tr>
<th>Sr.#</th>
<th>Research work</th>
<th>Year</th>
<th>Accuracy (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>Hoang et al. [<xref ref-type="bibr" rid="ref-47">47</xref>]</td>
<td>2022</td>
<td>84.80</td>
</tr>
<tr>
<td>2</td>
<td>Popescu et al. [<xref ref-type="bibr" rid="ref-48">48</xref>]</td>
<td>2022</td>
<td>86.71</td>
</tr>
<tr>
<td>3</td>
<td>Neeshma et al. [<xref ref-type="bibr" rid="ref-49">49</xref>]</td>
<td>2022</td>
<td>82.1</td>
</tr>
<tr>
<td>4</td>
<td>Shobha et al. [<xref ref-type="bibr" rid="ref-50">50</xref>]</td>
<td>2022</td>
<td>86.54</td>
</tr>
<tr>
<td>5</td>
<td>Jaeyho et al. [<xref ref-type="bibr" rid="ref-51">51</xref>] (Xception Model)</td>
<td>2024</td>
<td>89.76</td>
</tr>
<tr>
<td>6</td>
<td colspan="2"><bold>Proposed Model</bold></td>
<td><bold>90.8</bold></td>
</tr>
<tr>
<td colspan="4"><bold>Comparison of the proposed methodology with the state-of-the-art for ISIC2019 dataset</bold></td>
</tr>
<tr>
<td>1</td>
<td>Maqsood et al. [<xref ref-type="bibr" rid="ref-31">31</xref>]</td>
<td>2023</td>
<td>93.47</td>
</tr>
<tr>
<td>2</td>
<td>Ayas et al. [<xref ref-type="bibr" rid="ref-33">33</xref>]</td>
<td>2022</td>
<td>97.2</td>
</tr>
<tr>
<td>3</td>
<td>Zafar et al. [<xref ref-type="bibr" rid="ref-34">34</xref>]</td>
<td>2023</td>
<td>91.7</td>
</tr>
<tr>
<td>4</td>
<td>Bibi et al. [<xref ref-type="bibr" rid="ref-35">35</xref>]</td>
<td>2023</td>
<td>98.80</td>
</tr>
<tr>
<td>5</td>
<td>Kadirappa et al. [<xref ref-type="bibr" rid="ref-36">36</xref>]</td>
<td>2023</td>
<td>91.73</td>
</tr>
<tr>
<td>6</td>
<td>Radhika et al. [<xref ref-type="bibr" rid="ref-38">38</xref>]</td>
<td>2023</td>
<td>98.77</td>
</tr>
<tr>
<td>7</td>
<td>Baig et al. [<xref ref-type="bibr" rid="ref-39">39</xref>]</td>
<td>2023</td>
<td>98.1</td>
</tr>
<tr>
<td>8</td>
<td>Alsahafi et al. [<xref ref-type="bibr" rid="ref-40">40</xref>]</td>
<td>2023</td>
<td>94.65</td>
</tr>
<tr>
<td>9</td>
<td>Khan et al. [<xref ref-type="bibr" rid="ref-52">52</xref>]</td>
<td>2024</td>
<td>95.70</td>
</tr>
<tr>
<td>10</td>
<td colspan="2"><bold>Proposed Model</bold></td>
<td><bold>99.3</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
<p><bold>Lesion Localization:</bold> Based on the classification performance, the proposed PentRes-Net architecture is modified for the lesion segmentation task using the HAM10000 dataset. The testing was done on 500 dermoscopic images and obtained an accuracy of 91.6%, a sensitivity rate of 90.4%, and a precision rate of 91.0%. Furthermore, a few sample images are illustrated in <xref ref-type="fig" rid="fig-9">Fig. 9</xref>. <xref ref-type="fig" rid="fig-9">Fig. 9a</xref> shows the original testing images, (b) illustrates the segmented binary images after PentRes-Net and OTSU thresholding, (c) denotes the refined image, (d) denotes the fused image, (e) represents the mapped on the original image, (f) represent the ground truth image, and (g) shows the final localized image, respectively.</p>
<fig id="fig-9">
<label>Figure 9</label>
<caption>
<title>Lesion localization using QuadRes-Net with LIME interpretable technique on HAM10000 dataset. (a)&#x2013;(g) Original images, proposed segmented without postprocessing, proposed with post-processing, proposed with final refinement, proposed mapped on the original image, ground truth image, and compare proposed segmented and ground truth</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_63851-fig-9.tif"/>
</fig>
</sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Conclusion</title>
<p>This work proposed a novel skin lesion classification and localization framework using dermoscopic images. The proposed framework begins with dataset augmentation and ends with lesion localization. In the augmentation process, the imbalanced issue was resolved, increasing the training accuracy. Before the augmentation process, the training accuracy of both proposed models was 84.1% and 88.2% for the HAM10000 dataset (QuadRes-Net and PentRes-Net) and 90.2% and 94.5% for the ISIC2019 dataset, respectively. After the augmentation process, the accuracy improved by up to 5% for both datasets. The models were trained using the Bayesian optimization approach instead of the selection of literature knowledge-based hyperparameters. The trained model&#x2019;s features are extracted from the testing data and fused using a novel technique that improves the accuracy and reduces the computational time. The proposed framework obtained final classification accuracy of 90.8% and 99.3% for HAM10000 and ISIC2019 datasets, respectively. The localization accuracy is also computed for the HAM10000 dataset, and 91.6% accuracy was obtained. A comparison was also conducted with SOTA, showing that the proposed framework has improved accuracy and precision rates.</p>
<p>Despite the numerous advantages, the most notable limitation of these networks lies in their requirement for exceedingly deep architectures, leading to a substantial amount of computational load. This research&#x2019;s strength lies in developing innovative models with fewer residual blocks and weight layers, resulting in reduced computational requirements while achieving improved accuracy in disease diagnosis compared to the ResNet, Densenet, and Inception architectures. In the future, addressing this limitation could involve the implementation of an optimization algorithm to choose the most influential features.</p>
</sec>
</body>
<back>
<ack>
<p>The National Research Foundation of Korea (NRF) grant funded by the Korea government (*MSIT) (No. 2018R1A5A7059549) and Princess Nourah bint Abdulrahman University Researchers Supporting Project number (PNURSP2025R508), Princess Nourah bint Abdulrahman University, Riyadh, Saudi Arabia provided invaluable assistance during the experimental work, for which the authors are quite grateful.</p>
</ack>
<sec>
<title>Funding Statement</title>
<p>This work was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government (*MSIT) (No. 2018R1A5A7059549). This work was supported through Princess Nourah bint Abdulrahman University Researchers Supporting Project number (PNURSP2025R508), Princess Nourah bint Abdulrahman University, Riyadh, Saudi Arabia.</p>
</sec>
<sec>
<title>Author Contributions</title>
<p>Conceptualization, Design, Software, Methodology, and Original Writeup: Veena Dillshad, Muhammad Attique Khan, Muhammad Nazir, Jawad Ahmad. Methodology, Funding, Validation, and Project Administration: Dina Abdulaziz AlHammadi, Taha Houda, Hee-Chan Cho. Supervision, Funding, and Review Draft: Byoungchol Chang. All authors reviewed the results and approved the final version of the manuscript.</p>
</sec>
<sec sec-type="data-availability">
<title>Availability of Data and Materials</title>
<p>Datasets of this work are publicly available for research purposes: <ext-link ext-link-type="uri" xlink:href="https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/DBW86T">https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/DBW86T</ext-link> (accessed on 27 February 2025), <ext-link ext-link-type="uri" xlink:href="https://challenge.isic-archive.com/data/#2019">https://challenge.isic-archive.com/data/#2019</ext-link> (accessed on 27 February 2025).</p>
</sec>
<sec>
<title>Ethics Approval</title>
<p>Not applicable.</p>
</sec>
<sec sec-type="COI-statement">
<title>Conflicts of Interest</title>
<p>The authors declare no conflicts of interest to report regarding the present study.</p>
</sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Dzieniszewska</surname> <given-names>A</given-names></string-name>, <string-name><surname>Garbat</surname> <given-names>P</given-names></string-name>, <string-name><surname>Piramidowicz</surname> <given-names>R</given-names></string-name></person-group>. <article-title>Improving skin Lesion segmentation with self-training</article-title>. <source>Cancers</source>. <year>2024</year>;<volume>16</volume>(<issue>2</issue>):<fpage>1120</fpage>; <pub-id pub-id-type="pmid">38539454</pub-id></mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Sulthana</surname> <given-names>R</given-names></string-name>, <string-name><surname>Chamola</surname> <given-names>V</given-names></string-name>, <string-name><surname>Hussain</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Albalwy</surname> <given-names>F</given-names></string-name>, <string-name><surname>Hussain</surname> <given-names>A</given-names></string-name></person-group>. <article-title>A novel end-to-end deep convolutional neural network based skin lesion classification framework</article-title>. <source>Expert Syst Appl</source>. <year>2024</year>;<volume>246</volume>(<issue>5</issue>):<fpage>123056</fpage>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Richard</surname> <given-names>M</given-names></string-name>, <string-name><surname>Paul</surname> <given-names>C</given-names></string-name>, <string-name><surname>Nijsten</surname> <given-names>T</given-names></string-name>, <string-name><surname>Gisondi</surname> <given-names>P</given-names></string-name>, <string-name><surname>Salavastru</surname> <given-names>C</given-names></string-name></person-group>. <article-title>Prevalence of most common skin diseases in Europe: a population-based study</article-title>. <source>J Eur Acad Dermatol Venereol</source>. <year>2022</year>;<volume>36</volume>(<issue>11</issue>):<fpage>1088</fpage>&#x2013;<lpage>96</lpage>; <pub-id pub-id-type="pmid">35274366</pub-id></mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Din</surname> <given-names>S</given-names></string-name>, <string-name><surname>Mourad</surname> <given-names>O</given-names></string-name>, <string-name><surname>Serpedin</surname> <given-names>E</given-names></string-name></person-group>. <article-title>LSCS-Net: a lightweight skin cancer segmentation network with densely connected multi-rate atrous convolution</article-title>. <source>Comput Biol Med</source>. <year>2024</year>;<volume>11</volume>(<issue>3</issue>):<fpage>108303</fpage>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Olsen</surname> <given-names>CM</given-names></string-name>, <string-name><surname>Thompson</surname> <given-names>JF</given-names></string-name>, <string-name><surname>Pandeya</surname> <given-names>N</given-names></string-name>, <string-name><surname>Whiteman</surname> <given-names>DC</given-names></string-name></person-group>. <article-title>Evaluation of sex-specific incidence of melanoma</article-title>. <source>JAMA Dermatol</source>. <year>2020</year>;<volume>156</volume>(<issue>6</issue>):<fpage>553</fpage>&#x2013;<lpage>60</lpage>; <pub-id pub-id-type="pmid">32211827</pub-id></mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Giuffrida</surname> <given-names>R</given-names></string-name>, <string-name><surname>Conforti</surname> <given-names>C</given-names></string-name>, <string-name><surname>Di Meo</surname> <given-names>N</given-names></string-name>, <string-name><surname>Deinlein</surname> <given-names>T</given-names></string-name>, <string-name><surname>Zalaudek</surname> <given-names>I</given-names></string-name></person-group>. <article-title>Use of noninvasive imaging in the management of skin cancer</article-title>. <source>Curr Opin Oncol</source>. <year>2020</year>;<volume>32</volume>(<issue>4</issue>):<fpage>98</fpage>&#x2013;<lpage>105</lpage>; <pub-id pub-id-type="pmid">31850969</pub-id></mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Kittler</surname> <given-names>H</given-names></string-name>, <string-name><surname>Pehamberger</surname> <given-names>H</given-names></string-name>, <string-name><surname>Wolff</surname> <given-names>K</given-names></string-name>, <string-name><surname>Binder</surname> <given-names>M</given-names></string-name></person-group>. <article-title>Diagnostic accuracy of dermoscopy</article-title>. <source>Lancet Oncol</source>. <year>2002</year>;<volume>3</volume>(<issue>6</issue>):<fpage>159</fpage>&#x2013;<lpage>65</lpage>; <pub-id pub-id-type="pmid">11902502</pub-id></mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Campos-do-Carmo</surname> <given-names>G</given-names></string-name>, <string-name><surname>Ramos-e-Silva</surname> <given-names>M</given-names></string-name></person-group>. <article-title>Dermoscopy: basic concepts</article-title>. <source>Int J Dermatol</source>. <year>2008</year>;<volume>47</volume>(<issue>10</issue>):<fpage>712</fpage>&#x2013;<lpage>9</lpage>; <pub-id pub-id-type="pmid">18613881</pub-id></mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Chatterjee</surname> <given-names>S</given-names></string-name>, <string-name><surname>Dey</surname> <given-names>D</given-names></string-name>, <string-name><surname>Munshi</surname> <given-names>S</given-names></string-name>, <string-name><surname>Gorai</surname> <given-names>S</given-names></string-name></person-group>. <article-title>Dermatological expert system implementing the ABCD rule of dermoscopy for skin disease identification</article-title>. <source>Expert Syst Appl</source>. <year>2021</year>;<volume>167</volume>(<issue>2</issue>):<fpage>114204</fpage>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>J&#x00FC;tte</surname> <given-names>L</given-names></string-name>, <string-name><surname>Gonz&#x00E1;lez-Vill&#x00E0;</surname> <given-names>S</given-names></string-name>, <string-name><surname>Quintana</surname> <given-names>J</given-names></string-name>, <string-name><surname>Steven</surname> <given-names>M</given-names></string-name>, <string-name><surname>Garcia</surname> <given-names>R</given-names></string-name>, <string-name><surname>Roth</surname> <given-names>B</given-names></string-name></person-group>. <article-title>Integrating generative AI with ABCDE rule analysis for enhanced skin cancer diagnosis, dermatologist training and patient education</article-title>. <source>Front Med</source>. <year>2024</year>;<volume>11</volume>(<issue>3</issue>):<fpage>1445318</fpage>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Hasan</surname> <given-names>MK</given-names></string-name>, <string-name><surname>Ahamad</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Yap</surname> <given-names>CH</given-names></string-name>, <string-name><surname>Yang</surname> <given-names>G</given-names></string-name></person-group>. <article-title>A survey, review, and future trends of skin lesion segmentation and classification</article-title>. <source>Comput Biol Med</source>. <year>2023</year>;<volume>155</volume>:<fpage>106624</fpage>; <pub-id pub-id-type="pmid">36774890</pub-id></mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Li</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>N</given-names></string-name>, <string-name><surname>Gong</surname> <given-names>H</given-names></string-name>, <string-name><surname>Qiu</surname> <given-names>R</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>W</given-names></string-name></person-group>. <article-title>SG-MIAN: self-guided multiple information aggregation network for image-level weakly supervised skin lesion segmentation</article-title>. <source>Comput Biol Med</source>. <year>2024</year>;<volume>170</volume>:<fpage>107988</fpage>; <pub-id pub-id-type="pmid">38232452</pub-id></mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Nguyen</surname> <given-names>ATP</given-names></string-name>, <string-name><surname>Jewel</surname> <given-names>RM</given-names></string-name>, <string-name><surname>Akter</surname> <given-names>A</given-names></string-name></person-group>. <article-title>Comparative analysis of machine learning models for automated skin cancer detection: advancements in diagnostic accuracy and AI integration</article-title>. <source>Am J Medical Sci Pharm Res</source>. <year>2025</year>;<volume>7</volume>:<fpage>15</fpage>&#x2013;<lpage>26</lpage>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Ahmad</surname> <given-names>I</given-names></string-name>, <string-name><surname>Amin</surname> <given-names>J</given-names></string-name>, <string-name><surname>Lali</surname> <given-names>MI</given-names></string-name>, <string-name><surname>Abbas</surname> <given-names>F</given-names></string-name>, <string-name><surname>Sharif</surname> <given-names>MI</given-names></string-name></person-group>. <article-title>A novel Deeplabv3&#x002B; and vision-based transformer model for segmentation and classification of skin lesions</article-title>. <source>Biomed Signal Process Control</source>. <year>2024</year>;<volume>92</volume>:<fpage>106084</fpage>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Kalaivani</surname> <given-names>A</given-names></string-name>, <string-name><surname>Karpagavalli</surname> <given-names>S</given-names></string-name></person-group>. <article-title>Bootstrapping of fine-tuned segmentation and classification network for epidermis disorder categorization</article-title>. <source>Multimed Tools Appl</source>. <year>2024</year>;<volume>83</volume>:<fpage>18907</fpage>&#x2013;<lpage>17</lpage>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Attallah</surname> <given-names>O</given-names></string-name></person-group>. <article-title>Skin-CAD: explainable deep learning classification of skin cancer from dermoscopic images by feature selection of dual high-level CNNs features and transfer learning</article-title>. <source>Comput Biol Med</source>. <year>2024</year>;<volume>178</volume>:<fpage>108798</fpage>; <pub-id pub-id-type="pmid">38925085</pub-id></mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Yeasmin</surname> <given-names>MN</given-names></string-name>, <string-name><surname>Amin</surname> <given-names>MAl</given-names></string-name>, <string-name><surname>Joti</surname> <given-names>TJ</given-names></string-name>, <string-name><surname>Aung</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Azim</surname> <given-names>MA</given-names></string-name></person-group>. <article-title>Advances of AI in image-based computer-aided diagnosis: a review</article-title>. <source>Array</source>. <year>2024</year>;<volume>23</volume>:<fpage>100357</fpage>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Amiri</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Heidari</surname> <given-names>A</given-names></string-name>, <string-name><surname>Navimipour</surname> <given-names>NJ</given-names></string-name>, <string-name><surname>Esmaeilpour</surname> <given-names>M</given-names></string-name>, <string-name><surname>Yazdani</surname> <given-names>Y</given-names></string-name></person-group>. <article-title>The deep learning applications in IoT-based bio-and medical informatics: a systematic literature review</article-title>. <source>Neural Comput Appl</source>. <year>2024</year>;<volume>36</volume>:<fpage>5757</fpage>&#x2013;<lpage>97</lpage>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Vakili</surname> <given-names>A</given-names></string-name>, <string-name><surname>Al-Khafaji</surname> <given-names>HMR</given-names></string-name>, <string-name><surname>Darbandi</surname> <given-names>M</given-names></string-name>, <string-name><surname>Heidari</surname> <given-names>A</given-names></string-name>, <string-name><surname>Jafari Navimipour</surname> <given-names>N</given-names></string-name>, <string-name><surname>Unal</surname> <given-names>M</given-names></string-name></person-group>. <article-title>A new service composition method in the cloud-based internet of things environment using a grey wolf optimization algorithm and MapReduce framework</article-title>. <source>Concurr Comput</source>. <year>2024</year>;<volume>36</volume>:<fpage>e8091</fpage>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Pacal</surname> <given-names>I</given-names></string-name>, <string-name><surname>Ozdemir</surname> <given-names>B</given-names></string-name>, <string-name><surname>Zeynalov</surname> <given-names>J</given-names></string-name>, <string-name><surname>Gasimov</surname> <given-names>H</given-names></string-name>, <string-name><surname>Pacal</surname> <given-names>N</given-names></string-name></person-group>. <article-title>A novel CNN-ViT-based deep learning model for early skin cancer diagnosis</article-title>. <source>Biomed Signal Process Control</source>. <year>2025</year>;<volume>104</volume>:<fpage>107627</fpage>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Dong</surname> <given-names>C</given-names></string-name>, <string-name><surname>Dai</surname> <given-names>D</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>C</given-names></string-name>, <string-name><surname>Li</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>S</given-names></string-name></person-group>. <article-title>Learning from dermoscopic images in association with clinical metadata for skin lesion segmentation and classification</article-title>. <source>Comput Biol Med</source>. <year>2023</year>;<volume>152</volume>(<issue>1</issue>):<fpage>106321</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106321</pub-id>; <pub-id pub-id-type="pmid">36463792</pub-id></mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Ozdemir</surname> <given-names>B</given-names></string-name>, <string-name><surname>Pacal</surname> <given-names>I</given-names></string-name></person-group>. <article-title>A robust deep learning framework for multiclass skin cancer classification</article-title>. <source>Sci Rep</source>. <year>2025</year>;<volume>15</volume>(<issue>1</issue>):<fpage>4938</fpage>. doi:<pub-id pub-id-type="doi">10.1038/s41598-025-89230-7</pub-id>; <pub-id pub-id-type="pmid">39930026</pub-id></mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Zhang</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Li</surname> <given-names>C</given-names></string-name>, <string-name><surname>Zuo</surname> <given-names>S</given-names></string-name>, <string-name><surname>Cai</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>A</given-names></string-name>, <string-name><surname>Huang</surname> <given-names>H</given-names></string-name>, <etal>et al.</etal></person-group> <article-title>The impact of multiclass information decoupling in latent space on skin lesion segmentation</article-title>. <source>Neurocomputing</source>. <year>2025</year>;<volume>617</volume>:<fpage>128962</fpage>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Khan</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Sharif</surname> <given-names>MI</given-names></string-name>, <string-name><surname>Raza</surname> <given-names>M</given-names></string-name>, <string-name><surname>Anjum</surname> <given-names>A</given-names></string-name>, <string-name><surname>Saba</surname> <given-names>T</given-names></string-name>, <string-name><surname>Shad</surname> <given-names>SA</given-names></string-name></person-group>. <article-title>Skin lesion segmentation and classification: a unified framework of deep neural network features fusion and selection</article-title>. <source>Expert Syst</source>. <year>2022</year>;<volume>39</volume>(<issue>7</issue>):<fpage>e12497</fpage>. doi:<pub-id pub-id-type="doi">10.1111/exsy.12497</pub-id>.</mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Kaur</surname> <given-names>R</given-names></string-name>, <string-name><surname>GholamHosseini</surname> <given-names>H</given-names></string-name>, <string-name><surname>Sinha</surname> <given-names>R</given-names></string-name></person-group>. <article-title>Skin lesion segmentation using an improved framework of encoder-decoder based convolutional neural network</article-title>. <source>Int J Imaging Syst Technol</source>. <year>2022</year>;<volume>32</volume>(<issue>4</issue>):<fpage>1143</fpage>&#x2013;<lpage>58</lpage>. doi:<pub-id pub-id-type="doi">10.1002/ima.22699</pub-id>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Anand</surname> <given-names>V</given-names></string-name>, <string-name><surname>Gupta</surname> <given-names>S</given-names></string-name>, <string-name><surname>Koundal</surname> <given-names>D</given-names></string-name>, <string-name><surname>Singh</surname> <given-names>K</given-names></string-name></person-group>. <article-title>Fusion of U-Net and CNN model for segmentation and classification of skin lesion from dermoscopy images</article-title>. <source>Expert Syst Appl</source>. <year>2023</year>;<volume>213</volume>(<issue>7</issue>):<fpage>119230</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.eswa.2022.119230</pub-id>.</mixed-citation></ref>
<ref id="ref-27"><label>[27]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Yadav</surname> <given-names>AK</given-names></string-name>, <string-name><surname>Mehta</surname> <given-names>R</given-names></string-name>, <string-name><surname>Kumar</surname> <given-names>V</given-names></string-name>, <string-name><surname>Medikondu</surname> <given-names>NR</given-names></string-name></person-group>. <article-title>An optimized boosting framework for skin lesion segmentation and classification</article-title>. <source>Multimed Tools Appl</source>. <year>2023</year>;<volume>83</volume>(<issue>18</issue>):<fpage>53875</fpage>&#x2013;<lpage>96</lpage>. doi:<pub-id pub-id-type="doi">10.1007/s11042-023-17042-w</pub-id>.</mixed-citation></ref>
<ref id="ref-28"><label>[28]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Wang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Su</surname> <given-names>J</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Zhong</surname> <given-names>Y</given-names></string-name></person-group>. <article-title>A collaborative learning model for skin lesion segmentation and classification</article-title>. <source>Diagnostics</source>. <year>2023</year>;<volume>13</volume>(<issue>5</issue>):<fpage>912</fpage>. doi:<pub-id pub-id-type="doi">10.3390/diagnostics13050912</pub-id>; <pub-id pub-id-type="pmid">36900056</pub-id></mixed-citation></ref>
<ref id="ref-29"><label>[29]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Hosny</surname> <given-names>KM</given-names></string-name>, <string-name><surname>Said</surname> <given-names>W</given-names></string-name>, <string-name><surname>Elmezain</surname> <given-names>M</given-names></string-name>, <string-name><surname>Kassem</surname> <given-names>MA</given-names></string-name></person-group>. <article-title>Explainable deep inherent learning for multi-classes skin lesion classification</article-title>. <source>Appl Soft Comput</source>. <year>2024</year>;<volume>159</volume>(<issue>3</issue>):<fpage>111624</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.asoc.2024.111624</pub-id>.</mixed-citation></ref>
<ref id="ref-30"><label>[30]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Su</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Hamed</surname> <given-names>HNA</given-names></string-name>, <string-name><surname>Isa</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Hao</surname> <given-names>X</given-names></string-name>, <string-name><surname>Dai</surname> <given-names>X</given-names></string-name></person-group>. <article-title>A GAN-based data augmentation method for imbalanced multiclass skin lesion classification</article-title>. <source>IEEE Access</source>. <year>2024</year>;<volume>12</volume>(<issue>1</issue>):<fpage>16498</fpage>&#x2013;<lpage>513</lpage>. doi:<pub-id pub-id-type="doi">10.1109/ACCESS.2024.3360215</pub-id>.</mixed-citation></ref>
<ref id="ref-31"><label>[31]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Maqsood</surname> <given-names>S</given-names></string-name>, <string-name><surname>Dama&#x0161;evi&#x010D;ius</surname> <given-names>R</given-names></string-name></person-group>. <article-title>Multiclass skin lesion localization and classification using deep learning based features fusion and selection framework for smart healthcare</article-title>. <source>Neural Netw</source>. <year>2023</year>;<volume>160</volume>(<issue>4</issue>):<fpage>238</fpage>&#x2013;<lpage>58</lpage>. doi:<pub-id pub-id-type="doi">10.1016/j.neunet.2023.01.022</pub-id>; <pub-id pub-id-type="pmid">36701878</pub-id></mixed-citation></ref>
<ref id="ref-32"><label>[32]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Benyahia</surname> <given-names>S</given-names></string-name>, <string-name><surname>Meftah</surname> <given-names>B</given-names></string-name>, <string-name><surname>L&#x00E9;zoray</surname> <given-names>O</given-names></string-name></person-group>. <article-title>Multi-features extraction based on deep learning for skin lesion classification</article-title>. <source>Tissue Cell</source>. <year>2022</year>;<volume>74</volume>(<issue>22</issue>):<fpage>101701</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.tice.2021.101701</pub-id>; <pub-id pub-id-type="pmid">34861582</pub-id></mixed-citation></ref>
<ref id="ref-33"><label>[33]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Ayas</surname> <given-names>S</given-names></string-name></person-group>. <article-title>Multiclass skin lesion classification in dermoscopic images using swin transformer model</article-title>. <source>Neural Comput Appl</source>. <year>2023</year>;<volume>35</volume>(<issue>9</issue>):<fpage>6713</fpage>&#x2013;<lpage>22</lpage>. doi:<pub-id pub-id-type="doi">10.1007/s00521-022-08053-z</pub-id>.</mixed-citation></ref>
<ref id="ref-34"><label>[34]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Zafar</surname> <given-names>M</given-names></string-name>, <string-name><surname>Amin</surname> <given-names>J</given-names></string-name>, <string-name><surname>Sharif</surname> <given-names>M</given-names></string-name>, <string-name><surname>Anjum</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Mallah</surname> <given-names>GA</given-names></string-name>, <string-name><surname>Kadry</surname> <given-names>S</given-names></string-name></person-group>. <article-title>DeepLabv3&#x002B;-based segmentation and best features selection using slime mould algorithm for multiclass skin lesion classification</article-title>. <source>Mathematics</source>. <year>2023</year>;<volume>11</volume>(<issue>2</issue>):<fpage>364</fpage>.</mixed-citation></ref>
<ref id="ref-35"><label>[35]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Bibi</surname> <given-names>S</given-names></string-name>, <string-name><surname>Khan</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Shah</surname> <given-names>JH</given-names></string-name>, <string-name><surname>Dama&#x0161;evi&#x010D;ius</surname> <given-names>R</given-names></string-name>, <string-name><surname>Alasiry</surname> <given-names>A</given-names></string-name>, <string-name><surname>Marzougui</surname> <given-names>M</given-names></string-name>, <etal>et al.</etal></person-group> <article-title>MSRNet: multiclass skin lesion recognition using additional residual block based fine-tuned deep models information fusion and best feature selection</article-title>. <source>Diagnostics</source>. <year>2023</year>;<volume>13</volume>(<issue>19</issue>):<fpage>3063</fpage>. doi:<pub-id pub-id-type="doi">10.3390/diagnostics13193063</pub-id>; <pub-id pub-id-type="pmid">37835807</pub-id></mixed-citation></ref>
<ref id="ref-36"><label>[36]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Kadirappa</surname> <given-names>R</given-names></string-name>, <string-name><surname>Deivalakshmi</surname> <given-names>S</given-names></string-name>, <string-name><surname>Pandeeswari</surname> <given-names>R</given-names></string-name>, <string-name><surname>Ko</surname> <given-names>S-B</given-names></string-name></person-group>. <article-title>An automated multiclass skin lesion diagnosis by embedding local and global features of Dermoscopy images</article-title>. <source>Multimed Tools Appl</source>. <year>2023</year>;<volume>82</volume>(<issue>22</issue>):<fpage>34885</fpage>&#x2013;<lpage>912</lpage>. doi:<pub-id pub-id-type="doi">10.1007/s11042-023-14892-2</pub-id>.</mixed-citation></ref>
<ref id="ref-37"><label>[37]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Dillshad</surname> <given-names>V</given-names></string-name>, <string-name><surname>Khan</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Nazir</surname> <given-names>M</given-names></string-name>, <string-name><surname>Saidani</surname> <given-names>O</given-names></string-name>, <string-name><surname>Alturki</surname> <given-names>N</given-names></string-name>, <string-name><surname>Kadry</surname> <given-names>S</given-names></string-name></person-group>. <article-title>D2LFS2Net: multiclass skin lesion diagnosis using deep learning and variance-controlled Marine Predator optimisation: an application for precision medicine</article-title>. <source>CAAI Trans Intell Technol</source>. <year>2025</year>;<volume>10</volume>(<issue>1</issue>):<fpage>207</fpage>&#x2013;<lpage>22</lpage>. doi:<pub-id pub-id-type="doi">10.1049/cit2.12267</pub-id>.</mixed-citation></ref>
<ref id="ref-38"><label>[38]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Radhika</surname> <given-names>V</given-names></string-name>, <string-name><surname>Chandana</surname> <given-names>BS</given-names></string-name></person-group>. <article-title>MSCDNet-based multiclass classification of skin cancer using dermoscopy images</article-title>. <source>PeerJ Comput Sci</source>. <year>2023</year>;<volume>9</volume>(<issue>3</issue>):<fpage>e1520</fpage>. doi:<pub-id pub-id-type="doi">10.7717/peerj-cs.1520</pub-id>; <pub-id pub-id-type="pmid">37705664</pub-id></mixed-citation></ref>
<ref id="ref-39"><label>[39]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Baig</surname> <given-names>AR</given-names></string-name>, <string-name><surname>Abbas</surname> <given-names>Q</given-names></string-name>, <string-name><surname>Almakki</surname> <given-names>R</given-names></string-name>, <string-name><surname>Ibrahim</surname> <given-names>ME</given-names></string-name>, <string-name><surname>AlSuwaidan</surname> <given-names>L</given-names></string-name>, <string-name><surname>Ahmed</surname> <given-names>AE</given-names></string-name></person-group>. <article-title>Light-Dermo: a lightweight pretrained convolution neural network for the diagnosis of multiclass skin lesions</article-title>. <source>Diagnostics</source>. <year>2023</year>;<volume>13</volume>(<issue>3</issue>):<fpage>385</fpage>. doi:<pub-id pub-id-type="doi">10.3390/diagnostics13030385</pub-id>; <pub-id pub-id-type="pmid">36766490</pub-id></mixed-citation></ref>
<ref id="ref-40"><label>[40]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Alsahafi</surname> <given-names>YS</given-names></string-name>, <string-name><surname>Kassem</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Hosny</surname> <given-names>KM</given-names></string-name></person-group>. <article-title>Skin-Net: a novel deep residual network for skin lesions classification using multilevel feature extraction and cross-channel correlation with detection of outlier</article-title>. <source>J Big Data</source>. <year>2023</year>;<volume>10</volume>(<issue>1</issue>):<fpage>105</fpage>. doi:<pub-id pub-id-type="doi">10.1186/s40537-023-00769-6</pub-id>.</mixed-citation></ref>
<ref id="ref-41"><label>[41]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Abou Ali</surname> <given-names>M</given-names></string-name>, <string-name><surname>Dornaika</surname> <given-names>F</given-names></string-name>, <string-name><surname>Arganda-Carreras</surname> <given-names>I</given-names></string-name>, <string-name><surname>Ali</surname> <given-names>H</given-names></string-name>, <string-name><surname>Karaouni</surname> <given-names>M</given-names></string-name></person-group>. <article-title>Naturalize revolution: unprecedented AI-Driven precision in skin cancer classification using deep learning</article-title>. <source>BioMedInformatics</source>. <year>2024</year>;<volume>4</volume>:<fpage>638</fpage>&#x2013;<lpage>60</lpage>.</mixed-citation></ref>
<ref id="ref-42"><label>[42]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Tuncer</surname> <given-names>T</given-names></string-name>, <string-name><surname>Barua</surname> <given-names>PD</given-names></string-name>, <string-name><surname>Tuncer</surname> <given-names>I</given-names></string-name>, <string-name><surname>Dogan</surname> <given-names>S</given-names></string-name>, <string-name><surname>Acharya</surname> <given-names>UR</given-names></string-name></person-group>. <article-title>A lightweight deep convolutional neural network model for skin cancer image classification</article-title>. <source>Appl Soft Comput</source>. <year>2024</year>;<volume>162</volume>:<fpage>111794</fpage>.</mixed-citation></ref>
<ref id="ref-43"><label>[43]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Musthafa</surname> <given-names>MM</given-names></string-name>, <string-name><surname>Mahesh</surname> <given-names>TR</given-names></string-name>, <string-name><surname>Vinoth Kumar</surname> <given-names>V</given-names></string-name>, <string-name><surname>Guluwadi</surname> <given-names>S</given-names></string-name></person-group>. <article-title>Enhanced skin cancer diagnosis using optimized CNN architecture and checkpoints for automated dermatological lesion classification</article-title>. <source>BMC Med Imaging</source>. <year>2024</year>;<volume>24</volume>:<fpage>201</fpage>; <pub-id pub-id-type="pmid">39095688</pub-id></mixed-citation></ref>
<ref id="ref-44"><label>[44]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Tschandl</surname> <given-names>P</given-names></string-name>, <string-name><surname>Rosendahl</surname> <given-names>C</given-names></string-name>, <string-name><surname>Kittler</surname> <given-names>H</given-names></string-name></person-group>. <article-title>The HAM10000 dataset, a large collection of multi-source dermatoscopic images of common pigmented skin lesions</article-title>. <source>Sci Data</source>. <year>2018</year>;<volume>5</volume>:<fpage>180161</fpage>; <pub-id pub-id-type="pmid">30106392</pub-id></mixed-citation></ref>
<ref id="ref-45"><label>[45]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Kassem</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Hosny</surname> <given-names>KM</given-names></string-name>, <string-name><surname>Fouad</surname> <given-names>MM</given-names></string-name></person-group>. <article-title>Skin lesions classification into eight classes for ISIC 2019 using deep convolutional neural network and transfer learning</article-title>. <source>IEEE Access</source>. <year>2020</year>;<volume>8</volume>:<fpage>114822</fpage>&#x2013;<lpage>32</lpage>.</mixed-citation></ref>
<ref id="ref-46"><label>[46]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Li</surname> <given-names>D</given-names></string-name>, <string-name><surname>Wang</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Cao</surname> <given-names>C</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>Y</given-names></string-name></person-group>. <article-title>Information entropy based sample reduction for support vector data description</article-title>. <source>Appl Soft Comput</source>. <year>2018</year>;<volume>71</volume>:<fpage>1153</fpage>&#x2013;<lpage>60</lpage>.</mixed-citation></ref>
<ref id="ref-47"><label>[47]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Hoang</surname> <given-names>L</given-names></string-name>, <string-name><surname>Lee</surname> <given-names>S-H</given-names></string-name>, <string-name><surname>Lee</surname> <given-names>E-J</given-names></string-name>, <string-name><surname>Kwon</surname> <given-names>K-R</given-names></string-name></person-group>. <article-title>Multiclass skin lesion classification using a novel lightweight deep learning framework for smart healthcare</article-title>. <source>Appl Sci</source>. <year>2022</year>;<volume>12</volume>(<issue>5</issue>):<fpage>2677</fpage>.</mixed-citation></ref>
<ref id="ref-48"><label>[48]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Popescu</surname> <given-names>D</given-names></string-name>, <string-name><surname>El-Khatib</surname> <given-names>M</given-names></string-name>, <string-name><surname>Ichim</surname> <given-names>L</given-names></string-name></person-group>. <article-title>Skin lesion classification using collective intelligence of multiple neural networks</article-title>. <source>Sensors</source>. <year>2022</year>;<volume>22</volume>(<issue>12</issue>):<fpage>4399</fpage>; <pub-id pub-id-type="pmid">35746180</pub-id></mixed-citation></ref>
<ref id="ref-49"><label>[49]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><surname>Neeshma</surname> <given-names>A</given-names></string-name>, <string-name><surname>Nair</surname> <given-names>CS</given-names></string-name></person-group>. <article-title>Multiclass skin lesion classification using DenseNet</article-title>. In: <conf-name>2022 Third International Conference on Intelligent Computing Instrumentation and Control Technologies (ICICICT)</conf-name>; <year>2022</year>; <publisher-loc>Kannur, India</publisher-loc>. p. <fpage>506</fpage>&#x2013;<lpage>10</lpage>.</mixed-citation></ref>
<ref id="ref-50"><label>[50]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><surname>Shobha</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Prasad</surname> <given-names>K</given-names></string-name>, <string-name><surname>Anuradha</surname> <given-names>SG</given-names></string-name></person-group>. <article-title>Multiclass classification of skin cancer using convolutional neural network</article-title>. In: <conf-name>IEEE 2nd Mysore Sub Section International Conference (MysuruCon)</conf-name>; <year>2022</year>; <publisher-loc>Mysuru, India</publisher-loc>. p. <fpage>1</fpage>&#x2013;<lpage>5</lpage>.</mixed-citation></ref>
<ref id="ref-51"><label>[51]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><surname>Jaehyo</surname> <given-names>C</given-names></string-name>, <string-name><surname>John</surname> <given-names>P</given-names></string-name>, <string-name><surname>Chibuike</surname> <given-names>O</given-names></string-name></person-group>. <article-title>Performance comparison of deep learning models for skin disease classification using the HAM10000 dataset</article-title>. In: <conf-name>Proceedings of KIIT Conference</conf-name>; <year>2024</year>; <publisher-loc>Odisha, India</publisher-loc>. p. <fpage>1142</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation></ref>
<ref id="ref-52"><label>[52]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Khan</surname> <given-names>AR</given-names></string-name>, <string-name><surname>Mujahid</surname> <given-names>M</given-names></string-name>, <string-name><surname>Alamri</surname> <given-names>FS</given-names></string-name>, <string-name><surname>Saba</surname> <given-names>T</given-names></string-name>, <string-name><surname>Ayesha</surname> <given-names>N</given-names></string-name></person-group>. <article-title>Early-stage melanoma cancer diagnosis framework for imbalanced data from dermoscopic images</article-title>. <source>Microsc Res Tech</source>. <year>2024</year>;<volume>8</volume>(<issue>2</issue>):<fpage>1</fpage>&#x2013;<lpage>21</lpage>.</mixed-citation></ref>
</ref-list>
</back></article>