<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">52548</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2024.052548</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Multi-Class Skin Cancer Detection Using Fusion of Textural Features Based CAD Tool</article-title>
<alt-title alt-title-type="left-running-head">Multi-Class Skin Cancer Detection Using Fusion of Textural Features Based CAD Tool</alt-title>
<alt-title alt-title-type="right-running-head">Multi-Class Skin Cancer Detection Using Fusion of Textural Features Based CAD Tool</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Brar</surname><given-names>Khushmeen Kaur</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Goyal</surname><given-names>Bhawna</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Dogra</surname><given-names>Ayush</given-names></name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Reddy</surname><given-names>Sampangi Rama</given-names></name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Alkhayyat</surname><given-names>Ahmed</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Singh</surname><given-names>Rajesh</given-names></name><xref ref-type="aff" rid="aff-5">5</xref></contrib>
<contrib id="author-7" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Saikia</surname><given-names>Manob Jyoti</given-names></name><xref ref-type="aff" rid="aff-6">6</xref><xref ref-type="aff" rid="aff-7">7</xref><email>msaikia@memphis.edu</email></contrib>
<aff id="aff-1"><label>1</label><institution>Department of Electronics and Communication Engineering, Chandigarh University</institution>, <addr-line>Mohali, 140413</addr-line>, <country>India</country></aff>
<aff id="aff-2"><label>2</label><institution>Chitkara University Institute of Engineering and Technology, Chitkara University</institution>, <addr-line>Chandigarh, 140401</addr-line>, <country>India</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Physics &#x0026; Electronics, School of Sciences, JAIN (Deemed to be University)</institution>, <addr-line>Bangalore, 560001</addr-line>, <country>India</country></aff>
<aff id="aff-4"><label>4</label><institution>College of Technical Engineering, The Islamic University</institution>, <addr-line>Najaf, 54001</addr-line>, <country>Iraq</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Electronics and Communication Engineering, Uttaranchal Institute of Technology, Uttaranchal University</institution>, <addr-line>Dehradun, 248007</addr-line>, <country>India</country></aff>
<aff id="aff-6"><label>6</label><institution>Electrical and Computer Engineering Department, University of Memphis</institution>, <addr-line>Memphis, TN 38152</addr-line>, <country>USA</country></aff>
<aff id="aff-7"><label>7</label><institution>Biomedical Sensors &#x0026; Systems Lab, University of Memphis</institution>, <addr-line>Memphis, TN 38152</addr-line>, <country>USA</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Manob Jyoti Saikia. Email: <email>msaikia@memphis.edu</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic">
<year>2024</year>
</pub-date>
<pub-date date-type="pub" publication-format="electronic">
<day>19</day><month>12</month><year>2024</year>
</pub-date>
<volume>81</volume>
<issue>3</issue>
<fpage>4217</fpage>
<lpage>4263</lpage>
<history>
<date date-type="received">
<day>05</day>
<month>4</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>11</day>
<month>10</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2024 The Authors.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Published by Tech Science Press.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_52548.pdf"></self-uri>
<abstract>
<p>Skin cancer has been recognized as one of the most lethal and complex types of cancer for over a decade. The diagnosis of skin cancer is of paramount importance, yet the process is intricate and challenging. The analysis and modeling of human skin pose significant difficulties due to its asymmetrical nature, the visibility of dense hair, and the presence of various substitute characteristics. The texture of the epidermis is notably different from that of normal skin, and these differences are often evident in cases of unhealthy skin. As a consequence, the development of an effective method for monitoring skin cancer has seen little progress. Moreover, the task of diagnosing skin cancer from dermoscopic images is particularly challenging. It is crucial to diagnose skin cancer at an early stage, despite the high cost associated with the procedure. Unfortunately, the advancement of diagnostic techniques for skin cancer has been limited. To address this issue, there is a need for a more accurate and efficient method for identifying and categorizing skin cancer cases. This involves the evaluation of specific characteristics to distinguish between benign and malignant skin cancer occurrences. We present and evaluate several techniques for segmentation, categorized into three main types: thresholding, edge-based, and region-based. These techniques are applied to a dataset of 200 benign and melanoma lesions from the Hospital Pedro Hispano (PH2) collection. The evaluation is based on twelve distinct metrics, which are designed to measure various types of errors with particular clinical significance. Additionally, we assess the effectiveness of these techniques independently for three different types of lesions: melanocytic nevi, atypical nevi, and melanomas. The first technique is capable of classifying lesions into two categories: atypical nevi and melanoma, achieving the highest accuracy score of 90.00% with the Otsu (3-level) method. 
The second technique also classifies lesions into two categories: common nevi and melanoma, achieving a score of 90.80% with the Binarized Sauvola method.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Melanoma</kwd>
<kwd>computer-aided diagnosis</kwd>
<kwd>segmentation</kwd>
<kwd>PH2</kwd>
<kwd>ISIC (International Skin Imaging Collaboration)</kwd>
<kwd>dermoscopy</kwd>
<kwd>non-melanoma</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>The most common type of cancer and a serious risk to the general public&#x2019;s health is skin cancer. Of all cancers, skin cancer has the highest chance of spreading to other organs. The dermatologist typically performs physical examinations on patients to identify skin cancer. However, the reliability of clinical assessment alone is somewhat limited [<xref ref-type="bibr" rid="ref-1">1</xref>]. Melanoma and non-melanoma are the two main classifications used to describe skin cancer. The deadlier of the two is melanoma, which usually begins as a superficial wound that develops into a malignant tumor. Most cases of non-melanoma are classified into two classes: squamous cell carcinoma (SCC) and basal cell carcinoma (BCC). The origins of SCC and BCC are squamous cells and basal cells, respectively.</p>
<p>Several hues are present in pigmented skin lesions (PSLs), which are categorized as serious medical problems brought on by a pigmented area on the skin. Because these lesions include both benign and malignant disorders, they are important from a therapeutic standpoint [<xref ref-type="bibr" rid="ref-2">2</xref>,<xref ref-type="bibr" rid="ref-3">3</xref>].</p>
<p>The conventional method for identifying PSLs is a non-invasive technique known as dermoscopy. To highlight minute details, it is composed of a liquid immersion and a magnifying lens [<xref ref-type="bibr" rid="ref-4">4</xref>]. It highlights the need for a physical examination in healthcare settings [<xref ref-type="bibr" rid="ref-5">5</xref>]. But in an early case of skin cancer, the imaging system might simply take into account a doctor using a standard camera. In this case, pictures of the wound are taken and sent to an expert for assessment. The approach is believed to be as efficient as physical identification, albeit much faster [<xref ref-type="bibr" rid="ref-6">6</xref>].</p>
<p>Dermatologists are capable of identifying skin cancer prematurely nowadays because of advancements in computational techniques. Establishing robust computational methods for determining skin cancer through images is the primary objective of the emerging domain of research on automated PSL tests. Research demonstrates that in test settings, automated devices could identify skin cancer [<xref ref-type="bibr" rid="ref-7">7</xref>]. CAD strategies have proven to be the ideal alternative for premature detection of skin cancer, which reduces the probability that patients who underwent screening may receive a skin cancer diagnosis without any symptoms. Machine learning (ML) has progressed extensively because of breakthroughs in computing technology and the accessibility of vast image databases.</p>
<p>Deep learning (DL) is eventually substituting traditional ML frameworks in the recognition of skin cancer, primarily owing to the development of deep neural networks (DNN). The traditional method for automatically detecting skin cancer encompasses a number of steps, as illustrated in <xref ref-type="fig" rid="fig-1">Fig. 1</xref>. The steps are acquiring data, pre-processing, segmenting the data, eradicating the significant attributes, and at last grouping the data through the withdrawn features. The final phase in the procedure computes the trained classifier using the appropriate metrics.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Typical procedure for designing and creating an automated system to detect skin cancer</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-1.tif"/>
</fig>
<p>Skin cancer develops by ultraviolet (UV) radiation from the sun, which is highly intense and damaging [<xref ref-type="bibr" rid="ref-8">8</xref>,<xref ref-type="bibr" rid="ref-9">9</xref>]. It usually surfaces as an unusual or evolving spot on the skin. This spot could have the appearance of an injured area of skin, a mole, or an unhealing blister. The unusual formation of a new lesion or lump is a common warning sign for skin cancer. A mole that evolves in size, color, or shape over time may also be an indication of skin cancer. Melanoma, SCC, and BCC are the three predominant kinds of skin cancer [<xref ref-type="bibr" rid="ref-10">10</xref>]. Depending on where it originates, BCC possesses distinct looks and is the least malignant type.</p>
<p>A small, smooth bulge that appears translucent or pearly will develop on the face, ears, or neck. It is capable of bleeding by itself. On the torso, arms, or legs, it emerges as a flat patch of pink, red, or brown color. The texture of it usually seems harsh or rough. Since it originates from cells in the stratum basale, BCC gets its name. SCC is similar to red, firm nodules. Moreover, the growth might seem scaly, rough, and flat. This might become crusty, bleed, or cause pain. SCC is caused by cells in the stratum spinosum, specifically keratinocytes [<xref ref-type="bibr" rid="ref-11">11</xref>]. SCC is less prevalent than BCC as the cells cannot divide as often as they do in BCC. But when it develops, it can be hazardous because it metastasizes rapidly. It is speculated that UV light from the sun triggers these cancers by traversing skin cells and affecting cells in the stratum spinosum that mutates their DNA, resulting in cancer. Melanoma accounts for 1% of all skin cancers [<xref ref-type="bibr" rid="ref-12">12</xref>]. However, it is among the most fatal forms of skin cancer [<xref ref-type="bibr" rid="ref-13">13</xref>]. It may damage lymph nodes or various organs and spread drastically. Despite its color and shape being irregular, it generally possesses the appearance of a brown patch. Certain melanomas originate from an ordinary mole. Melanoma is a cancer of melanocytes that are located in the basal layer of the stratum basale of the epidermis. Melanoma might develop spontaneously or as a consequence of pre-existing moles. It metastasizes far more rapidly than SCC. Melanoma is generally identified following the ABCD rule. It specifies that A represents asymmetry (i.e., the two sides of the mole are uneven), B specifies an uneven border, C is for color (when the pigmented area has multiple hues), and D specifies diameter (when the spot is larger than 6 mm) [<xref ref-type="bibr" rid="ref-14">14</xref>,<xref ref-type="bibr" rid="ref-15">15</xref>].</p>
<p>Dermoscopy was created to enhance the skin cancer diagnostic process. It is a non-invasive visualization technique that generates a brighter, more intense illustration of a specific area of skin, making skin markings easier to see. The removal of the reflecting skin surface improves the visibility of deeper skin layers and gives additional details about the lesion. Dermoscopy evaluation is a usual way of recognizing melanoma as it is more reliable than visual inspection [<xref ref-type="bibr" rid="ref-16">16</xref>]. Dermoscopic evaluation of images by specialists typically takes a significant amount of time and may result in a range of diagnostic results. As a result, automated identification methods are highly recommended. But for the reasons listed below, it is not a simple task: First, it might be difficult to differentiate between non-melanomas and melanomas owing to the similarities of benign and malignant lesions. Second, the automated identification process is more difficult due to low contrasts and unclear borders between the lesion and normal skin areas. Last but not least, the development of artifacts like hairs or air bubbles may mask the lesions. The difficulties are shown in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>. The types, causes and diagnosis approaches for skin cancer are shown in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>.</p>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>The process of automatically identifying melanoma from dermoscopy images is quite difficult. From top to bottom, the primary difficulties are: a significant level of visual resemblance amongst lesions that are melanoma compared to ones that are not; comparatively little difference among lesions and areas of healthy skin; and picture artifacts. Images in the left column display non-melanomas, whereas those in the right column display melanomas</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-2a.tif"/><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-2b.tif"/>
</fig><fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Skin cancer: types, causes and diagnosis</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-3.tif"/>
</fig>
<p>To solve these challenges, numerous algorithms have been advocated that are significantly categorized into thresholding, edge [<xref ref-type="bibr" rid="ref-17">17</xref>] and region-based approaches [<xref ref-type="bibr" rid="ref-18">18</xref>]. For instance, the integration of global thresholding, adaptive thresholding, and clustering thresholding is presented in [<xref ref-type="bibr" rid="ref-19">19</xref>].</p>
<p>In case of a strong variability between the skin and the lesion, thresholding techniques work well; nevertheless, they are ineffective when the modes from the two locations overlap. However, the edge-based approach is not effective when the skin and lesion are growing together naturally and the edges are not visible. Another issue resulting from artifacts is the emergence of artificial edge extremities that do not belong in the lesion periphery. The over-segmentation that occurs when distinct colors are present in lesions and skin presents a problem for the region-based method.</p>
<p>Early identification of skin cancer by image processing techniques may lead to additional therapeutic identification and management. With the advancement of image processing techniques in the field of therapeutic detection, there is a possibility of early diagnosis and prevention of skin cancer. The skin tumor detection application works in several steps on a smartphone. The primary steps in image processing are skin lesion segmentation, feature extraction, and lesion classification into benign, atypical, and malignant lesions [<xref ref-type="bibr" rid="ref-20">20</xref>]. Skin cancer segmentation strategies rely on morphological methods, thresholding, and a number of segmentation procedures. In the majority of applications, the various attributes that are extracted with the goal of identifying skin cancer are color, form, texture, and geometry. Support vector machines (SVMs) and k-nearest neighbor (KNN) classifiers are also widely used for classification in a similar manner. Two of the software applications used to perform the skin cancer diagnostic process are Matlab and OpenCV.</p>
<p>In this paper, we present and evaluate numerous segmentation approaches from the three categories: thresholding, edge-based, and region-based. The algorithms are executed on 200 dermoscopic images of benign and melanoma lesions from the Hospital Pedro Hispano (PH2) dataset. The computation is based on 12 distinct metrics that account for various types of errors with unique therapeutic significance. We evaluate the performance of these approaches independently for three different types of lesions: melanomas, atypical nevi, and melanocytic nevi.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Related Works</title>
<p>Over the past decade, there have been numerous openly accessible datasets of skin images operated by various groups. The commonly used databases are PH2 and International Skin Imaging Collaboration (ISIC). <xref ref-type="table" rid="table-1">Table 1</xref> brings forth an overview of distinct datasets.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption>
<title>A brief overview of distinct skin cancer databases</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th>Articles</th>
<th>Dataset</th>
<th>Total number of images</th>
</tr>
</thead>
<tbody>
<tr>
<td>[<xref ref-type="bibr" rid="ref-21">21</xref>]</td>
<td>ISIC, PH2</td>
<td>1000, 200</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-22">22</xref>]</td>
<td>Human against machine with 10000 training images (HAM10000)</td>
<td>10,000</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-23">23</xref>,<xref ref-type="bibr" rid="ref-24">24</xref>]</td>
<td>International symposium on biomedical imaging (ISBI2016), (ISBI2017)</td>
<td>900, 2000</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-25">25</xref>]</td>
<td>ISIC2018</td>
<td>2594</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-26">26</xref>,<xref ref-type="bibr" rid="ref-27">27</xref>]</td>
<td>PH2</td>
<td>200</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-28">28</xref>]</td>
<td>PH2, DermIs, ISIC2016 and ISIC2017</td>
<td>1380</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-29">29</xref>]</td>
<td>PH2 and ISIC2016</td>
<td>200, 900</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-30">30</xref>&#x2013;<xref ref-type="bibr" rid="ref-33">33</xref>]</td>
<td>ISIC2017, PH2</td>
<td>2000, 200</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-34">34</xref>]</td>
<td>ISBI2017, PH2</td>
<td>2000, 200</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-35">35</xref>]</td>
<td>ISBI2016, PH2</td>
<td>900, 200</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-36">36</xref>]</td>
<td>PH2, ISBI2016, ISIC</td>
<td>200, 900, 1000</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-37">37</xref>]</td>
<td>PH2, ISIC, ISBI2016, ISBI2017</td>
<td>200, 1000, 900, 2000</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-38">38</xref>&#x2013;<xref ref-type="bibr" rid="ref-40">40</xref>]</td>
<td>ISIC2017</td>
<td>2000</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-41">41</xref>]</td>
<td>HAM10000, ISIC2019</td>
<td>18,730</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-42">42</xref>]</td>
<td>7-Point, Med-Node and PH2</td>
<td>1011, 170, 200</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-43">43</xref>]</td>
<td>ISBI2016, ISBI2017, ISIC2018, PH2</td>
<td>900, 2000, 2594, 200</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-44">44</xref>]</td>
<td>ISBI2017, ISIC2018</td>
<td>2000, 2594</td>
</tr>
<tr>
<td>[<xref ref-type="bibr" rid="ref-45">45</xref>]</td>
<td>ISBI2016, ISBI 2017</td>
<td>900, 2000</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>There are some unwanted artifacts in the pictures that cameras take, which complicate the segmentation process. The pre-processing stage works to eliminate anomalies such as hair or bubbles surrounding the lesion and lighting errors that occurred during the picture collecting phase. For the algorithms to function correctly in subsequent analysis, this phase is essential for an accurate evaluation [<xref ref-type="bibr" rid="ref-46">46</xref>]. Various pre-processing approaches such as normalization [<xref ref-type="bibr" rid="ref-30">30</xref>,<xref ref-type="bibr" rid="ref-32">32</xref>], hair removal [<xref ref-type="bibr" rid="ref-47">47</xref>,<xref ref-type="bibr" rid="ref-48">48</xref>], median filtering [<xref ref-type="bibr" rid="ref-28">28</xref>,<xref ref-type="bibr" rid="ref-29">29</xref>,<xref ref-type="bibr" rid="ref-49">49</xref>], contrast enhancement [<xref ref-type="bibr" rid="ref-50">50</xref>,<xref ref-type="bibr" rid="ref-51">51</xref>], rotation [<xref ref-type="bibr" rid="ref-52">52</xref>&#x2013;<xref ref-type="bibr" rid="ref-54">54</xref>], shifting [<xref ref-type="bibr" rid="ref-55">55</xref>&#x2013;<xref ref-type="bibr" rid="ref-58">58</xref>] and scaling [<xref ref-type="bibr" rid="ref-59">59</xref>] have been used.</p>
<p>As we previously stated, the generated images may have fluctuations in light, which, if used directly for segmentation data, would lead to blurry lesion boundaries and shading. Previous segmentation has the effect of reducing the shading of the original image. After Cavalcanti et al. [<xref ref-type="bibr" rid="ref-60">60</xref>] proposed an analytical method for decreasing shadowing in the HSV color space, Amelard et al. [<xref ref-type="bibr" rid="ref-61">61</xref>], Cavalcanti et al. [<xref ref-type="bibr" rid="ref-62">62</xref>], and Amelard et al. [<xref ref-type="bibr" rid="ref-63">63</xref>] used it. In a second study [<xref ref-type="bibr" rid="ref-64">64</xref>], artifacts are removed by leveling out the steep slope in the saturation and value channels using HSV color space. The remaining studies [<xref ref-type="bibr" rid="ref-65">65</xref>&#x2013;<xref ref-type="bibr" rid="ref-68">68</xref>] used a similar approach. A multi-stage artifact enhancement method was suggested by Amelard et al. [<xref ref-type="bibr" rid="ref-69">69</xref>] to remove reflected artifacts from images. For enhancing illumination variability in medical images, the same researchers [<xref ref-type="bibr" rid="ref-70">70</xref>] employed the previous strategy in their follow-up work and also suggested a novel multistage illumination modeling approach (MSIM). In another research project, the authors reduced illuminations using the MSIM approach [<xref ref-type="bibr" rid="ref-71">71</xref>,<xref ref-type="bibr" rid="ref-72">72</xref>]. To get rid of the reflected artifacts, the researchers [<xref ref-type="bibr" rid="ref-73">73</xref>] employed an in-painting method and a thresholding approach that was suggested by Barata et al. [<xref ref-type="bibr" rid="ref-74">74</xref>]. Ramezani et al. 
proposed employing a median filter with the mask size in combination with a shadow elimination method in HSV color space to attenuate the shading [<xref ref-type="bibr" rid="ref-75">75</xref>].</p>
<p>Several researchers used a tool called DullRazor [<xref ref-type="bibr" rid="ref-76">76</xref>] to eliminate hair artifacts from medical pictures [<xref ref-type="bibr" rid="ref-77">77</xref>]. Hair segmentation and elimination utilizing matching filter and region growth approaches were suggested by Huang et al. [<xref ref-type="bibr" rid="ref-78">78</xref>]. In order to compare and contrast the two, the researcher also employed DullRazor and its recommended technique with their own dataset. Compared to its technique, the DullRazor proved to have a higher percentage of false-hair diagnoses. Dense hair was eliminated from the images by Ramezani et al. [<xref ref-type="bibr" rid="ref-75">75</xref>] using a morphological opening and a morphological transformation in the bottom hat. Oliveira et al. [<xref ref-type="bibr" rid="ref-79">79</xref>] utilized an anisotropic diffusion filter to remove hair. In another research effort, morphological closure techniques were used to interpolate hair pixels with neighboring pixels in order to eliminate hair artifacts [<xref ref-type="bibr" rid="ref-65">65</xref>]. The researchers then found hair intersections using linear discriminant analysis (LDA) [<xref ref-type="bibr" rid="ref-80">80</xref>]. The Gaussian filter has been extensively employed in related studies to eliminate noise from the input pictures [<xref ref-type="bibr" rid="ref-81">81</xref>,<xref ref-type="bibr" rid="ref-82">82</xref>]. In an additional study, guided filtering&#x2014;an edge-preserving smoothing technique&#x2014;was used to eliminate the artifacts [<xref ref-type="bibr" rid="ref-83">83</xref>,<xref ref-type="bibr" rid="ref-84">84</xref>].</p>
<sec id="s2_1">
<label>2.1</label>
<title>Convolution Neural Network (CNN) Based Models</title>
<p>CNN-based classification models must be trained with each source image having the same size. Mechanisms like cropping, resizing, and rescaling can be used to combine the dataset configurations in order to accomplish this. When new and distinct instances are added to the dataset for training, data augmentation makes machine learning models more effective. To ensure that the architecture runs effectively, there needs to be an adequate repository that can deliver more data and has several desirable features. Using data augmentation approaches to incorporate variations that the model typically encounters in practice could strengthen machine learning models and increase the likelihood of issues such as over-fitting. Additionally, the data are unbalanced because the number of instances of various skin disorders is higher than that of melanoma. Data augmentation can be used, for example, to balance a dataset. Several studies have employed widely used methods for augmenting data, including scaling [<xref ref-type="bibr" rid="ref-86">86</xref>&#x2013;<xref ref-type="bibr" rid="ref-90">90</xref>], flipping, rotation, and cropping [<xref ref-type="bibr" rid="ref-85">85</xref>&#x2013;<xref ref-type="bibr" rid="ref-90">90</xref>]. To reduce the noise in the dataset, researchers employed a range of methods, including guided filters, Gaussian low-pass filters, blur filters, motion blur, noise addition, and histogram equalization [<xref ref-type="bibr" rid="ref-85">85</xref>,<xref ref-type="bibr" rid="ref-88">88</xref>,<xref ref-type="bibr" rid="ref-91">91</xref>,<xref ref-type="bibr" rid="ref-92">92</xref>]. Adjustments in color and brightness have also been made in [<xref ref-type="bibr" rid="ref-70">70</xref>,<xref ref-type="bibr" rid="ref-91">91</xref>]. 
After training generative adversarial networks (GANs) on digital images of cutaneous lesions, the researchers presented a DL approach for controlling variable background illumination [<xref ref-type="bibr" rid="ref-93">93</xref>]. These days, researchers are using DL layouts a lot to identify skin lesions from medical imaging. The lighting in images of skin lesions may be adjusted with the use of these deep learning techniques. In order to correct for variable background intensity in dermatological images, the study introduces a novel transform known as IECET [<xref ref-type="bibr" rid="ref-94">94</xref>].</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>Role of Segmentation Techniques in Skin Cancer Detection</title>
<p>Segmentation is an approach that partitions the picture into numerous segments or pixels for the purpose of enhancing image analysis [<xref ref-type="bibr" rid="ref-95">95</xref>]. It makes identifying tumors in an analyzed picture easier. However, it is essential to recall that image segmentation is a complicated procedure that may necessitate further processing. The two leading approaches to segmentation are K-means clustering and Otsu&#x2019;s approach [<xref ref-type="bibr" rid="ref-75">75</xref>,<xref ref-type="bibr" rid="ref-96">96</xref>,<xref ref-type="bibr" rid="ref-97">97</xref>]. Sagar et al. [<xref ref-type="bibr" rid="ref-65">65</xref>] implemented a color channel-based technique that merged an edge detection mechanism with a modified 2D Otsu approach.</p>
<p>The researchers report a 93.7% accuracy rate using their suggested strategy for lesion identification from a source image. A number of researchers used the K-means clustering approach with K &#x003D; 2 for segmentation [<xref ref-type="bibr" rid="ref-82">82</xref>,<xref ref-type="bibr" rid="ref-83">83</xref>]. Another study suggested combining Otsu thresholding with K-means, which produced accuracy and AUC values of 0.91% and 89.07%, respectively [<xref ref-type="bibr" rid="ref-98">98</xref>]. An improved K-means variation with a 94% accuracy rate has been suggested in a different study [<xref ref-type="bibr" rid="ref-83">83</xref>]. In addition to these methods, image segmentation in [<xref ref-type="bibr" rid="ref-72">72</xref>] was done using Chan Vese&#x2019;s active contour approach [<xref ref-type="bibr" rid="ref-99">99</xref>]. In [<xref ref-type="bibr" rid="ref-64">64</xref>,<xref ref-type="bibr" rid="ref-100">100</xref>], a novel segmentation technique combining Chan level set border detection with rapid independent component analysis (FastICA) has been demonstrated. A novel method that combines specific morphological analysis with Chan Vese active contour computing was put forth by Roberta et al. [<xref ref-type="bibr" rid="ref-79">79</xref>]. The TDR value outperformed the Otsu technique&#x2019;s results while using the same set of data [<xref ref-type="bibr" rid="ref-101">101</xref>]. Furthermore, Sabouri et al. segmented photographs using CNN boundary-identification technology, producing an 86.67% Jaccard Index score [<xref ref-type="bibr" rid="ref-102">102</xref>].</p>
<p>Using GAN and a U-Net generator, Udrea et al. identified and suggested pigmented lesions with 91.40% accuracy [<xref ref-type="bibr" rid="ref-103">103</xref>]. Another research effort [<xref ref-type="bibr" rid="ref-104">104</xref>] used a U-Net with four subsampling layers to segment medical images. The method was first proposed in [<xref ref-type="bibr" rid="ref-105">105</xref>], where 92%, 98%, and 95% accuracy, specificity, and sensitivity were obtained, respectively. Another research study [<xref ref-type="bibr" rid="ref-106">106</xref>] has demonstrated a fuzzy C-means clustering algorithm with sensitivity, specificity, and accuracy of 90.02%, 99.15%, and 95.69%, respectively. A strategy called interactive object recognition was used in another investigation for the segmentation [<xref ref-type="bibr" rid="ref-107">107</xref>]. A further study has presented a unique segmentation approach that uses edge detection, thresholding, and evaluation of a correlated element of the mask to extract geometric features based on ABCD [<xref ref-type="bibr" rid="ref-108">108</xref>]. For determining the threshold level from medical macro photos, a CNN method called DTP-Net was introduced [<xref ref-type="bibr" rid="ref-109">109</xref>].</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Feature Extraction and Feature Selection</title>
<p>The processes of feature extraction and selection are fundamental components of machine learning. The latest studies on the diagnosis of skin cancer include a variety of attributes, particularly distinct aspects derived from the dermatologist&#x2019;s ABCD rule. The feature selection approach is usually utilized to limit the feature set&#x2019;s size. However, a number of research studies employed DNNs and charged CNNs with feature extraction. PSLs are evaluated using the ABCD rule; further assessment by a specialist is required for these lesions [<xref ref-type="bibr" rid="ref-110">110</xref>]. By computing asymmetry, the authors in [<xref ref-type="bibr" rid="ref-101">101</xref>] were able to estimate the lesion&#x2019;s major and minor axes [<xref ref-type="bibr" rid="ref-111">111</xref>]. The region of the lesion was split into 2 parts, employing an axis derived from the longest diagonal vector corresponding to the Euclidean distance of the affected area [<xref ref-type="bibr" rid="ref-112">112</xref>].</p>
<p>The edges of the lesion, deformity, four features based on the radius [<xref ref-type="bibr" rid="ref-113">113</xref>], asymmetric edge indices [<xref ref-type="bibr" rid="ref-114">114</xref>], compactness index [<xref ref-type="bibr" rid="ref-115">115</xref>], Heywood circularity index, mean curvature [<xref ref-type="bibr" rid="ref-116">116</xref>], best-fit ellipse indices [<xref ref-type="bibr" rid="ref-117">117</xref>], bulkiness index [<xref ref-type="bibr" rid="ref-118">118</xref>], bending energy, area, perimeter of the convex hull, convexity index, indentation and protrusion index, and fractal dimensions were among the features extracted for border irregularity. In the three color channels, the average gradient magnitude and variance in the lesion expanded rim were retrieved in [<xref ref-type="bibr" rid="ref-119">119</xref>]. The gray-level co-occurrence matrix (GLCM) [<xref ref-type="bibr" rid="ref-120">120</xref>,<xref ref-type="bibr" rid="ref-121">121</xref>] has been employed by numerous authors for acquiring textural information [<xref ref-type="bibr" rid="ref-82">82</xref>,<xref ref-type="bibr" rid="ref-98">98</xref>,<xref ref-type="bibr" rid="ref-101">101</xref>,<xref ref-type="bibr" rid="ref-122">122</xref>] and is, therefore, the most frequently employed method. Furthermore, the authors in a distinct study employed the gray-level run length matrix (GLRLM) [<xref ref-type="bibr" rid="ref-123">123</xref>]. For acquiring the textural attributes of cutaneous lesions, the authors employed the local binary pattern (LBP) [<xref ref-type="bibr" rid="ref-124">124</xref>]. In another study, the authors employed a mechanism called Color Image Analysis Learning Vector Quantization (CIA-LVQ) [<xref ref-type="bibr" rid="ref-125">125</xref>].</p>
<p>In another study, the local features were acquired employing the interest points that were identified with the help of the difference of Gaussian (DoG) [<xref ref-type="bibr" rid="ref-126">126</xref>]. Pacheco et al. [<xref ref-type="bibr" rid="ref-86">86</xref>,<xref ref-type="bibr" rid="ref-91">91</xref>] suggested an approach to merging CNN-extracted attributes with medical information and this proposed methodology was called MetaBlock. According to the study&#x2019;s results, MetaBlock can be a more effective feature combination technique than conventional concatenation approaches. The authors in [<xref ref-type="bibr" rid="ref-127">127</xref>] adopted the same technique on the model they utilized in a recent investigation. A pre-trained ResNet50 had been employed by the authors in their model for withdrawing deep features [<xref ref-type="bibr" rid="ref-128">128</xref>].</p>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Classification</title>
<p>The acquired attributes are then used to assess and provide the PSLs&#x2019; detailed classification data. A training set of data is used to create a classification method, which is subsequently used by several classifiers. As a result, the features and classifier drive the model&#x2019;s performance. Furthermore, comparing methods of classification with the same dataset and features is often beneficial [<xref ref-type="bibr" rid="ref-46">46</xref>]. Compared to fully linked networks, CNNs are frequently simpler to train and require fewer hyper-parameter modifications. Custom CNN models have been trained to recognize skin lesion photos in certain investigations [<xref ref-type="bibr" rid="ref-81">81</xref>,<xref ref-type="bibr" rid="ref-88">88</xref>,<xref ref-type="bibr" rid="ref-102">102</xref>]. The researchers in [<xref ref-type="bibr" rid="ref-83">83</xref>] used two convolutional layers in each of two similar CNN frameworks to assess the overall structure and distinct texture of skin pictures. The researchers combined them to create a fully connected layer. CNNs had been employed in a different study to extract features from photographs [<xref ref-type="bibr" rid="ref-92">92</xref>]. CNNs that were initially trained to respond to classification issues had undergone modifications as a result of numerous investigations [<xref ref-type="bibr" rid="ref-129">129</xref>]. A few researchers used ResNet50 [<xref ref-type="bibr" rid="ref-130">130</xref>] and GoogleNet [<xref ref-type="bibr" rid="ref-85">85</xref>,<xref ref-type="bibr" rid="ref-91">91</xref>,<xref ref-type="bibr" rid="ref-131">131</xref>]. ResNet-152 was tuned to function as a classifier in a different study [<xref ref-type="bibr" rid="ref-132">132</xref>]. The same researchers trained a CNN to categorize images into 134 groups in a separate test [<xref ref-type="bibr" rid="ref-133">133</xref>]. 
The researchers in Reference [<xref ref-type="bibr" rid="ref-123">123</xref>] used a customized artificial neural network (ANN) to achieve the categorization goal. The researchers of a different study [<xref ref-type="bibr" rid="ref-61">61</xref>] proposed a linear soft margin SVM as an alternative to the linear SVM utilized in [<xref ref-type="bibr" rid="ref-67">67</xref>]. In [<xref ref-type="bibr" rid="ref-119">119</xref>], an SVM architecture with a kernel for the histogram&#x2019;s intersection was employed as the classification strategy. Additionally, the researchers in [<xref ref-type="bibr" rid="ref-129">129</xref>] classified many categories using a weighted SVM and adjusted them according to each category&#x2019;s degree of complexity. Two machine learning models&#x2014;KNN and a hybrid classifier created by fusing KNN and Decision Tree (DT)&#x2014;were employed in a different investigation [<xref ref-type="bibr" rid="ref-60">60</xref>]. The KNN was used for binary classification in a different study [<xref ref-type="bibr" rid="ref-108">108</xref>].</p>
<p>Non-invasive detecting techniques have emerged from using incident light, oil immersion, and magnification. The epiluminescence microscope (ELM) stimulates these mechanisms. Nonetheless, experience as a medical professional is still required for accuracy. Recently, a variety of methods have been used in experiments with automatic skin cancer diagnosis. Computer-Aided Diagnosis (CAD) approaches for dermoscopic pictures have evolved to readily stimulate the diagnosis of worrisome lesions, hence aiding dermatologists in their diagnostic evaluation. Furthermore, inexperienced practitioners may utilize it as a supplementary tool to carry out an initial inquiry [<xref ref-type="bibr" rid="ref-46">46</xref>]. Considering the key elements which are excluded from the dermoscopic pictures, these therapies might be grouped into broadly 2 categories. By automatically eradicating the identical attributes, a subset seeks to resemble therapeutic recognition strategies. Another discipline has its foundation in machine learning and computational pattern recognition utilized for common visual elements. In [<xref ref-type="bibr" rid="ref-134">134</xref>], a depiction of an assessment network affected by therapy aspects is shown.</p>
<p>Gola et al. [<xref ref-type="bibr" rid="ref-135">135</xref>] demonstrated a method for diagnosing melanoma. The globular and reticular patterns are identified by pattern analysis, which is the basis for this automated approach. An additional study that presents automatic melanoma identification is [<xref ref-type="bibr" rid="ref-136">136</xref>]. The dermoscopic rules calculated using the ABCD rule are the foundation of the framework.</p>
<p>The majority of the CAD observed with the ML method has been reported [<xref ref-type="bibr" rid="ref-137">137</xref>&#x2013;<xref ref-type="bibr" rid="ref-139">139</xref>]. A methodology that uses the K-nearest neighbors (KNN) classifier and removes distinguishing variables including color, texture, and border was given by Ganster et al. [<xref ref-type="bibr" rid="ref-19">19</xref>]. The methodology achieved 92% specificity (SP) and 87% sensitivity (SE). Another study involved Artificial Neural Networks (ANN) utilized as classification entities [<xref ref-type="bibr" rid="ref-138">138</xref>] and the features withdrawn were border and color. In another study, a skin lesion classification formulated on SVM is presented [<xref ref-type="bibr" rid="ref-111">111</xref>]. The features withdrawn include shape, texture, and color; attaining a SE of 93% and a SP of 92%.</p>
<p>Tumpa et al. [<xref ref-type="bibr" rid="ref-140">140</xref>] presented melanoma recognition and classification based on ANN utilizing composite texture features. The procedure employs Otsu&#x2019;s thresholding segmentation and the features withdrawn are ABCD, LBP, and GLCM, attaining an accuracy of 97.7%.</p>
<p>While each investigating organization chooses a distinct data source, results evaluation becomes complicated regardless of whether the CAD practices offer desirable outcomes. It is therefore prevalent to have to create a dataset of dermoscopic pictures that researchers can employ as ground truth.</p>
<p>Skin cancer assessment and categorization are largely processed through DL algorithms especially CNNs. ML and hybrid methods are utilized as well from time to time.</p>
<p>A procedure of removal of artifacts and noise from the raw data file is referred to as pre-processing. The most important phase in the procedure of segmentation is to discover and describe the area of lesion in the pictures.</p>
<p>Feature extraction is a crucial step in obtaining relevant data from the partitioned area. Domain-specific features are often used in machine learning settings. Thanks to DL, the field of ML has completely changed in the last few decades. It is believed that ANN algorithms represent the most sophisticated area of ML. These frameworks got inspiration from the formation and functioning of the human brain. In many different situations, DL methodologies have demonstrated impressive results contrary to distinct conventional ML techniques. Many DL approaches have been used in computer-based skin cancer screening throughout the last few decades.</p>
<p>Summary: In the latter half of the 20th century, investigators started to investigate the possibility of computers augmenting and analyzing medical images. The primary focus of CAD software was basic pattern recognition in radiographic images. The journey of CAD started as a quest to augment human vision with computing capacity. In recent times, artificial intelligence (AI) and machine learning evolved to an extent where CAD has emerged as an exceptionally developed platform for discerning minute inconsistencies that are largely overlooked by human vision. The advancement was thwarted by certain challenges. Owing to the lack of precision and inability to overtake human experience, early CAD methods were viewed with skepticism. But persistent exploration and development in technology made it suitable for CAD to enhance our potential and emerge as an effective testing technique.</p>
<p>CAD is a technique that effectively merges modern innovations in technology and medicinal attributes to offer a rather prudent solution. Optimizing the precision as well as the efficacy of disease observation by means of the utilization of several feasible techniques is a primary goal of the comprehensive architecture. A lot of advantages stem from CAD synthesis in the therapeutic domain. Most specifically it allows for identifying cancers at an early stage and enhances the accuracy of diagnosis while minimizing the duration of time necessary for interpreting. Diagnoses might be conducted more efficiently and precisely attributable to the practical equipment termed computer-aided diagnostic (CAD). However, there are actually plenty of problems with CAD operation. Among the primary issues is the fact that algorithm training relies on reliable information that may prove a limitation for specific circumstances. Furthermore, the integration of CAD into conventional healthcare services implies significant infrastructure and mechanical modifications.</p>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Challenges in Detecting Skin Cancer</title>
<p>There are a few complications in detecting skin cancer that are associated with variations in image kinds and sources [<xref ref-type="bibr" rid="ref-141">141</xref>,<xref ref-type="bibr" rid="ref-142">142</xref>]. The distinction in human skin color makes the recognition of skin cancer complicated [<xref ref-type="bibr" rid="ref-143">143</xref>]. The prime complications of skin cancer are the numerous sizes and configurations of the images, which cannot provide a precise outcome of recognition. In this regard, pre-processing is necessary for precise evaluation. Some unnecessary signals are to be adapted, which are actually not part of an image, although they might be interrupted to attain a better outcome. Therefore, every noise has to be removed in the pre-processing stage. Another complication is that low contrast from adjoining tissues extends additional problems, making it difficult for precise analysis of skin cancer. Distinct components of color illumination, for instance, the texture of color, reflections, etc., also cause certain complications. There are certain moles that might never evolve cancer cells, but they produce several complications in identifying skin cancer precisely from cancerous images. In addition to instances involving accessibility and statistical value, DL experiences issues with both over-and under-fitting. If a DNN seeks to establish noise in the input data instead of identifying core patterns, it constitutes over-fitting. On the other side, if the algorithm seems overly basic or unable to identify complicated structures in the data, it may underfit.</p>
</sec>
<sec id="s4">
<label>4</label>
<title>Segmentation Methods</title>
<p>For computer vision (CV) and feature extraction procedures, the segmentation of a digital image is a crucial task for the evaluation of the image. It is a procedure for dividing an image into several divisions or sub-divisions separated by gray values that coordinate with distinct real-world entities in the image. Of all the segmentation methods, thresholding is the most fundamental and effective approach for digital image segmentation. The distinct kinds of approaches for segmentation are present in [<xref ref-type="bibr" rid="ref-144">144</xref>,<xref ref-type="bibr" rid="ref-145">145</xref>]. The leading domain in image processing is image segmentation with multi-level thresholding. The analysis of thresholding techniques and their significance is discussed in [<xref ref-type="bibr" rid="ref-146">146</xref>]. Thresholding approaches are typically of 2 classes: (i) bi-level and (ii) multilevel thresholding (MT). It relies upon thresholding magnitude evaluated on the grayscale dimensions. The image in the bi-level thresholding approach is partitioned into 2 divisions on the basis of threshold magnitude, the partitioning factor; thresholding magnitude is specified on the histogram. An extensive histogram-based approach called Otsu&#x2019;s method of segmentation is an example of bi-level thresholding [<xref ref-type="bibr" rid="ref-96">96</xref>]. A slight transformation to Kittler and Illingworth&#x2019;s minimum error thresholding is discussed in [<xref ref-type="bibr" rid="ref-147">147</xref>], entropy based MT of grayscale images is discussed in [<xref ref-type="bibr" rid="ref-148">148</xref>], and a novel cluster evaluation based threshold selection approach is discussed in [<xref ref-type="bibr" rid="ref-149">149</xref>]. An innovative fuzzy classification entropy method that lowers the processing time for determining the ideal thresholds for MT is presented in [<xref ref-type="bibr" rid="ref-150">150</xref>]. 
The ideal threshold is computed by the augmentation of Kapur&#x2019;s or Otsu&#x2019;s objective function using the Krill Herd Optimization technique [<xref ref-type="bibr" rid="ref-151">151</xref>]. The criterion of the MT approach is to divide digital images into more than 2 divisions by selecting numerous gray terms on the histogram to accomplish certain conditions [<xref ref-type="bibr" rid="ref-152">152</xref>]. An MT image segmentation approach employing a transformed salp swarm algorithm (SSA) is discussed in [<xref ref-type="bibr" rid="ref-153">153</xref>].</p>
<p>The following segmentation techniques have been utilized and compared belonging to the categories: Thresholding based, cluster based, edge based, and region based.</p>
<sec id="s4_1">
<label>4.1</label>
<title>Otsu&#x2019;s Thresholding Based Segmentation</title>
<p>It is utilized for implementing automatic image thresholding in image processing. This method divides the pixels into foreground and background by returning a single intensity threshold. <xref ref-type="fig" rid="fig-4">Fig. 4</xref> illustrates skin cancer images after applying Otsu thresholding in the proposed methodology. The method examines the threshold which reduces the intra-class variance, characterized as a weighted aggregate of variances of the 2 categories. The clip limit (CL), as well as block size (BS), are two significant CLAHE parameters. These two factors primarily regulate enhanced image quality. Due to the input image having poor intensity and the large clip limit making the histogram flatter, the image becomes brighter when the clip limit is increased. The dynamic range expands along with the block size, which also increases image contrast. When using the entropy of the image, the two factors are defined at the point having the highest entropy curvature and generate inputs with a subjectively high quality [<xref ref-type="bibr" rid="ref-48">48</xref>]. Each contextual region is subjected to histogram equalization using the CLAHE method. The clipped pixels from the original histogram are then distributed among each grey level. The redistribution histogram differs from the regular histogram since each pixel intensity is restricted to the selected maximum. However, the enhanced image and the input image carry the same minimal and maximal grey values. The steps of the CLAHE algorithm to enhance an input image are as follows:</p>
<fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>Illustration of skin cancer images after applying Otsu thresholding</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-4.tif"/>
</fig>
<p>Stage 1: Separate an input intensity image into disjoint contextual areas. The overall number of image tiles is equivalent to M &#x00D7; N, and 8 &#x00D7; 8 is a good score to maintain the chromatic information of an image.</p>
<p>Stage 2: Calculate a histogram of each contextual area by grey levels present in the array image.</p>
<p>Stage 3: Compute a contrast limit histogram of the contextual regions by the clip limit value as:
<disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>w</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></disp-formula>where <inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> represent weights for the probabilities of 2 categories segmented by <italic>t</italic>, i.e., threshold and <inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula>, <inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:msubsup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> represent variances.</p>
<p>The class probability &#x03C9;<sub>1,2</sub>(<italic>t</italic>) is calculated from <italic>N</italic> bins of histogram as:
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>m</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>m</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>where <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mi>L</mml:mi><mml:mi>I</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> stands for normalized clip limit in the range [0, 1], where <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the actual clip limit. The pixels will be clipped if the number of pixels exceeds <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. If <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mo>&#x2211;</mml:mo><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>i</mml:mi><mml:mi>p</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the total amount of clipped pixels, then the mean of the remaining pixels for distribution to each grey level as:
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:msub><mml:mi>&#x03C9;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>m</mml:mi><mml:mo>=</mml:mo><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>m</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Multilevel Thresholding Using Otsu</title>
<p>According to this approach for MT, the gray levels of the image are divided into groups or divisions. Let R be the whole area of space that an image occupies. The goal of image segmentation is to split the region R up into smaller sections R1, R2, ..., RN.</p>
<p>For this procedure a thresholding level (<italic>t</italic>) is specified, the rule set for bi-level thresholding can be stated as follows:
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">&#x2190;</mml:mo><mml:mi>g</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mn>0</mml:mn><mml:mo>&#x2264;</mml:mo><mml:mi>g</mml:mi><mml:mo>&#x2264;</mml:mo><mml:mi>t</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">&#x2190;</mml:mo><mml:mi>g</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>t</mml:mi><mml:mo>&#x2264;</mml:mo><mml:mi>g</mml:mi><mml:mo>&#x2264;</mml:mo><mml:mi>N</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:math></disp-formula></p>
<p>Here <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> are regions, <italic>N</italic> &#x2212; 1 represents the maximum pixel term, <italic>g</italic> represents any gray term from the specified range {0, 1, 2, &#x2026;, <italic>N</italic> &#x2212; 1}.</p>
<p>In case the pixel value lies below threshold <italic>t</italic>, it corresponds to Region <italic>R</italic>1, else it corresponds to Region <italic>R</italic>2. The rule set for <italic>n</italic> MT can be stated as:</p>
<p>Else if <inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>g</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:mi>v</mml:mi><mml:mi>g</mml:mi><mml:mi>g</mml:mi><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x003E;</mml:mo><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> then
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">&#x2190;</mml:mo><mml:mrow><mml:mtext>g</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext>if</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mn>0</mml:mn><mml:mo>&#x2264;</mml:mo><mml:mi>g</mml:mi><mml:mo>&#x2264;</mml:mo><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></disp-formula>
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">&#x2190;</mml:mo><mml:mrow><mml:mtext>g</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext>if</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x2264;</mml:mo><mml:mi>g</mml:mi><mml:mo>&#x2264;</mml:mo><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></disp-formula>
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">&#x2190;</mml:mo><mml:mrow><mml:mtext>g</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext>if</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2264;</mml:mo><mml:mi>g</mml:mi><mml:mo>&#x2264;</mml:mo><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></disp-formula>
<disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">&#x2190;</mml:mo><mml:mrow><mml:mtext>g</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext>if</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2264;</mml:mo><mml:mi>g</mml:mi><mml:mo>&#x2264;</mml:mo><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></disp-formula></p>
<p>Here <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> represent different thresholds. The overall grayscale is precisely categorized into numerous exclusive regions through preference or by evaluating the threshold value (<italic>t</italic>). In this paper generation of threshold for 2 level, 3 level, and 7 level using Otsu&#x2019;s approach is considered for evaluating the optimized t values. <xref ref-type="fig" rid="fig-5">Fig. 5</xref> illustrates skin cancer images after applying Multilevel Otsu thresholding in the proposed methodology.</p>
<fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>Illustration of skin cancer images after applying Multilevel Otsu thresholding</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-5.tif"/>
</fig>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Adaptive Thresholding</title>
<p>This is a local thresholding approach where instead of manually defining the threshold value, it is adapted and determined automatically in accordance with image pixels and arrangement for transforming the pixels of the image to grayscale or a binary image. Primarily, this approach advocates selecting threshold values automatically for segmenting the prime entity from its background in a state where there are distinct illumination, colors, or contrast in the image. The pixel intensity upon which the pixels related to the background and foreground are dissociated is termed as threshold value for an image. In other words, the higher intensity image pixels than the threshold will be dissociated from the lower intensity image pixels. For executing adaptive thresholding, as opposed to providing a single threshold value by primitive attempt, it is possible to partition the image foreground and background more effectively by employing a much more precise threshold value.</p>
<p>The segmentation of lesions can be attained by correlating the color of every pixel by a threshold <italic>t</italic>. In this paper for adaptive segmentation, small blocks are used to adapt the images. Here the function provides a rate Y running on X to evaluate the adapted threshold. Further standard deviation (SD) of X is evaluated. If the rate of SD of the block of pixels is less than 1, it is labeled as a background.</p>
</sec>
<sec id="s4_4">
<label>4.4</label>
<title>Niblack Thresholding and Sauvola Thresholding</title>
<p>The Niblack and Sauvola thresholding approaches have particularly evolved to enhance the image quality. These are local thresholding techniques that change the threshold in accordance with the local mean and SD for every pixel in a sliding window. The final local pixel value of these thresholding approaches is facilitated by distinct positive parameters also. It is performed to ensure the division between the entity and the background.
<disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>V</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mover><mml:mi>X</mml:mi><mml:mo accent="false">&#x00AF;</mml:mo></mml:mover><mml:mo>&#x2217;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mi>c</mml:mi><mml:mo>&#x2217;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mi>p</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>
<disp-formula id="eqn-10"><label>(10)</label><mml:math id="mml-eqn-10" display="block"><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>V</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mover><mml:mi>X</mml:mi><mml:mo accent="false">&#x00AF;</mml:mo></mml:mover><mml:mo>+</mml:mo><mml:mi>c</mml:mi><mml:mo>&#x2217;</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>a</mml:mi></mml:math></disp-formula>where <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>V</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents Sauvola value, <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>V</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents Niblack value, <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:mover><mml:mi>X</mml:mi><mml:mo accent="false">&#x00AF;</mml:mo></mml:mover></mml:math></inline-formula> represents mean, &#x03C3; represents standard deviation (SD), <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:mi>c</mml:mi></mml:math></inline-formula> represents positive parameter, <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:mi>p</mml:mi></mml:math></inline-formula> represents dynamic range of SD.</p>
<p>In this paper, Niblack, Sauvola, and binarized Sauvola techniques are used for classifying skin cancer lesions. <xref ref-type="fig" rid="fig-6">Fig. 6</xref> illustrates skin cancer images after applying Niblack and Sauvola thresholding in the proposed methodology.</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>Illustration of skin cancer images after applying Niblack and Sauvola thresholding</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-6a.tif"/><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-6b.tif"/>
</fig>
</sec>
<sec id="s4_5">
<label>4.5</label>
<title>Active Contour Segmentation</title>
<p>This segmentation technique utilizes energy forces and limitations to split up the pixels of interest out of the remainder of the image for additional refining and evaluation. For segmentation, an active contour interprets a particular edge or curvature for the areas of the target entity. The contour relies on certain constraints, and on this basis, it is categorized into distinct types. The approach is used in several image processing applications, especially in therapeutic imaging, by segmenting the regions from distinct therapeutic images [<xref ref-type="bibr" rid="ref-153">153</xref>,<xref ref-type="bibr" rid="ref-154">154</xref>]. The contours are edges formulated for the targeted region required in an image. It is a cluster of points that experience interpolation procedures that might be linear, splines, or polynomials, representing the curve in the image [<xref ref-type="bibr" rid="ref-155">155</xref>]. Distinct models of active contours are implemented for the segmentation approach in image processing, and the prime objective is to specify a smooth shape and create a closed contour for the region. These models outline the edges of entities or distinct attributes of the image to produce a contour. The curve of the models is ascertained by numerous contour methods by applying external and internal forces. The energy function is related to the curvature specified in the image. The synthesis of forces because of the image that is precisely employed for administering the orientation of the contour on the picture is referred to as external energy, while internal energy regulates the deformable variations. The appropriate contour is attained by determining the minimum of the energy functional. The cluster of points that observe a contour is considered to be contour-deforming. This contour fits the preferred image contour by minimizing the energy function.</p>
<p>In this paper, the Chan Vese model for active contour is utilized for segmenting skin cancer lesions. It is an effective and flexible approach that can segment distinct types of images, which are relatively difficult to segment through traditional segmentation approaches [<xref ref-type="bibr" rid="ref-155">155</xref>]. The approach is demonstrated by the Mumford-Shah functional that is utilized in the therapeutic imaging domain.</p>
<p>The segmentation is performed using the image segmenter app by evolving 100 iterations. The components connected to the edges of the image are suppressed. The masked illustration is then created through the input, which is the final segmented image. <xref ref-type="fig" rid="fig-7">Fig. 7</xref> illustrates skin cancer images after applying the Chan Vese segmentation approach to the proposed methodology.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>Illustration of skin cancer images after applying the Chan Vese segmentation approach</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-7.tif"/>
</fig>
</sec>
<sec id="s4_6">
<label>4.6</label>
<title>Watershed Segmentation</title>
<p>The watershed algorithm is formulated by withdrawing certain background and foreground and later utilizing markers to create a watershed run and determine the accurate edges. The approach typically helps in determining connected and overlapping entities in an image.</p>
<p>In this paper, the utilization of the watershed algorithm is initiated by modifying the RGB images into grayscale. Consequently, to provide a smooth image, a morphological top-hat filter is utilized. In the next step, the images are transformed into binary format employing grayscale and are later complemented. In the next step, the distance is computed and the watershed transformation is calculated. The segmented regions are displayed by employing distinct colors. <xref ref-type="fig" rid="fig-8">Fig. 8</xref> illustrates skin cancer images after applying the watershed segmentation approach.</p>
<fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>Illustration of skin cancer images after applying the watershed segmentation approach</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-8.tif"/>
</fig>
</sec>
<sec id="s4_7">
<label>4.7</label>
<title>Fast Marching Approach (FMM)</title>
<p>It is an arithmetical method that is used to interpret boundary value problems of the Eikonal equation:
<disp-formula id="eqn-11"><label>(11)</label><mml:math id="mml-eqn-11" display="block"><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi mathvariant="normal">&#x2207;</mml:mi><mml:mi>T</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>y</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>v</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>y</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
<p>The problem generally illustrates the progression of a closed surface as a function of time <italic>T</italic> and speed <italic>v</italic> in the usual direction at point <italic>y</italic> on the propagating area. The speed function is defined, and the time when the contour traverses the point <italic>y</italic> is attained by solving the equation. The FMM uses this optimal control evaluation of the complication so as to come up with a solution emerging from the boundary values. The FMM works on the fact that data only moves outward from the seeding region.</p>
<p>In this paper, FMM is utilized to segment the ROI of the images on the basis of variations in gray intensity in contrast to the seed locations. A mask is then created by defining seed locations. In the subsequent step, a weighted array is computed on the basis of grayscale intensity variations, and the images are segmented using these weights. Further, by thresholding the geodesic distance matrix by distinct thresholds, different segmentation results are computed. <xref ref-type="fig" rid="fig-9">Fig. 9</xref> illustrates skin cancer images after applying FMM.</p>
<fig id="fig-9">
<label>Figure 9</label>
<caption>
<title>Illustration of skin cancer images after applying FMM</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-9.tif"/>
</fig>
</sec>
<sec id="s4_8">
<label>4.8</label>
<title>Simple Linear Iterative Clustering</title>
<p>By synthesizing pixels in the image plane based on their proximity and color homogeneity, this method produces superpixels. It is the most often used method for superpixel segmentation; however, it requires too much processing power. Images of skin cancer after using the simple linear iterative clustering technique are shown in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>.</p>
<fig id="fig-10">
<label>Figure 10</label>
<caption>
<title>Illustration of skin cancer images after applying the simple linear iterative clustering approach</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-10.tif"/>
</fig>
</sec>
<sec id="s4_9">
<label>4.9</label>
<title>K-Means Clustering</title>
<p>It is utilized for implementing automatic image thresholding in image processing. This method divides the pixels of an image into foreground and background by returning a single intensity threshold. <xref ref-type="fig" rid="fig-11">Fig. 11</xref> illustrates skin cancer images after applying K-means clustering in the proposed methodology. The method examines the threshold which reduces the intra-class variance, characterized as a weighted aggregate of variances of the 2 categories. It is an unsupervised clustering algorithm, which simply indicates that no labeled data is available. It is employed to ascertain distinct groups or categories in the specified data derived from the similarity of the data. Data points in the same category are more similar to one another than to data points in different groups.</p>
<fig id="fig-11">
<label>Figure 11</label>
<caption>
<title>Illustration of skin cancer images after applying the K-means clustering approach</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-11.tif"/>
</fig>
<p>It is one of the most extensively employed clustering approaches, where K denotes the number of clusters.</p>
<p>The algorithm works as follows:</p>
<p>1. Select the number of clusters that is K.</p>
<p>2. Arbitrarily allocate the data points to the K clusters.</p>
<p>3. Evaluate the center of the clusters.</p>
<p>4. Evaluate the length of the data points from the centers of every cluster.</p>
<p>5. Derived from the length of data points from the cluster, redistribute them to the adjacent clusters.</p>
<p>6. Then compute the new cluster center.</p>
<p>7. Reiterate Steps 4, 5 and 6 until data points no longer change clusters, or till the specified number of iterations is attained.</p>
</sec>
<sec id="s4_10">
<label>4.10</label>
<title>Fuzzy C-Means Clustering (FCM)</title>
<p>K-means is a particular case of FCM in which the membership probability applied is merely 1 in case the data point is nearest to a centroid and 0 contrarily.</p>
<p>The algorithm works as follows:</p>
<p>&#x2022; Presume a certain quantity of clusters, i.e., K.</p>
<p>&#x2022; Arbitrarily compute the K-means <italic>&#x03BC;</italic><sub><italic>K</italic></sub> related to the clusters and evaluate the probability that every data point <italic>x</italic> is a component of a specified cluster K.</p>
<p>&#x2022; Re-compute the centroid of the cluster as the weighted centroid provided the likelihood of components of each data point <italic>x</italic>:
<disp-formula id="eqn-12"><label>(12)</label><mml:math id="mml-eqn-12" display="block"><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mrow><mml:mi>K</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mi>&#x03F5;</mml:mi><mml:mi>K</mml:mi></mml:mrow></mml:msub><mml:mi>x</mml:mi><mml:mo>&#x2217;</mml:mo><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msup><mml:mrow><mml:mo>{</mml:mo><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mrow><mml:mi>K</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>x</mml:mi><mml:mo>}</mml:mo></mml:mrow><mml:mrow><mml:mi>b</mml:mi></mml:mrow></mml:msup><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mi>&#x03F5;</mml:mi><mml:mi>K</mml:mi></mml:mrow></mml:msub><mml:mi>x</mml:mi><mml:mi>P</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msup><mml:mrow><mml:mo>{</mml:mo><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mrow><mml:mi>K</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>x</mml:mi><mml:mo>}</mml:mo></mml:mrow><mml:mrow><mml:mi>b</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
<p>Reiterate till convergence or till the number of iterations specified by the user has been obtained.</p>
<p>In this paper, fuzzy c-means clustering of images is executed which divides the image into <italic>n</italic> clusters automatically by random initialization. The counts of clusters and iterations might be controlled and determined by the user. The outcomes of the function are upgraded cluster centers and segmented images illustrated in <xref ref-type="fig" rid="fig-12">Fig. 12</xref>.</p>
<fig id="fig-12">
<label>Figure 12</label>
<caption>
<title>Illustration of skin cancer images after applying FCM</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-12.tif"/>
</fig>
</sec>
<sec id="s4_11">
<label>4.11</label>
<title>Edge-Based Segmentation Approaches</title>
<p>An image processing approach that detects the edges of entities within an image is known as the edge detection technique. It is utilized for segmenting images by identifying discontinuities in brightness. <xref ref-type="fig" rid="fig-13">Fig. 13</xref> illustrates skin cancer images after applying (a) Sobel (b) Prewitt (c) Robert (d) Log (e) Zero Crossing (f) Canny. The distinct edge detection approaches used in this paper are discussed in <xref ref-type="table" rid="table-2">Table 2</xref>.</p>
<fig id="fig-13">
<label>Figure 13</label>
<caption>
<title>Illustration of skin cancer images after applying (a) Sobel (b) Prewitt (c) Robert (d) Log (e) Zero Crossing (f) Canny</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-13a.tif"/><graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-13b.tif"/></fig><table-wrap id="table-2">
<label>Table 2</label>
<caption>
<title>The edge detection methodologies applied in the proposed work</title>
</caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
</colgroup>
<tbody>
<tr>
<td>Sobel</td>
<td>Spots edge at the points of the maximum gradient of the image, employing the Sobel approximation to the derivative.</td>
</tr>
<tr>
<td>Prewitt</td>
<td>Locates edges at the points of the maximum gradient of the image, employing the Prewitt proximity to the derivative.</td>
</tr>
<tr>
<td>Roberts</td>
<td>Locates boundaries at the locale of the maximum gradient of the image, employing the Roberts proximity to the derivative.</td>
</tr>
<tr>
<td>Log</td>
<td>Locates edges by searching zero-crossings subsequent to refining images with a Laplacian of Gaussian (LoG) filter.</td>
</tr>
<tr>
<td>Zerocross</td>
<td>Locates boundaries by searching zero-crossings subsequent to refining images through a filter one specifies.</td>
</tr>
<tr>
<td>Canny</td>
<td>Locates edges by searching local maxima of the gradient with the help of a Gaussian filter. The approach utilizes 2 thresholds to identify strong and weak boundaries, incorporating weaker ones in the output provided they are interconnected with strong edges. Because of the 2 thresholds, the Canny approach is not affected by the noise like other approaches and detects true weak edges accurately.</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Proposed Methodology</title>
<p>In this paper, we bring forward an image processing established framework to determine, withdraw, and categorize the lesion from the dermoscopy pictures, the method will help in the identification of skin cancer. The block diagram for the proposed methodology is illustrated in <xref ref-type="fig" rid="fig-14">Fig. 14</xref>.</p>
<fig id="fig-14">
<label>Figure 14</label>
<caption>
<title>Block diagram of skin cancer diagnosis classified according to the melanocytic nevus and malignant conditions</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-14.tif"/>
</fig>
<sec id="s5_1">
<label>5.1</label>
<title>Dataset Description</title>
<p>The PH2 database was developed by a mutual research association among the Universidade do Porto, T&#x00E9;cnico Lisboa, and the Dermatology service of Hospital Pedro Hispano in Matosinhos, Portugal [<xref ref-type="bibr" rid="ref-156">156</xref>]. This dataset consists of 200 dermoscopic pictures (8-bit RGB pictures) having a resolution of 768 &#x002A; 560. The pictures are categorized into 80 common nevi, 80 atypical nevi and 40 melanomas.</p>
</sec>
<sec id="s5_2">
<label>5.2</label>
<title>Image Pre-processing</title>
<p>This step involves transforming the RGB images to grayscale, and noise filtering by using the DullRazor software. It is utilized since some of the acquired images are noisy because of the presence of hair and bubbles around the lesion. So DullRazor is administered to reduce the consequence of hair being obscured on the skin in the final picture utilized for categorization, making the segmentation more accurate. <xref ref-type="fig" rid="fig-15">Fig. 15</xref> illustrates the output image from the pre-processing phase.</p>
<fig id="fig-15">
<label>Figure 15</label>
<caption>
<title>The original picture, the gray picture prior to hair identification and elimination, and the gray image following hair detection, exclusion, and reconstruction are the three samples for hair detection, exclusion, and reconstruction that are illustrated</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-15.tif"/>
</fig>
</sec>
<sec id="s5_3">
<label>5.3</label>
<title>Segmentation</title>
<p>The subsequent step is identifying and segmenting the region of interest (ROI) that depicts the suspected area. This procedure comprises steps as follows: thresholding, computing the histogram, and conversion to a black-and-white image in the case of global thresholding using OTSU segmentation. For multi-level thresholding using the Otsu approach, for a 2-level approach, a single threshold value is computed and the image is segmented into 2 levels. For the 3-level approach, 2 threshold values are computed, and the image is segmented into 3 levels, which are then converted into a colored image. For the 7-level approach, thresholds for 7 levels from the entire RGB image are generated. The threshold for each plane of the RGB picture is then generated. Process the picture with an array of threshold terms enumerated from the picture. Process each RGB plane separately using the threshold vector computed from the given plane. Quantize every RGB plane through the threshold vector prompted for that plane.</p>
</sec>
<sec id="s5_4">
<label>5.4</label>
<title>Feature Extraction</title>
<p>Feature extraction depicts approaches aiming at withdrawing additional value details off the images. The withdrawn elements called features might be statistical, boundary, morphological, and textural characteristics. These features are applied as input data for distinct image processing procedures such as segmentation and classification.</p>
<p>The proposed algorithm extracts textural features of the images in the test phase. Let X represent the feature database, which has 200 images. Let <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:mi>F</mml:mi><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represent the query feature vector (FV) having length 34 (representing features extracted). The 200 images are divided into 3 categories: atypical Nevi, common Nevi, and Melanoma. These categories are subdivided into benign (containing atypical and common nevi images) and malignant (containing melanoma images) forms. The final FV is a combination of both benign and malignant classifications forming a vector of 200 &#x002A; 34. The feature extraction mechanisms utilized in the paper are discussed below:</p>
<sec id="s5_4_1">
<label>5.4.1</label>
<title>Grey Level Co-occurrence Matrix (GLCM)</title>
<p>The texture of a picture provides the detail about the spatial positioning of color or hue. A one-dimensional (1-D) histogram is not convenient for exhibiting the texture and therefore a 2-D matrix called GLCM is employed for evaluating the texture. It picks up the numerical textural features that are utilized to illustrate, categorize and compare textures.</p>
<p>Calculation of GLCM:</p>
<p>It comprises size of M &#x002A; M (M representing gray levels) illustrating the group of possible pixel levels.</p>
<p>It is calculated on the basis of 2 criterions:</p>
<p>R &#x2192; relative distance amongst the pixel pair</p>
<p>&#x03B8; &#x2192; rotational angle</p>
<p>GLCM computes the joint probability of 2 pixels separated by R and across &#x03B8; comprising co-occurring values j and k as shown in <xref ref-type="fig" rid="fig-16">Fig. 16</xref>.</p>
<fig id="fig-16">
<label>Figure 16</label>
<caption>
<title>Illustration of GLCM employed for evaluating the texture of an image</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-16.tif"/>
</fig>
<p>Different GLCM textural measures:</p>
<p>This paper includes 8 local texture features that are illustrated below with mathematical interpretation on the evaluation of each texture. Here <italic>f</italic>(<italic>i</italic>, <italic>j</italic>) represents the frequency of components in the GLCM with index <italic>i</italic>, <italic>j</italic>.</p>
<p>Energy: It symbolizes the image&#x2019;s uniformity or stability. The more homogeneous texture is indicated by substantial energy levels.
<disp-formula id="eqn-13"><label>(13)</label><mml:math id="mml-eqn-13" display="block"><mml:mrow><mml:mtext>Energy</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Entropy: The entropy of grayscale picture I is returned by entropy(I). The source image&#x2019;s texture can be described using entropy, an empirical metric of randomization.
<disp-formula id="eqn-14"><label>(14)</label><mml:math id="mml-eqn-14" display="block"><mml:mrow><mml:mtext>Entropy</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:msub><mml:mi>log</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>&#x2061;</mml:mo><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mrow><mml:mtext>&#xA0;or&#xA0;</mml:mtext></mml:mrow><mml:mn>0</mml:mn><mml:mrow><mml:mtext>&#xA0;if&#xA0;</mml:mtext></mml:mrow><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:math></disp-formula></p>
<p>Correlation: It calculates the linear dependence among two pairs of pixels. A more consistent texture is indicated by high correlation coefficients.
<disp-formula id="eqn-15"><label>(15)</label><mml:math id="mml-eqn-15" display="block"><mml:mrow><mml:mtext>Correlation&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mfrac><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mfrac></mml:math></disp-formula></p>
<p>Inverse Difference Moment (IDM): For asymmetrical images, the outcome is a low IDM magnitude, while for uniform pictures, a significantly greater measure. The uniformity of the picture also affects IDM. The weighting factor means that asymmetrical regions will only contribute slightly to IDM.
<disp-formula id="eqn-16"><label>(16)</label><mml:math id="mml-eqn-16" display="block"><mml:mrow><mml:mtext>IDM&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Contrast: Quantifies the image&#x2019;s local variances. Significant variations in the brightness of adjacent pixels are indicated by high contrast indices.
<disp-formula id="eqn-17"><label>(17)</label><mml:math id="mml-eqn-17" display="block"><mml:mrow><mml:mtext>Contrast&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Cluster Shade: To minimize the number of computations required for the cluster shade calculations, this texture feature can be expressed in the form of the image pixel values present in a given <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:mi>i</mml:mi><mml:mo>&#x2217;</mml:mo><mml:mi>j</mml:mi></mml:math></inline-formula> neighborhood with G gray levels ranging from 0 to G&#x2212;1, where f(i, j) exhibits the intensity at sample <inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:mi>i</mml:mi></mml:math></inline-formula>, line <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:mi>j</mml:mi></mml:math></inline-formula> of the region.
<disp-formula id="eqn-18"><label>(18)</label><mml:math id="mml-eqn-18" display="block"><mml:mrow><mml:mtext>Cluster Shade&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>j</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:msup><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mn>3</mml:mn></mml:mrow></mml:msup><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Cluster Prominence: The GLCM&#x2019;s skewness and asymmetry are gauged by this. Greater asymmetry over the mean is implied by values that are greater, whilst less fluctuation over the mean and a peak close to the mean are indicated by values that are lower.
<disp-formula id="eqn-19"><label>(19)</label><mml:math id="mml-eqn-19" display="block"><mml:mrow><mml:mtext>Cluster Prominence</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>4</mml:mn></mml:mrow></mml:msup><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Auto Correlation: An indicator of how fine or coarse a texture is called autocorrelation.
<disp-formula id="eqn-20"><label>(20)</label><mml:math id="mml-eqn-20" display="block"><mml:mrow><mml:mtext>Auto Correlation</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>.</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Homogeneity: It represents how closely the GLCM diagonal and the element scatter are related. Elevated homogeneity levels imply a more homogeneous texture since they show that the elements are grouped across the diagonal.
<disp-formula id="eqn-21"><label>(21)</label><mml:math id="mml-eqn-21" display="block"><mml:mrow><mml:mtext>Homogeneity</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mfrac><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:math></disp-formula>where <italic>&#x03BC;</italic> is the weighted pixel average due to matrix symmetry and &#x03C3; is the weighted pixel variance due to matrix symmetry.</p>
<p>The 10 advanced texture features that are utilized in this paper are illustrated below along with their mathematical expressions.</p>
<p>Mean: It represents the mean intensity of gray levels in the region of interest (ROI).
<disp-formula id="eqn-22"><label>(22)</label><mml:math id="mml-eqn-22" display="block"><mml:mrow><mml:mtext>Mean&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>The Sum of Squares Variance: An estimation of the spread of nearby intensity scale pairs around the GLCM mean intensity level is called sum variance.
<disp-formula id="eqn-23"><label>(23)</label><mml:math id="mml-eqn-23" display="block"><mml:mrow><mml:mtext>Sum of Squares Variance</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Dissimilarity: A linear assessment of an image&#x2019;s local variances is called dissimilarity.
<disp-formula id="eqn-24"><label>(24)</label><mml:math id="mml-eqn-24" display="block"><mml:mrow><mml:mtext>Dissimilarity&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>|</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>j</mml:mi><mml:mo>|</mml:mo></mml:mrow><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Sum Average: The association between merging with lower intensity levels and pairings with higher intensity levels is computed by the sum average.
<disp-formula id="eqn-25"><label>(25)</label><mml:math id="mml-eqn-25" display="block"><mml:mrow><mml:mtext>Sum Average&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mi>i</mml:mi><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mo>+</mml:mo><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Sum Variance: With a significant correlation to first-order statistical parameters like standard deviation, variance serves as an index of variability. As a gray level score deviates from its mean, variance rises.
<disp-formula id="eqn-26"><label>(26)</label><mml:math id="mml-eqn-26" display="block"><mml:mrow><mml:mtext>Sum Variance&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>f</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mo>+</mml:mo><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Sum Entropy: The variation in adjacent intensity values added together is known as total entropy.
<disp-formula id="eqn-27"><label>(27)</label><mml:math id="mml-eqn-27" display="block"><mml:mrow><mml:mtext>Sum Entropy&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mo>+</mml:mo><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mi>log</mml:mi><mml:mo>&#x2061;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mo>+</mml:mo><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Difference Variance: It is measured by variance, which assigns greater weights to intensity threshold differences across pairings that deviate further from the mean.
<disp-formula id="eqn-28"><label>(28)</label><mml:math id="mml-eqn-28" display="block"><mml:mrow><mml:mtext>Difference Variance&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mi>M</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mi>M</mml:mi></mml:mrow></mml:msubsup><mml:mi>i</mml:mi><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Difference Entropy: This quantifies the degree of randomness or unpredictability in the variations in adjacent intensity values.
<disp-formula id="eqn-29"><label>(29)</label><mml:math id="mml-eqn-29" display="block"><mml:mrow><mml:mtext>Difference Entropy&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>g</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi><mml:mrow><mml:mo>{</mml:mo><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>x</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>}</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Information Measures of Correlation (IC1): It quantifies the texture&#x2019;s uncertainty by evaluating the relationship amongst the probability variations of <italic>i</italic> and <italic>j</italic> employing mutual information I(<italic>x</italic>, <italic>y</italic>):
<disp-formula id="eqn-30"><label>(30)</label><mml:math id="mml-eqn-30" display="block"><mml:mrow><mml:mtext>IC</mml:mtext></mml:mrow><mml:mn>1</mml:mn><mml:mo>=</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:msub><mml:mi>log</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>&#x2061;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>log</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>&#x2061;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>x</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Information Measures of Correlation (IC2): It measures the texture&#x2019;s uncertainty by evaluating the relationship amongst the probability distributions of <italic>i</italic> and <italic>j</italic>. IMC2 has a range of [0, 1], where 0 denotes the situation where there is no exchange of information between two independent ranges, and the greatest value denotes the situation when there are two fully dependent and homogeneous probabilities.
<disp-formula id="eqn-31"><label>(31)</label><mml:math id="mml-eqn-31" display="block"><mml:mrow><mml:mtext>IC</mml:mtext></mml:mrow><mml:mn>2</mml:mn><mml:mo>=</mml:mo><mml:msqrt><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mn>2</mml:mn><mml:msub><mml:mi>log</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>&#x2061;</mml:mo><mml:mi>N</mml:mi></mml:mrow></mml:msup></mml:msqrt></mml:math></disp-formula></p>
</sec>
<sec id="s5_4_2">
<label>5.4.2</label>
<title>GLRLM Features</title>
<p>In this paper, 7 local higher order statistics texture coefficients based on the grey level run-length matrix will be processed that are discussed below with mathematical expressions. Here <italic>f</italic>(<italic>i, j</italic>) represents the element in cell <italic>i</italic>, <italic>j</italic> of a normalized Run Length Matrix, <italic>N</italic> represents the total number of runs and NP represents the sum of pixels:</p>
<p>Short Run Emphasis (SRE): A range that represents a measurement of small run lengths; a higher value denotes more fine-grained textural features and shorter run lengths.
<disp-formula id="eqn-32"><label>(32)</label><mml:math id="mml-eqn-32" display="block"><mml:mrow><mml:mtext>SRE</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mfrac><mml:mrow><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msup><mml:mi>j</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mfrac></mml:math></disp-formula></p>
<p>Long Run Emphasis (LRE): A metric used to determine the long run length pattern; longer run lengths and coarser structural textures are indicated by higher levels.
<disp-formula id="eqn-33"><label>(33)</label><mml:math id="mml-eqn-33" display="block"><mml:mrow><mml:mtext>LRE</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2217;</mml:mo><mml:msup><mml:mi>j</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:math></disp-formula></p>
<p>Grey-Level Non-uniformity (GLN): A lesser GLN value is correlated with more intensity value coherence. GLN is a metric used to quantify the likeness of gray-level intensity levels in a picture.
<disp-formula id="eqn-34"><label>(34)</label><mml:math id="mml-eqn-34" display="block"><mml:mrow><mml:mtext>GLN</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:math></disp-formula></p>
<p>Run Length Non-uniformity (RLN): A smaller number of RLN indicates greater homogeneity across the run lengths in the picture. RLN assesses the similarities of run lengths across the picture.
<disp-formula id="eqn-35"><label>(35)</label><mml:math id="mml-eqn-35" display="block"><mml:mrow><mml:mtext>RLN</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:math></disp-formula></p>
<p>Run Percentage (RP): It evaluates the correlation among the number of runs and voxels in the ROI to ascertain how fine the texture is.
<disp-formula id="eqn-36"><label>(36)</label><mml:math id="mml-eqn-36" display="block"><mml:mrow><mml:mtext>RP</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mi>N</mml:mi><mml:msub><mml:mi>N</mml:mi><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:msub></mml:mfrac></mml:math></disp-formula></p>
<p>Low Grey-Level Run Emphasis (LGRE): A higher score in LGLRE indicates an increased number of low gray-level frequencies in the picture. LGLRE evaluates the proportion of low gray-level frequencies.
<disp-formula id="eqn-37"><label>(37)</label><mml:math id="mml-eqn-37" display="block"><mml:mrow><mml:mtext>LGRE</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mfrac><mml:mrow><mml:mi>f</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mfrac></mml:math></disp-formula></p>
<p>High Grey-Level Run Emphasis (HGRE): A higher score in HGLRE indicates an increased amount of high gray-level frequencies in the picture. HGLRE quantifies the spread of the larger gray-level frequencies.
<disp-formula id="eqn-38"><label>(38)</label><mml:math id="mml-eqn-38" display="block"><mml:mrow><mml:mtext>HGRE</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2217;</mml:mo><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:math></disp-formula></p>
</sec>
<sec id="s5_4_3">
<label>5.4.3</label>
<title>Statistical Features</title>
<p>In this paper, we evaluated 8 statistical features that are discussed below with mathematical expressions.
<disp-formula id="eqn-39"><label>(39)</label><mml:math id="mml-eqn-39" display="block"><mml:mrow><mml:mtext>Mean Intensity</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msubsup><mml:mi>Y</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>i</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
<p>Contrast: Although it depends on the total grayscale dynamic range, contrast is a representation of a variation in spatial magnitude. A picture with an extensive array of gray levels and significant variations across individual voxels and their surrounding areas is said to have a high contrast when both the dynamic range and the spatial transition rate are substantial.
<disp-formula id="eqn-40"><label>(40)</label><mml:math id="mml-eqn-40" display="block"><mml:mrow><mml:mtext>Contrast&#xA0;</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:msup><mml:mi>n</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mrow><mml:mo>{</mml:mo><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>j</mml:mi><mml:mo>|</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Correlation: A correlation coefficient ranges from 0 (uncorrelated) to 1 (completely correlated), indicating how linearly the gray level scores rely on the corresponding voxels.
<disp-formula id="eqn-41"><label>(41)</label><mml:math id="mml-eqn-41" display="block"><mml:mrow><mml:mtext>Correlation</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mi>M</mml:mi></mml:mrow></mml:msubsup><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>M</mml:mi></mml:mrow></mml:msubsup><mml:mfrac><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>j</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mi>f</mml:mi><mml:mrow><mml:mo>[</mml:mo><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:msup><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mfrac></mml:math></disp-formula></p>
<p>Standard deviation: It specifies how much the mean value deviates or is dissipated.
<disp-formula id="eqn-42"><label>(42)</label><mml:math id="mml-eqn-42" display="block"><mml:mrow><mml:mtext>Standard deviation</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:msqrt><mml:mrow><mml:mo>{</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mrow><mml:mo>[</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mi>m</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>&#x2217;</mml:mo><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:msqrt></mml:math></disp-formula></p>
<p>Entropy: It describes how arbitrary or uncertain the image attributes are. It calculates the mean quantity of data needed to encode the picture elements.
<disp-formula id="eqn-43"><label>(43)</label><mml:math id="mml-eqn-43" display="block"><mml:mrow><mml:mtext>Entropy</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>M</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mrow><mml:mo>(</mml:mo><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2217;</mml:mo><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi><mml:mn>2</mml:mn><mml:mi>f</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>HI represents 3 histogram features.</p>
<p>A detailed explanation of the proposed methodology is illustrated in <xref ref-type="fig" rid="fig-17">Fig. 17</xref>.</p>
<fig id="fig-17">
<label>Figure 17</label>
<caption>
<title>A detailed visual illustration of the proposed methodology</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-17.tif"/>
</fig>
</sec>
</sec>
<sec id="s5_5">
<label>5.5</label>
<title>Quality Evaluation Metrics</title>
<p>Image classification, or deriving information from a picture through CV and ML techniques, is the final phase. The 12 distinct metrics have been utilized to quantify the textural feature extraction.</p>
<sec id="s5_5_1">
<label>5.5.1</label>
<title>Sensitivity</title>
<p>Sensitivity (SE) represents the exactness of predictions made and expressed mathematically as the number of rightly detected points in the class (true positives; TP) divided by the total number of points in the class (positives; P).
<disp-formula id="eqn-44"><label>(44)</label><mml:math id="mml-eqn-44" display="block"><mml:mrow><mml:mtext>SE</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext>&#xA0;TP</mml:mtext></mml:mrow><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mrow><mml:mtext>P</mml:mtext></mml:mrow></mml:math></disp-formula></p>
</sec>
<sec id="s5_5_2">
<label>5.5.2</label>
<title>Specificity</title>
<p>Specificity refers to the likelihood of a negative test result with a condition of the individual actually being negative.
<disp-formula id="eqn-45"><label>(45)</label><mml:math id="mml-eqn-45" display="block"><mml:mrow><mml:mtext>SP</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext>TN</mml:mtext></mml:mrow><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mrow><mml:mtext>N</mml:mtext></mml:mrow></mml:math></disp-formula></p>
</sec>
<sec id="s5_5_3">
<label>5.5.3</label>
<title>Positive Predictive Value (PPV) and Negative Predictive Value (NPV)</title>
<p>The positive and negative predictive values (PPV and NPV) represent the amount of positive and negative outcomes in statistics and diagnostic tests that are true positive and true negative results, respectively.
<disp-formula id="eqn-46"><label>(46)</label><mml:math id="mml-eqn-46" display="block"><mml:mrow><mml:mtext>PPV</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mtext>FP</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula>
<disp-formula id="eqn-47"><label>(47)</label><mml:math id="mml-eqn-47" display="block"><mml:mrow><mml:mtext>NPV</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext>TN</mml:mtext></mml:mrow><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>TN</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mtext>FN</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math></disp-formula></p>
</sec>
<sec id="s5_5_4">
<label>5.5.4</label>
<title>False Positive Rate (FPR) and False Negative Rate (FNR)</title>
<p>FPR is the inaccurate exclusion of a null hypothesis that is actually true. FNR is the inadequacy to reject a null hypothesis that is actually false.
<disp-formula id="eqn-48"><label>(48)</label><mml:math id="mml-eqn-48" display="block"><mml:mrow><mml:mtext>FNR</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext>FN</mml:mtext></mml:mrow><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mrow><mml:mtext>P</mml:mtext></mml:mrow></mml:math></disp-formula>
<disp-formula id="eqn-49"><label>(49)</label><mml:math id="mml-eqn-49" display="block"><mml:mrow><mml:mtext>FPR</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext>FP</mml:mtext></mml:mrow><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mrow><mml:mtext>N</mml:mtext></mml:mrow></mml:math></disp-formula></p>
</sec>
<sec id="s5_5_5">
<label>5.5.5</label>
<title>False Discovery Rate (FDR)</title>
<p>It is the anticipated proportion of the number of false discoveries to the sum of positive classifications. This summation includes both the number of FP and TP.
<disp-formula id="eqn-50"><label>(50)</label><mml:math id="mml-eqn-50" display="block"><mml:mrow><mml:mtext>FDR</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext>FP</mml:mtext></mml:mrow><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mspace width="negativethinmathspace" /><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mtext>FP</mml:mtext></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
</sec>
<sec id="s5_5_6">
<label>5.5.6</label>
<title>F Score</title>
<p>F score is an estimate of a test&#x2019;s accuracy in the analytical study of binary classification.
<disp-formula id="eqn-51"><label>(51)</label><mml:math id="mml-eqn-51" display="block"><mml:mrow><mml:mtext>F</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mn>2</mml:mn><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mspace width="negativethinmathspace" /><mml:mrow><mml:mo>(</mml:mo><mml:mn>2</mml:mn><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mtext>FP</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mtext>FN</mml:mtext></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
</sec>
<sec id="s5_5_7">
<label>5.5.7</label>
<title>Matthews Correlation Coefficient (MCC)</title>
<p>It is a measure of association for two binary variables employed as a measure of the quality of binary classifications.
<disp-formula id="eqn-52"><label>(52)</label><mml:math id="mml-eqn-52" display="block"><mml:mrow><mml:mtext>MCC</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>TP * TN</mml:mtext></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mtext>FP * FN</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:msqrt><mml:mo stretchy="false">(</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mtext>FP</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>TP</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mtext>FN</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>TN</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mtext>FP</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mtext>TN</mml:mtext></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mtext>FN</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:msqrt></mml:math></disp-formula></p>
</sec>
<sec id="s5_5_8">
<label>5.5.8</label>
<title>Accuracy</title>
<p>Accuracy is how close a given set of measurements (observations or readings) are to their true value.</p>
</sec>
<sec id="s5_5_9">
<label>5.5.9</label>
<title>Area Under Curve (AUC)</title>
<p>AUC illustrates the capability of a model, notwithstanding a specified threshold, to characterize the 2 classes. The higher value indicates the model performs better. It has a value between 0 and 1.</p>
<fig id="fig-21">
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-21.tif"/>
</fig>
</sec>
</sec>
</sec>
<sec id="s6">
<label>6</label>
<title>Experimental Results and Discussion</title>
<p>The evaluation of various segmentation techniques is covered in <xref ref-type="sec" rid="s6">Section 6</xref>. The measures outlined in <xref ref-type="sec" rid="s5_5">Section 5.5</xref> served as the foundation for the evaluation. There are three sections to the experimental results. Initially, we assess the algorithms using all of the image data. Next, we calculated each of the 200 photos&#x2019; textural features. Next, we talk about how well the segmentation techniques work for three different kinds of lesions: melanomas, atypical nevi, and common nevi. As we&#x2019;ll see, in each of these three scenarios, the segmentation approaches perform differently.</p>
<p><bold><italic>Ablation Study</italic></bold></p>
<p>We assembled the skin cancer pictures from the PH2 database. The resolution of pictures is 768 &#x002A; 560 and is categorized into 3 classes Melanoma, Atypical Nevus, and Common nevi. The number of pictures in every class is 40, 80 and 80, respectively. A few sample pictures from the database are illustrated in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>. We employ the textural features of skin cancer photos to validate the precision and effectiveness of our suggested method. From GLCM numerous features are deployed for the skin cancer classification namely autocorrelation, Contrast, Correlation, Energy, Entropy, homogeneity, maximum probability, sum of squares, sum average, and sum variance. Each class of skin malignancy is differentiated by certain features. The algorithms for feature extraction withdraw distinctive features out of every class of skin cancer images. The GLCM acquires the RGB image initially and transforms it to a grayscale image, wherein the number of rows and columns matches the number of gray levels. The periodicity of a particular number of gray levels recurring at multiple points within an image is determined by GLCM. GLCM feature extraction exploits the spatial relationship among various combinations of pixels to map the gray level co-occurrence probabilities in distinct angular positions. The GLRLM is also employed to extract textural features. An algorithm for generating higher order statistical texture features is the GLRLM methodology. A line of pixels having an identical intensity value oriented in a particular direction is referred to as a gray level run. The gray level run length is computed by the amount of these pixels, and the run length value is the total number of occurrences. In this case, the number of adjacent pixels in the specified direction that have the same grey intensity can be referred to as the run length. 
7 GLRLM features are withdrawn in our proposed method and the parameters are SRE, LRE, GLN, RLN, RP, LGLRE, and HGLRE. Also, we evaluated 8 statistical features. With the combination of 3 types of textural feature extraction, a total of 34 features are used in this work. A feature vector of 200 &#x002A; 34, i.e., 200 images from the PH2 dataset and 34 textural features is made which is used to classify skin cancer into benign and malignant categories. The different segmentation techniques have been utilized and compared belonging to the categories: Thresholding based, cluster based, edge based, and region based. The segmentation results were compared with the reference images (ground truth) and the segmentation errors were evaluated by the 12 metrics described above. The results are illustrated in <xref ref-type="fig" rid="fig-18">Figs. 18</xref>&#x2013;<xref ref-type="fig" rid="fig-20">20</xref>. Type 1: With an accuracy score of 90.00%, the Otsu (3 level) method is the best approach. The Otsu (2 level) and Sauvola have the best SE rate, which is 97.50%. The Binarized Sauvola achieves the best SP rate of 82.50%. The Binarized Sauvola achieves the best PPV rate of 91.30% (benign) and 82.5% (malignant). The Binarized Sauvola achieves the best FDR rate of 8.8% (benign) and 17.5% (malignant). The Otsu (2 level) yields the best NPV rate of 91.30%. The Otsu (2 level) yields the best FNR rate of 2.50%. The Binarized Sauvola achieves the lowest FPR (17.5%). The Binarized Sauvola achieves the best BA rate of 86.80%. The Otsu (3 level) achieves the best F1 rate of 92.77%. The Otsu (3 level) achieves the best MCC rate of 77.15%. The Binarized Sauvola achieves the best AUC rate of 93.5% as shown in <xref ref-type="table" rid="table-3">Table 3</xref>. <xref ref-type="table" rid="table-4">Table 4</xref> shows the median of the segmentation scores for each method.</p>
<fig id="fig-18">
<label>Figure 18</label>
<caption>
<title>AUC plots and confusion matrix referring to Type 1 and Type 2 approaches for FCM, respectively</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-18.tif"/>
</fig><fig id="fig-19">
<label>Figure 19</label>
<caption>
<title>Graphical representation of resultant ACC, SE, SP and AUC for all the segmentation approaches corresponding to Type 1 classification</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-19.tif"/>
</fig><fig id="fig-20">
<label>Figure 20</label>
<caption>
<title>Graphical representation of resultant Accuracy, SE, SP and AUC for all the segmentation approaches corresponding to Type 2 classification</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_52548-fig-20.tif"/>
</fig><table-wrap id="table-3">
<label>Table 3</label>
<caption>
<title>The resultant of the segmentation scores for each method for Type 1</title>
</caption>
<table frame="hsides">
<colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Approach</th>
<th>Acc</th>
<th>SE</th>
<th>SP</th>
<th>PPV</th>
<th>FDR</th>
<th>NPV</th>
<th>FNR</th>
<th>FPR</th>
<th>BA</th>
<th>F1</th>
<th>MCC</th>
<th>AUC</th>
</tr>
</thead>
<tbody>
<tr>
<td>Otsu (global)</td>
<td>84.20</td>
<td>92.50</td>
<td>67.50</td>
<td>85.10(0)<break/> 81.80(1)</td>
<td>14.90(0)<break/> 18.20(1)</td>
<td>81.80</td>
<td>7.50</td>
<td>32.50</td>
<td>80.00</td>
<td>88.60</td>
<td>63.34</td>
<td>88.70</td>
</tr>
<tr>
<td>Otsu (2 level)</td>
<td>82.50</td>
<td><bold>97.50</bold></td>
<td>52.50</td>
<td>80.40<break/> 91.30</td>
<td>19.60(0)<break/> 8.70(1)</td>
<td><bold>91.30</bold></td>
<td><bold>2.50</bold></td>
<td>47.50</td>
<td>75.00</td>
<td>88.10</td>
<td>60.19</td>
<td>90.03</td>
</tr>
<tr>
<td>Otsu (3 level)</td>
<td><bold>90.00</bold></td>
<td>96.00</td>
<td>77.50</td>
<td>89.50(0)<break/> 91.20(1)</td>
<td>10.50(0)<break/> 8.80(1)</td>
<td>91.10</td>
<td>3.70</td>
<td>22.50</td>
<td>86.75</td>
<td><bold>92.77</bold></td>
<td><bold>77.15</bold></td>
<td>92.68</td>
</tr>
<tr>
<td>Otsu (7 level)</td>
<td>84.20</td>
<td>92.50</td>
<td>67.50</td>
<td>85.10(0)<break/>81.80(1)</td>
<td>14.90(0)<break/>18.20(1)</td>
<td>81.81</td>
<td>7.50</td>
<td>32.50</td>
<td>80.00</td>
<td>88.62</td>
<td>63.33</td>
<td>89.40</td>
</tr>
<tr>
<td>Niblack</td>
<td>83.30</td>
<td>91.25</td>
<td>67.50</td>
<td>84.90(0)<break/>79.40(1)</td>
<td>20.50(0)<break/>20.60(1)</td>
<td>79.40</td>
<td>8.75</td>
<td>32.5</td>
<td>79.30</td>
<td>87.95</td>
<td>61.46</td>
<td>86.30</td>
</tr>
<tr>
<td>Sauvola</td>
<td>75</td>
<td><bold>97.50</bold></td>
<td>30.00</td>
<td>73.60(0)<break/>85.70(1)</td>
<td>8.80(0)<break/>17.50(1)</td>
<td>85.70</td>
<td><bold>2.50</bold></td>
<td>70.00</td>
<td>63.75</td>
<td>83.80</td>
<td>40.38</td>
<td>83.5</td>
</tr>
<tr>
<td>Binarized Sauvola</td>
<td>88.30</td>
<td>91.25</td>
<td><bold>82.50</bold></td>
<td><bold>91.3</bold>(<bold>0</bold>)<break/><bold>82.5</bold>(<bold>1</bold>)</td>
<td><bold>8.8</bold>(<bold>0</bold>)<break/><bold>17.5</bold>(<bold>1</bold>)</td>
<td>82.5</td>
<td>8.75</td>
<td><bold>17.5</bold></td>
<td><bold>86.8</bold></td>
<td>91.25</td>
<td>73.75</td>
<td><bold>93.5</bold></td>
</tr>
<tr>
<td>Chan Vese</td>
<td>82.50</td>
<td>72.90</td>
<td>80</td>
<td>90(0)<break/>80(1)</td>
<td>10.0(0)<break/>20.0(1)</td>
<td>80.00</td>
<td>10.00</td>
<td>20.00</td>
<td>85.00</td>
<td>90.00</td>
<td>71.00</td>
<td>84.96</td>
</tr>
<tr>
<td>Watershed</td>
<td>81.70</td>
<td>95.00</td>
<td>55.00</td>
<td>80.90(0)<break/>84.60(1)</td>
<td>19.1(0)<break/>15.4(1)</td>
<td>84.60</td>
<td>5.00</td>
<td>45.00</td>
<td>75.00</td>
<td>87.35</td>
<td>57.21</td>
<td>87.1</td>
</tr>
<tr>
<td>FCM</td>
<td>81.70</td>
<td>88.7</td>
<td>67.5</td>
<td>84.5(0)<break/>75(1)</td>
<td>15.5(0)<break/>25.00(1)</td>
<td>75</td>
<td>11.25</td>
<td>32.5</td>
<td>78.1</td>
<td>86.58</td>
<td>57.87</td>
<td>87</td>
</tr>
<tr>
<td>K-Means</td>
<td>85.80</td>
<td>95.00</td>
<td>67.50</td>
<td>85.40(0)<break/>87.1(1)</td>
<td>14.60(0)<break/>12.90(1)</td>
<td>87.00</td>
<td>5.00</td>
<td>32.50</td>
<td>81.20</td>
<td>89.90</td>
<td>67.31</td>
<td>85.80</td>
</tr>
<tr>
<td>Log</td>
<td>70.00</td>
<td>80.00</td>
<td>50.00</td>
<td>76.20(0)<break/>55.60(1)</td>
<td>23.80(0)<break/>44.40(1)</td>
<td>55.00</td>
<td>20.00</td>
<td>50.00</td>
<td>65.00</td>
<td>78.00</td>
<td>30.86</td>
<td>76.50</td>
</tr>
<tr>
<td>Prewitt</td>
<td>71.70</td>
<td>96.20</td>
<td>22.50</td>
<td>71.30(0)<break/>75.00(1)</td>
<td>28.70(0)<break/>25.00(1)</td>
<td>75.00</td>
<td>3.07</td>
<td>77.50</td>
<td>59.30</td>
<td>81.91</td>
<td>29.46</td>
<td>68.10</td>
</tr>
<tr>
<td>Robert</td>
<td>75.00</td>
<td>88.70</td>
<td>47.50</td>
<td>77.20(0)<break/>67.90(1)</td>
<td>22.80(0)<break/>32.10(1)</td>
<td>32.11</td>
<td>11.2</td>
<td>52.50</td>
<td>68.00</td>
<td>82.50</td>
<td>40.71</td>
<td>81.60</td>
</tr>
<tr>
<td>Zero crossing</td>
<td>78.30</td>
<td>87.50</td>
<td>60.00</td>
<td>81.40(0)<break/>70.60(1)</td>
<td>18.60(0)<break/>29.40(1)</td>
<td>70.50</td>
<td>12.50</td>
<td>40.00</td>
<td>73.75</td>
<td>84.33</td>
<td>49.69</td>
<td>82.70</td>
</tr>
<tr>
<td>FMM</td>
<td>72.50</td>
<td>81.25</td>
<td>55.00</td>
<td>78.30(0)<break/>59.50(1)</td>
<td>21.70(0)<break/>40.50(1)</td>
<td>59.40</td>
<td>18.75</td>
<td>45.00</td>
<td>68.10</td>
<td>79.75</td>
<td>37.00</td>
<td>75.70</td>
</tr>
<tr>
<td>Geodesic</td>
<td>72.50</td>
<td>86.20</td>
<td>45.00</td>
<td>75.80(0)<break/>62.1(1)</td>
<td>24.20(0)<break/>37.90(1)</td>
<td>62.00</td>
<td>13.75</td>
<td>55.00</td>
<td>63.10</td>
<td>80.00</td>
<td>34.40</td>
<td>76.80</td>
</tr>
<tr>
<td>Canny</td>
<td>65.80</td>
<td>81.20</td>
<td>35.00</td>
<td>71.40(0)<break/>48.30(1)</td>
<td>28.60(0)<break/>51.70(1)</td>
<td>48.00</td>
<td>18.70</td>
<td>65.00</td>
<td>58.10</td>
<td>76.00</td>
<td>17.8</td>
<td>66.20</td>
</tr>
<tr>
<td>Gray difference</td>
<td>77.50</td>
<td>88.00</td>
<td>55.00</td>
<td>79.80(0)<break/>71.00(1)</td>
<td>20.20(0)<break/>29.00(1)</td>
<td>70.90</td>
<td>11.20</td>
<td>45.00</td>
<td>71.50</td>
<td>84.00</td>
<td>47.11</td>
<td>82.50</td>
</tr>
<tr>
<td>Sobel</td>
<td>74.20</td>
<td>96.25</td>
<td>30.00</td>
<td>73.30(0)<break/>80.00(1)</td>
<td>26.70(0)<break/>20.00(1)</td>
<td>80.00</td>
<td>3.75</td>
<td>70.00</td>
<td>63.12</td>
<td>83.24</td>
<td>37.41</td>
<td>67.21</td>
</tr>
<tr>
<td>Iterative</td>
<td>81.70</td>
<td>95.00</td>
<td>55.00</td>
<td>80.90(0)<break/>84.60(1)</td>
<td>19.10(0)<break/>15.40(1)</td>
<td>84.61</td>
<td>5.00</td>
<td>45.00</td>
<td>75.00</td>
<td>87.35</td>
<td>57.21</td>
<td>86.90</td>
</tr>
</tbody>
</table>
</table-wrap><table-wrap id="table-4">
<label>Table 4</label>
<caption>
<title>The resultant of the segmentation scores for each method for Type 2</title>
</caption>
<table frame="hsides">
<colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Approach</th>
<th>Acc</th>
<th>SE</th>
<th>SP</th>
<th>PPV</th>
<th>FDR</th>
<th>NPV</th>
<th>FNR</th>
<th>FPR</th>
<th>BA</th>
<th>F1</th>
<th>MCC</th>
<th>AUC</th>
</tr>
</thead>
<tbody>
<tr>
<td>Otsu (global)</td>
<td>87.50</td>
<td>91.25</td>
<td>80.00</td>
<td>90.10(0)<break/> 82.10(1)</td>
<td>9.90(0)<break/> 17.90(1)</td>
<td>8.75</td>
<td>20.00</td>
<td>17.90</td>
<td>85.60</td>
<td>90.68</td>
<td>5.28</td>
<td>93.00</td>
</tr>
<tr>
<td>Otsu (2 level)</td>
<td>82.50</td>
<td>91.25</td>
<td>65.00</td>
<td>83.90<break/>78.80</td>
<td>16.10(0)<break/>21.20(1)</td>
<td>78.78</td>
<td>8.70</td>
<td>35.00</td>
<td>78.10</td>
<td>87.40</td>
<td>59.30</td>
<td>94.30</td>
</tr>
<tr>
<td>Otsu (3 level)</td>
<td>87.50</td>
<td>91.25</td>
<td>80.00</td>
<td>90.10(0)<break/>82.10(1)</td>
<td>9.90(0)<break/>17.90(1)</td>
<td>82.00</td>
<td>8.70</td>
<td>20.00</td>
<td>85.60</td>
<td>90.68</td>
<td>71.71</td>
<td>93.3</td>
</tr>
<tr>
<td>Otsu (7 level)</td>
<td>86.60</td>
<td>89.80</td>
<td>80.00</td>
<td>89.90(0)<break/>80.00(1)</td>
<td>10.00(0)<break/>20.00(1)</td>
<td>1.01</td>
<td>20.00</td>
<td>17.90</td>
<td>84.00</td>
<td>89.80</td>
<td>71.39</td>
<td>93.7</td>
</tr>
<tr>
<td>Niblack</td>
<td>81.70</td>
<td>86.25</td>
<td>72.50</td>
<td>86.30(0)<break/>72.50(1)</td>
<td>13.70(0)<break/>27.50(1)</td>
<td>72.50</td>
<td>13.70</td>
<td>27.50</td>
<td>79.30</td>
<td>86.25</td>
<td>58.75</td>
<td>89.06</td>
</tr>
<tr>
<td>Sauvola</td>
<td>80.80</td>
<td><bold>96.25</bold></td>
<td>50.00</td>
<td>79.40(0)<break/>87.00(1)</td>
<td>20.60(0)<break/>13.00(1)</td>
<td>86.95</td>
<td>3.70</td>
<td>50.00</td>
<td>73.10</td>
<td>87.00</td>
<td>55.30</td>
<td>84.70</td>
</tr>
<tr>
<td>Binarized Sauvola</td>
<td><bold>90.80</bold></td>
<td>92.50</td>
<td><bold>87.50</bold></td>
<td><bold>93.7</bold>(<bold>0</bold>)<break/><bold>85.4</bold>(<bold>1</bold>)</td>
<td><bold>6.30</bold>(<bold>0</bold>)<break/><bold>14.60</bold>(<bold>1</bold>)</td>
<td>85.30</td>
<td>7.50</td>
<td>12.50</td>
<td><bold>90.00</bold></td>
<td><bold>93.08</bold></td>
<td>79.71</td>
<td><bold>95.20</bold></td>
</tr>
<tr>
<td>Chan Vese</td>
<td>86.70</td>
<td>91.25</td>
<td>65</td>
<td>83.90(0)<break/>78.80(1)</td>
<td>16.1(0)<break/>21.2(1)</td>
<td>78.78</td>
<td>8.70</td>
<td>35.00</td>
<td>78.10</td>
<td>87.42</td>
<td>59.38</td>
<td>84.90</td>
</tr>
<tr>
<td>Watershed</td>
<td>86.70</td>
<td><bold>96.25</bold></td>
<td>67.50</td>
<td>85.60(0)<break/>90.00(1)</td>
<td>14.4(0)<break/>10.00(1)</td>
<td><bold>90.00</bold></td>
<td>3.70</td>
<td>32.50</td>
<td>81.80</td>
<td>90.58</td>
<td>69.40</td>
<td>89.70</td>
</tr>
<tr>
<td>FCM</td>
<td>87.40</td>
<td>87.50</td>
<td><bold>87.50</bold></td>
<td>93.3(0)<break/>77.8(1)</td>
<td>6.7(0)<break/>22.20(1)</td>
<td>77.77</td>
<td>1.25</td>
<td>22.2</td>
<td>87.50</td>
<td>90.32</td>
<td>73.02</td>
<td>94</td>
</tr>
<tr>
<td>K-Means</td>
<td>86.70</td>
<td>90.00</td>
<td>77.50</td>
<td>88.90(0)<break/>79.5(1)</td>
<td>11.10(0)<break/>20.50(1)</td>
<td>79.40</td>
<td><bold>1.00</bold></td>
<td>22.50</td>
<td>89.44</td>
<td>67.93</td>
<td>69.40</td>
<td>94.20</td>
</tr>
<tr>
<td>Log</td>
<td>72.500</td>
<td>83.700</td>
<td>50.00</td>
<td>77.00(0)<break/>60.60(1)</td>
<td>23.00(0)<break/>39.40(1)</td>
<td>60.00</td>
<td>16.00</td>
<td>50.00</td>
<td>66.00</td>
<td>80.00</td>
<td>35.60</td>
<td>77.90</td>
</tr>
<tr>
<td>Prewitt</td>
<td>70.80</td>
<td>88.75</td>
<td>35.00</td>
<td>73.20(0)<break/>60.90(1)</td>
<td>26.80(0)<break/>39.10(1)</td>
<td>60.80</td>
<td>11.20</td>
<td>65.00</td>
<td>61.80</td>
<td>80.20</td>
<td>28.44</td>
<td>76.30</td>
</tr>
<tr>
<td>Robert</td>
<td>82.50</td>
<td>93.75</td>
<td>60.00</td>
<td>82.40(0)<break/>82.80(1)</td>
<td>17.60(0)<break/>17.20(1)</td>
<td>82.70</td>
<td>6.25</td>
<td>40.00</td>
<td>76.80</td>
<td>87.70</td>
<td>59.18</td>
<td>86.30</td>
</tr>
<tr>
<td>Zero crossing</td>
<td>79.20</td>
<td>90.00</td>
<td>57.500</td>
<td>80.90(0)<break/>74.20(1)</td>
<td>19.10(0)<break/>25.80(1)</td>
<td>74.10</td>
<td>10.00</td>
<td>42.50</td>
<td>73.75</td>
<td>85.20</td>
<td>51.15</td>
<td>85.00</td>
</tr>
<tr>
<td>FMM</td>
<td>75.00</td>
<td>83.75</td>
<td>57.50</td>
<td>79.80(0)<break/>63.90(1)</td>
<td>20.20(0)<break/>36.10(1)</td>
<td>63.80</td>
<td>16.20</td>
<td>42.50</td>
<td>68.10</td>
<td>70.60</td>
<td><bold>81.70</bold></td>
<td>85.10</td>
</tr>
<tr>
<td>Geodesic</td>
<td>80.00</td>
<td>91.25</td>
<td>57.50</td>
<td>81.10(0)<break/>76.7(1)</td>
<td>18.90(0)<break/>23.30(1)</td>
<td>76.60</td>
<td>8.70</td>
<td>42.50</td>
<td>74.30</td>
<td>85.88</td>
<td>53.00</td>
<td>86.78</td>
</tr>
<tr>
<td>Canny</td>
<td>60.80</td>
<td>82.50</td>
<td>17.50</td>
<td>66.70(0)<break/>33.30(1)</td>
<td>33.30(0)<break/>66.70(1)</td>
<td>33.00</td>
<td>17.50</td>
<td><bold>82.50</bold></td>
<td>50.00</td>
<td>73.70</td>
<td>0.00</td>
<td>62.80</td>
</tr>
<tr>
<td>Gray difference</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>Sobel</td>
<td>72.50</td>
<td>90.00</td>
<td>37.5</td>
<td>74.20(0)<break/>65.20(1)</td>
<td>25.80(0)<break/>34.80(1)</td>
<td>65.21</td>
<td>10.00</td>
<td>62.50</td>
<td>63.75</td>
<td>81.35</td>
<td>32.93</td>
<td>76.96</td>
</tr>
<tr>
<td>Iterative</td>
<td>86.70</td>
<td>92.50</td>
<td>75.00</td>
<td>86.10(0)<break/>83.30(1)</td>
<td>11.90(0)<break/>16.70(1)</td>
<td>83.33</td>
<td>7.50</td>
<td>25.00</td>
<td>83.75</td>
<td>90.24</td>
<td>69.43</td>
<td>93.96</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Type 2: With an accuracy score of 90.80%, the Binarized Sauvola is the best approach. The Sauvola and Watershed have the best SE rate, at 96.25%. The Binarized Sauvola and FCM achieve the best SP rate of 87.50%. The Binarized Sauvola achieves the best PPV rate of 85.4% (malignant) and 93.7% (benign). The Binarized Sauvola exhibits the best FDR rate, which varies from 14.60% for instances of cancer to 6.30% for benign cases. The Watershed reaches the highest negative predictive value (NPV) rate at 90.00%. K-means clustering has the best FNR rate at 1.00%. At 82.50 percent, the Canny records the highest FPR.</p>
<p>The Binarized Sauvola achieves the best BA rate of 90.00%. The Binarized Sauvola achieves the best F1 rate of 93.08%. The FMM achieves the best MCC rate of 81.7%. The Binarized Sauvola achieves the best AUC rate of 95.20% as shown in <xref ref-type="table" rid="table-4">Table 4</xref>.</p>

</sec>
<sec id="s7">
<label>7</label>
<title>Conclusions and Future Work</title>
<p>By recognizing and classifying the lesion from the dermoscopy images, the research offers an image processing method that is crucial for the detection of skin cancer. 200 dermoscopic images (8 bits RGB images) with a 768 &#x002A; 560 resolution are recovered, segmented, feature extracted, and classed after preprocessing from the PH2 database. Pre-processing techniques include grayscale photo conversion from RGB and noise reduction with dull razor software. Numerous segmentation methodologies, including region-based, edge-based, cluster-based, and threshold-based approaches, have been put into operation and evaluated. Knowing how to distinguish benign from malignant moles is essential because they may initially seem to be the same. An essential tool for therapists with little to no training is automatic identification. In order to aid in the early diagnosis of malignant melanoma and melanocytic nevi lesions, this study implemented a computer-aided technique of categorizing melanoma based on dermoscopy images. Pre-processing mainly improves the images by removing unwanted hair, which facilitates precise feature extraction and helps distinguish between different kinds of lesions. Research has highlighted the combination of statistical, GLCM, and GLRLM features for dermoscopy picture identification. Thirty-two pertinent textural features have been identified from the co-occurrence matrix. In the final stage, the SVM classifier for classification has been recommended in two ways. Different levels of classification accuracy were demonstrated by the classification methodology&#x2019;s performance. The most effective method for classifying melanoma and atypical nevi, two types of skin cancer, is the Otsu (3 level) technique. At 90.00% accuracy, it received the best possible score. A different method that yields far better results divides melanoma and ordinary nevi into two categories of skin cancer using the Binarized Sauvola. 
This strategy offers an ideal accuracy score of 90.80%. This study suggests that the combination of textural traits can be useful in efficiently classifying malignant melanoma, common nevi, and atypical nevi in dermoscopy pictures.</p>
<p>In the future, this methodology could be applied for evaluation on larger skin-cancer databases like ISIC and HAM10000. We intend to use more sophisticated techniques in the future, particularly deep learning algorithms, to increase system stability and yield more precise diagnoses on dermatological photos. The aim of implementing these tactics is to identify and differentiate between skin lesions. Subsequent investigations might leverage this unique approach to augment the treatment estimate of the melanoma lesion framework and incorporate temporal visualizations to enhance the algorithm&#x2019;s capability to detect borders for melanoma lesions.</p>
</sec>
</body>
<back>
<ack>
<p>The authors thank the Biomedical Sensors &#x0026; Systems Lab for supporting this research and the article processing charges.</p>
</ack>
<sec><title>Funding Statement</title>
<p>The authors received no external funding for this study.</p>
</sec>
<sec><title>Author Contributions</title>
<p>Study conception and design: Khushmeen Kaur Brar, Bhawna Goyal; methodology: Khushmeen Kaur Brar, Bhawna Goyal, Ayush Dogra; software: Khushmeen Kaur Brar, Bhawna Goyal, Ayush Dogra; formal analysis: Khushmeen Kaur Brar, Bhawna Goyal, Ayush Dogra; data correction: Khushmeen Kaur Brar; writing&#x2014;original draft preparation: Khushmeen Kaur Brar, Bhawna Goyal, Ayush Dogra, Sampangi Rama Reddy; writing&#x2014;review and editing: Khushmeen Kaur Brar, Bhawna Goyal, Ahmed Alkhayyat, Manob Jyoti Saikia; supervision, Sampangi Rama Reddy, Ahmed Alkhayyat, Rajesh Singh, Manob Jyoti Saikia. All authors reviewed the results and approved the final version of the manuscript.</p>
</sec>
<sec sec-type="data-availability"><title>Availability of Data and Materials</title>
<p>The data used in the study is openly available at <ext-link ext-link-type="uri" xlink:href="https://www.fc.up.pt/addi/ph2%20database.html">https://www.fc.up.pt/addi/ph2%20database.html</ext-link>, accessed on 19 May 2022.</p>
</sec>
<sec><title>Ethics Approval</title>
<p>Not applicable.</p>
</sec>
<sec sec-type="COI-statement"><title>Conflicts of Interest</title>
<p>The authors declare no conflicts of interest to report regarding the present study.</p>
</sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Wu</surname></string-name>, <string-name><given-names>B.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Zeng</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Pan</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Wang</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Zhao</surname></string-name></person-group>, &#x201C;<article-title>Skin cancer classification with deep learning: A systematic review</article-title>,&#x201D; <source>Front. Oncol.</source>, vol. <volume>12</volume>, pp. <fpage>1</fpage>&#x2013;<lpage>20</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.3389/fonc.2022.893972</pub-id>; <pub-id pub-id-type="pmid">35912265</pub-id></mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Clarke</surname></string-name></person-group>, &#x201C;<article-title>Benign pigmented skin lesions</article-title>,&#x201D; <source>Royal Austral. Coll. General Practit. 2019</source>, vol. <volume>48</volume>, no. <issue>6</issue>, pp. <fpage>364</fpage>&#x2013;<lpage>367</lpage>, <year>2019</year>. doi: <pub-id pub-id-type="doi">10.31128/AJGP-12-18-4802</pub-id>; <pub-id pub-id-type="pmid">31220885</pub-id></mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R. H.</given-names> <surname>Oza</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Role of dermoscopy in hyperpigmented skin disorders: A tertiary care centre experience</article-title>,&#x201D; <source>IP Indian J. Clin. Exp. Dermatol.</source>, vol. <volume>9</volume>, no. <issue>4</issue>, pp. <fpage>184</fpage>&#x2013;<lpage>193</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.18231/j.ijced.2023.036</pub-id>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Michalak-Stoma</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Ma&#x0142;ki&#x0144;ska</surname></string-name>, and <string-name><given-names>D.</given-names> <surname>Krasowska</surname></string-name></person-group>, &#x201C;<article-title>Usefulness of dermoscopy to provide accurate assessment of skin cancers</article-title>,&#x201D; <source>Clin. Cosmet. Investig. Dermatol.</source>, vol. <volume>14</volume>, pp. <fpage>733</fpage>&#x2013;<lpage>746</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.2147/CCID.S305924</pub-id>; <pub-id pub-id-type="pmid">34234499</pub-id></mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>G.</given-names> <surname>Babino</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Melanoma diagnosed on digital dermoscopy monitoring: A side-by-side image comparison is needed to improve early detection</article-title>,&#x201D; <source>J. Am. Acad. Dermatol.</source>, vol. <volume>85</volume>, no. <issue>3</issue>, pp. <fpage>619</fpage>&#x2013;<lpage>625</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.1016/j.jaad.2020.07.013</pub-id>; <pub-id pub-id-type="pmid">32652193</pub-id></mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>G.</given-names> <surname>Nikolakis</surname></string-name>, <string-name><given-names>A. G.</given-names> <surname>Vaiopoulos</surname></string-name>, <string-name><given-names>I.</given-names> <surname>Georgopoulos</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Papakonstantinou</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Gaitanis</surname></string-name> and <string-name><given-names>C. C.</given-names> <surname>Zouboulis</surname></string-name></person-group>, &#x201C;<article-title>Insights, advantages, and barriers of teledermatology vs. face-to-face dermatology for the diagnosis and follow-up of non-melanoma skin cancer: A systematic review</article-title>,&#x201D; <source>Cancers</source>, vol. <volume>16</volume>, no. <issue>3</issue>, <year>2024, Art. no. 578</year>. doi: <pub-id pub-id-type="doi">10.3390/cancers16030578</pub-id>; <pub-id pub-id-type="pmid">38339329</pub-id></mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Fr&#x00FC;hauf</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Patient acceptance and diagnostic utility of automated digital image analysis of pigmented skin lesions</article-title>,&#x201D; <source>J. Eur. Acad. Dermatol. Venereol.</source>, vol. <volume>26</volume>, no. <issue>3</issue>, pp. <fpage>368</fpage>&#x2013;<lpage>372</lpage>, <year>2012</year>. doi: <pub-id pub-id-type="doi">10.1111/j.1468-3083.2011.04081.x</pub-id>; <pub-id pub-id-type="pmid">21504486</pub-id></mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>G. P.</given-names> <surname>Pfeifer</surname></string-name></person-group>, &#x201C;<article-title>Mechanisms of UV-induced mutations and skin cancer</article-title>,&#x201D; <source>Genome Instab. Dis.</source>, vol. <volume>1</volume>, no. <issue>3</issue>, pp. <fpage>99</fpage>&#x2013;<lpage>113</lpage>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.1007/s42764-020-00009-8</pub-id>; <pub-id pub-id-type="pmid">34589668</pub-id></mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="book"><person-group person-group-type="author"><string-name><given-names>R. E.</given-names> <surname>Neale</surname></string-name> <etal>et al.</etal></person-group>, <source>The Effects of Exposure to Solar Radiation on Human Health</source>. <publisher-loc>Australia</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>, <year>2023</year>, vol. <volume>22, no. 5</volume>. doi: <pub-id pub-id-type="doi">10.1007/s43630-023-00375-8</pub-id>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Zafar</surname></string-name>, <string-name><given-names>M. I.</given-names> <surname>Sharif</surname></string-name>, <string-name><given-names>M. I.</given-names> <surname>Sharif</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Kadry</surname></string-name>, <string-name><given-names>S. A. C.</given-names> <surname>Bukhari</surname></string-name> and <string-name><given-names>H. T.</given-names> <surname>Rauf</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion analysis and cancer detection based on machine/deep learning techniques: A comprehensive survey</article-title>,&#x201D; <source>Life</source>, vol. <volume>13</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>18</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.3390/life13010146</pub-id>; <pub-id pub-id-type="pmid">36676093</pub-id></mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Marrapodi</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Bellei</surname></string-name></person-group>, &#x201C;<article-title>The keratinocyte in the picture cutaneous melanoma microenvironment</article-title>,&#x201D; <source>Cancers</source>, vol. <volume>16</volume>, no. <issue>5</issue>, <year>2024</year>. doi: <pub-id pub-id-type="doi">10.3390/cancers16050913</pub-id>; <pub-id pub-id-type="pmid">38473275</pub-id></mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F.</given-names> <surname>Alendar</surname></string-name>, <string-name><given-names>I.</given-names> <surname>Drljevic</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Drljevic</surname></string-name>, and <string-name><given-names>T.</given-names> <surname>Alendar</surname></string-name></person-group>, &#x201C;<article-title>Early detection of melanoma skin cancer</article-title>,&#x201D; <source>Biomol. Biomed.</source>, vol. <volume>9</volume>, no. <issue>1</issue>, pp. <fpage>77</fpage>&#x2013;<lpage>80</lpage>, <year>2009</year>. doi: <pub-id pub-id-type="doi">10.17305/bjbms.2009.2861</pub-id>; <pub-id pub-id-type="pmid">19284400</pub-id></mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Riazi Esfahani</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Leveraging machine learning for accurate detection and diagnosis of melanoma and nevi: An interdisciplinary study in dermatology</article-title>,&#x201D; <source>Cureus</source>, vol. <volume>15</volume>, no. <issue>8</issue>, <year>2023, Art. no. e44120</year>. doi: <pub-id pub-id-type="doi">10.7759/cureus.44120</pub-id>; <pub-id pub-id-type="pmid">37750114</pub-id></mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F.</given-names> <surname>Nachbar</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>The ABCD rule of dermatoscopy. High prospective value in the diagnosis of doubtful melanocytic skin lesions</article-title>,&#x201D; <source>J. Am. Acad. Dermatol.</source>, vol. <volume>30</volume>, no. <issue>4</issue>, pp. <fpage>551</fpage>&#x2013;<lpage>559</lpage>, <year>1994</year>. doi: <pub-id pub-id-type="doi">10.1016/S0190-9622(94)70061-3</pub-id>; <pub-id pub-id-type="pmid">8157780</pub-id></mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E. M.</given-names> <surname>Senan</surname></string-name> and <string-name><given-names>M. E.</given-names> <surname>Jadhav</surname></string-name></person-group>, &#x201C;<article-title>Analysis of dermoscopy images by using ABCD rule for early detection of skin cancer</article-title>,&#x201D; <source>Glob. Trans. Proc.</source>, vol. <volume>2</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>7</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.1016/j.gltp.2021.01.001</pub-id>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Zhao</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>The correlation between dermoscopy and clinical and pathological tests in the evaluation of skin photoaging</article-title>,&#x201D; <source>Skin Res. Technol.</source>, vol. <volume>30</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>7</lpage>, <year>2024</year>. doi: <pub-id pub-id-type="doi">10.1111/srt.13578</pub-id>; <pub-id pub-id-type="pmid">38221782</pub-id></mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z. Zainal</given-names> <surname>Abidin</surname></string-name>, <string-name><given-names>S. A.</given-names> <surname>Asmai</surname></string-name>, <string-name><given-names>Z. Abal</given-names> <surname>Abas</surname></string-name>, <string-name><given-names>N. A.</given-names> <surname>Zakaria</surname></string-name>, and <string-name><given-names>S. N.</given-names> <surname>Ibrahim</surname></string-name></person-group>, &#x201C;<article-title>Development of edge detection for image segmentation</article-title>,&#x201D; <source>IOP Conf. Ser.: Mater. Sci. Eng.</source>, vol. <volume>864</volume>, no. <issue>1</issue>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.1088/1757-899X/864/1/012058</pub-id>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Schmid</surname></string-name></person-group>, &#x201C;<article-title>Lesion detection in dermatoscopic images using anisotropic diffusion and morphological flooding</article-title>,&#x201D; <source>IEEE Int. Conf. Image Process</source>, vol. <volume>3</volume>, pp. <fpage>449</fpage>&#x2013;<lpage>453</lpage>, <year>1999</year>. doi: <pub-id pub-id-type="doi">10.1109/icip.1999.817154</pub-id>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Ganster</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Pinz</surname></string-name>, <string-name><given-names>R.</given-names> <surname>R&#x00F6;hrer</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Wildling</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Binder</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Kittler</surname></string-name></person-group>, &#x201C;<article-title>Automated melanoma recognition</article-title>,&#x201D; <source>IEEE Trans. Med. Imaging.</source>, vol. <volume>20</volume>, no. <issue>3</issue>, pp. <fpage>233</fpage>&#x2013;<lpage>239</lpage>, <year>2001</year>. doi: <pub-id pub-id-type="doi">10.1109/42.918473</pub-id>; <pub-id pub-id-type="pmid">11341712</pub-id></mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Cacciapuoti</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>A measurement software for professional training in early detection of melanoma</article-title>,&#x201D; <source>Appl. Sci.</source>, vol. <volume>10</volume>, no. <issue>12</issue>, <year>2020, Art. no. 4351</year>. doi: <pub-id pub-id-type="doi">10.3390/app10124351</pub-id>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Garg</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Jindal</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion segmentation using k-mean and optimized fire fly algorithm</article-title>,&#x201D; <source>Multimed. Tools Appl.</source>, vol. <volume>80</volume>, no. <issue>5</issue>, pp. <fpage>7397</fpage>&#x2013;<lpage>7410</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.1007/s11042-020-10064-8</pub-id>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>G. M.</given-names> <surname>Kosgiker</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Deshpande</surname></string-name>, and <string-name><given-names>K.</given-names> <surname>Anjum</surname></string-name></person-group>, &#x201C;<article-title>Significant of multi-level pre-processing steps and its proper sequence in SegCaps skin lesion segmentation of dermoscopic images</article-title>,&#x201D; <source>Mater. Today: Proc.</source>, vol. <volume>51</volume>, no. <issue>2</issue>, pp. <fpage>129</fpage>&#x2013;<lpage>141</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.1016/j.matpr.2021.05.016</pub-id>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>&#x015E;ahin</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Alpaslan</surname></string-name>, and <string-name><given-names>D.</given-names> <surname>Hanbay</surname></string-name></person-group>, &#x201C;<article-title>Robust optimization of SegNet hyperparameters for skin lesion segmentation</article-title>,&#x201D; <source>Multimed. Tools Appl.</source>, vol. <volume>81</volume>, no. <issue>25</issue>, pp. <fpage>36031</fpage>&#x2013;<lpage>36051</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1007/s11042-021-11032-6</pub-id>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. Pezhman</given-names> <surname>Pour</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Seker</surname></string-name></person-group>, &#x201C;<article-title>Transform domain representation-driven convolutional neural networks for skin lesion segmentation</article-title>,&#x201D; <source>Expert Syst. Appl.</source>, vol. <volume>144</volume>, no. <issue>9</issue>, <year>2020, Art. no. 113129</year>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2019.113129</pub-id>.</mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>W.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>A. N.</given-names> <surname>Joseph Raj</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Tjahjadi</surname></string-name>, and <string-name><given-names>Z.</given-names> <surname>Zhuang</surname></string-name></person-group>, &#x201C;<article-title>Digital hair removal by deep learning for skin lesion segmentation</article-title>,&#x201D; <source>Pattern Recognit.</source>, vol. <volume>117</volume>, <year>2021, Art. no. 107994</year>. doi: <pub-id pub-id-type="doi">10.1016/j.patcog.2021.107994</pub-id>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Singh</surname></string-name>, <string-name><given-names>R. R.</given-names> <surname>Janghel</surname></string-name>, and <string-name><given-names>S. P.</given-names> <surname>Sahu</surname></string-name></person-group>, &#x201C;<article-title>SLICACO: An automated novel hybrid approach for dermatoscopic melanocytic skin lesion segmentation</article-title>,&#x201D; <source>Int. J. Imaging Syst. Technol.</source>, vol. <volume>31</volume>, no. <issue>4</issue>, pp. <fpage>1817</fpage>&#x2013;<lpage>1833</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.1002/ima.22591</pub-id>.</mixed-citation></ref>
<ref id="ref-27"><label>[27]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Nasir</surname></string-name>, <string-name><given-names>M. Attique</given-names> <surname>Khan</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Sharif</surname></string-name>, <string-name><given-names>I. U.</given-names> <surname>Lali</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Saba</surname></string-name> and <string-name><given-names>T.</given-names> <surname>Iqbal</surname></string-name></person-group>, &#x201C;<article-title>An improved strategy for skin lesion detection and classification using uniform segmentation and feature selection based approach</article-title>,&#x201D; <source>Microsc. Res. Tech.</source>, vol. <volume>81</volume>, no. <issue>6</issue>, pp. <fpage>528</fpage>&#x2013;<lpage>543</lpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.1002/jemt.23009</pub-id>; <pub-id pub-id-type="pmid">29464868</pub-id></mixed-citation></ref>
<ref id="ref-28"><label>[28]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Santos</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Veras</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Miguel</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Aires</surname></string-name>, <string-name><given-names>M. L.</given-names> <surname>Claro</surname></string-name> and <string-name><given-names>G. B.</given-names> <surname>Junior</surname></string-name></person-group>, &#x201C;<article-title>A skin lesion semi-supervised segmentation method</article-title>,&#x201D; <source>Int. Conf. Syst. Signals, Image Process</source>, vol. <volume>2020</volume>, pp. <fpage>33</fpage>&#x2013;<lpage>38</lpage>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.1109/IWSSIP48289.2020.9145240</pub-id>.</mixed-citation></ref>
<ref id="ref-29"><label>[29]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>Z. N.</given-names> <surname>Khan</surname></string-name></person-group>, &#x201C;<article-title>Frequency and spatial domain based saliency for pigmented skin lesion segmentation</article-title>,&#x201D; <year>2020</year>, <comment><italic>arXiv:2010.04022</italic></comment>.</mixed-citation></ref>
<ref id="ref-30"><label>[30]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Zafar</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Skin lesion segmentation from dermoscopic images using convolutional neural network</article-title>,&#x201D; <source>Sensors</source>, vol. <volume>20</volume>, no. <issue>6</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>14</lpage>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.3390/s20061601</pub-id>; <pub-id pub-id-type="pmid">32183041</pub-id></mixed-citation></ref>
<ref id="ref-31"><label>[31]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. M.</given-names> <surname>Jaisakthi</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Mirunalini</surname></string-name>, and <string-name><given-names>C.</given-names> <surname>Aravindan</surname></string-name></person-group>, &#x201C;<article-title>Automated skin lesion segmentation of dermoscopic images using GrabCut and k-means algorithms</article-title>,&#x201D; <source>IET Comput. Vis.</source>, vol. <volume>12</volume>, no. <issue>8</issue>, pp. <fpage>1088</fpage>&#x2013;<lpage>1095</lpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.1049/iet-cvi.2018.5289</pub-id>.</mixed-citation></ref>
<ref id="ref-32"><label>[32]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Ammar</surname></string-name>, <string-name><given-names>S. G.</given-names> <surname>Khawaja</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Atif</surname></string-name>, <string-name><given-names>M. U.</given-names> <surname>Akram</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>Sakeena</surname></string-name></person-group>, &#x201C;<article-title>Learning based segmentation of skin lesion from dermoscopic images</article-title>,&#x201D; in <conf-name>2018 IEEE 20th Int. Conf. e-Health Networking Appl. Serv. Heal. 2018</conf-name>, <year>2018</year>, pp. <fpage>1</fpage>&#x2013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/HealthCom.2018.8531156</pub-id>.</mixed-citation></ref>
<ref id="ref-33"><label>[33]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Jiang</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Cao</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Tao</surname></string-name>, and <string-name><given-names>H.</given-names> <surname>Zhang</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion segmentation based on multi-scale attention convolutional neural network</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, pp. <fpage>122811</fpage>&#x2013;<lpage>122825</lpage>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2020.3007512</pub-id>.</mixed-citation></ref>
<ref id="ref-34"><label>[34]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. M.</given-names> <surname>&#x00DC;nver</surname></string-name> and <string-name><given-names>E.</given-names> <surname>Ayan</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion segmentation in dermoscopic images with combination of yolo and grabcut algorithm</article-title>,&#x201D; <source>Diagnostics</source>, vol. <volume>9</volume>, no. <issue>3</issue>, <year>2019, Art. no. 72</year>. doi: <pub-id pub-id-type="doi">10.3390/diagnostics9030072</pub-id>; <pub-id pub-id-type="pmid">31295856</pub-id></mixed-citation></ref>
<ref id="ref-35"><label>[35]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>O. O.</given-names> <surname>Olugbara</surname></string-name>, <string-name><given-names>T. B.</given-names> <surname>Taiwo</surname></string-name>, and <string-name><given-names>D.</given-names> <surname>Heukelman</surname></string-name></person-group>, &#x201C;<article-title>Segmentation of melanoma skin lesion using perceptual color difference saliency with morphological analysis</article-title>,&#x201D; <source>Math. Probl. Eng.</source>, vol. <volume>2018</volume>, no. <issue>2</issue>, <year>2018, Art. no. 1524286</year>. doi: <pub-id pub-id-type="doi">10.1155/2018/1524286</pub-id>.</mixed-citation></ref>
<ref id="ref-36"><label>[36]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Akram</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Khan</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Sharif</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>Yasmin</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion segmentation and recognition using multichannel saliency estimation and M-SVM on selected serially fused features</article-title>,&#x201D; <source>J. Ambient Intell. Humaniz. Comput.</source>, vol. <volume>15</volume>, no. <issue>1</issue>, pp. <fpage>1083</fpage>&#x2013;<lpage>1102</lpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.1007/s12652-018-1051-5</pub-id>.</mixed-citation></ref>
<ref id="ref-37"><label>[37]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. A.</given-names> <surname>Khan</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>An implementation of normal distribution based segmentation and entropy controlled features selection for skin lesion detection and classification</article-title>,&#x201D; <source>BMC Cancer</source>, vol. <volume>18</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>20</lpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.1186/s12885-018-4465-8</pub-id>; <pub-id pub-id-type="pmid">29871593</pub-id></mixed-citation></ref>
<ref id="ref-38"><label>[38]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>S. M.</given-names> <surname>Jaisakthi</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Chandrabose</surname></string-name>, and <string-name><given-names>P.</given-names> <surname>Mirunalini</surname></string-name></person-group>, &#x201C;<article-title>Automatic skin lesion segmentation using semi-supervised learning technique</article-title>,&#x201D; <year>2017</year>, <comment><italic>arXiv:1703.04301</italic></comment>.</mixed-citation></ref>
<ref id="ref-39"><label>[39]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Jia</surname></string-name> and <string-name><given-names>L.</given-names> <surname>Shen</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion classification using class activation map</article-title>,&#x201D; <year>2017</year>, <comment><italic>arXiv:1703.01053</italic></comment>.</mixed-citation></ref>
<ref id="ref-40"><label>[40]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F.</given-names> <surname>Jiang</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Zhou</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Qin</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Wang</surname></string-name>, and <string-name><given-names>B.</given-names> <surname>Lei</surname></string-name></person-group>, &#x201C;<article-title>Decision-augmented generative adversarial network for skin lesion segmentation</article-title>,&#x201D; <source>Proc. Int. Symp. Biomed. Imaging.</source>, vol. <volume>2019</volume>, pp. <fpage>447</fpage>&#x2013;<lpage>450</lpage>, <year>2019</year>. doi: <pub-id pub-id-type="doi">10.1109/ISBI.2019.8759434</pub-id>.</mixed-citation></ref>
<ref id="ref-41"><label>[41]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Rahman</surname></string-name>, <string-name><given-names>M. S.</given-names> <surname>Hossain</surname></string-name>, <string-name><given-names>M. R.</given-names> <surname>Islam</surname></string-name>, <string-name><given-names>M. M.</given-names> <surname>Hasan</surname></string-name>, and <string-name><given-names>R. A.</given-names> <surname>Hridhee</surname></string-name></person-group>, &#x201C;<article-title>An approach for multiclass skin lesion classification based on ensemble learning</article-title>,&#x201D; <source>Inform. Med. Unlocked</source>, vol. <volume>25</volume>, no. <issue>10</issue>, <year>2021, Art. no. 100659</year>. doi: <pub-id pub-id-type="doi">10.1016/j.imu.2021.100659</pub-id>.</mixed-citation></ref>
<ref id="ref-42"><label>[42]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Moldovanu</surname></string-name>, <string-name><given-names>F. A. D.</given-names> <surname>Michis</surname></string-name>, <string-name><given-names>K. C.</given-names> <surname>Biswas</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Culea-Florescu</surname></string-name>, and <string-name><given-names>L.</given-names> <surname>Moraru</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion classification based on surface fractal dimensions and statistical color cluster features using an ensemble of machine learning techniques</article-title>,&#x201D; <source>Cancers</source>, vol. <volume>13</volume>, no. <issue>21</issue>, <year>2021, Art. no. 5256</year>. doi: <pub-id pub-id-type="doi">10.3390/cancers13215256</pub-id>; <pub-id pub-id-type="pmid">34771421</pub-id></mixed-citation></ref>
<ref id="ref-43"><label>[43]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. A.</given-names> <surname>Khan</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Sharif</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Akram</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Dama&#x0161;evi&#x010D;ius</surname></string-name>, and <string-name><given-names>R.</given-names> <surname>Maskeli&#x016B;nas</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion segmentation and multiclass classification using deep learning features and improved moth flame optimization</article-title>,&#x201D; <source>Diagnostics</source>, vol. <volume>11</volume>, no. <issue>5</issue>, <year>2021, Art. no. 811</year>. doi: <pub-id pub-id-type="doi">10.3390/diagnostics11050811</pub-id>; <pub-id pub-id-type="pmid">33947117</pub-id></mixed-citation></ref>
<ref id="ref-44"><label>[44]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. M. K.</given-names> <surname>Sarker</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>SLSNet: Skin lesion segmentation using a lightweight generative adversarial network</article-title>,&#x201D; <source>Expert Syst. Appl.</source>, vol. <volume>183</volume>, <year>2021, Art. no. 115433</year>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2021.115433</pub-id>.</mixed-citation></ref>
<ref id="ref-45"><label>[45]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Li</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Skin lesion segmentation via dense connected deconvolutional network</article-title>,&#x201D; <source>Proc.-Int. Conf. Pattern Recognit.</source>, vol. <volume>2018</volume>, pp. <fpage>671</fpage>&#x2013;<lpage>675</lpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.1109/ICPR.2018.8545136</pub-id>.</mixed-citation></ref>
<ref id="ref-46"><label>[46]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>Korotkov</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Garcia</surname></string-name></person-group>, &#x201C;<article-title>Computerized analysis of pigmented skin lesions: A review</article-title>,&#x201D; <source>Artif. Intell. Med.</source>, vol. <volume>56</volume>, no. <issue>2</issue>, pp. <fpage>69</fpage>&#x2013;<lpage>90</lpage>, <year>2012</year>. doi: <pub-id pub-id-type="doi">10.1016/j.artmed.2012.08.002</pub-id>; <pub-id pub-id-type="pmid">23063256</pub-id></mixed-citation></ref>
<ref id="ref-47"><label>[47]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Bardou</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Bouaziz</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Lv</surname></string-name>, and <string-name><given-names>T.</given-names> <surname>Zhang</surname></string-name></person-group>, &#x201C;<article-title>Hair removal in dermoscopy images using variational autoencoders</article-title>,&#x201D; <source>Skin Res. Technol.</source>, vol. <volume>28</volume>, no. <issue>3</issue>, pp. <fpage>445</fpage>&#x2013;<lpage>454</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1111/srt.13145</pub-id>; <pub-id pub-id-type="pmid">35254677</pub-id></mixed-citation></ref>
<ref id="ref-48"><label>[48]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Kasmi</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>SharpRazor: Automatic removal of hair and ruler marks from dermoscopy images</article-title>,&#x201D; <source>Skin Res. Technol.</source>, vol. <volume>29</volume>, no. <issue>4</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>12</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.1111/srt.13203</pub-id>; <pub-id pub-id-type="pmid">37113095</pub-id></mixed-citation></ref>
<ref id="ref-49"><label>[49]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. K.</given-names> <surname>Latif</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Aljanabi</surname></string-name></person-group>, &#x201C;<article-title>Analysing and evaluation of the effectiveness of different filters on segmentation skin tumors images</article-title>,&#x201D; <source>IOP Conf. Ser.: Mater. Sci. Eng.</source>, vol. <volume>1105</volume>, no. <issue>1</issue>, <year>2021, Art. no. 012068</year>. doi: <pub-id pub-id-type="doi">10.1088/1757-899x/1105/1/012068</pub-id>.</mixed-citation></ref>
<ref id="ref-50"><label>[50]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P. M. M.</given-names> <surname>Pereira</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Dermoscopic skin lesion image segmentation based on local binary pattern clustering: Comparative study</article-title>,&#x201D; <source>Biomed Signal Process. Control</source>, vol. <volume>59</volume>, no. <issue>3</issue>, <year>2020, Art. no. 101924</year>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2020.101924</pub-id>.</mixed-citation></ref>
<ref id="ref-51"><label>[51]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Malik</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Akram</surname></string-name>, <string-name><given-names>I.</given-names> <surname>Ashraf</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Rafiullah</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Ullah</surname></string-name> and <string-name><given-names>J.</given-names> <surname>Tanveer</surname></string-name></person-group>, &#x201C;<article-title>A hybrid preprocessor DE-ABC for efficient skin-lesion segmentation with improved contrast</article-title>,&#x201D; <source>Diagnostics</source>, vol. <volume>12</volume>, no. <issue>11</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>14</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.3390/diagnostics12112625</pub-id>; <pub-id pub-id-type="pmid">36359469</pub-id></mixed-citation></ref>
<ref id="ref-52"><label>[52]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Kaur</surname></string-name>, <string-name><given-names>H.</given-names> <surname>GholamHosseini</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Sinha</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>Lind&#x00E9;n</surname></string-name></person-group>, &#x201C;<article-title>Automatic lesion segmentation using atrous convolutional deep neural networks in dermoscopic skin cancer images</article-title>,&#x201D; <source>BMC Med. Imaging</source>, vol. <volume>22</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>13</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1186/s12880-022-00829-y</pub-id>; <pub-id pub-id-type="pmid">35644612</pub-id></mixed-citation></ref>
<ref id="ref-53"><label>[53]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>P. M.</given-names> <surname>Kazaj</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Koosheshi</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Shahedi</surname></string-name>, and <string-name><given-names>A. V.</given-names> <surname>Sadr</surname></string-name></person-group>, &#x201C;<article-title>U-Net-based models for skin lesion segmentation: More attention and augmentation</article-title>,&#x201D; <year>2022</year>, <comment><italic>arXiv:2210.16399</italic></comment>.</mixed-citation></ref>
<ref id="ref-54"><label>[54]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Ruan</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Xiang</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Xie</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Liu</surname></string-name>, and <string-name><given-names>Y.</given-names> <surname>Fu</surname></string-name></person-group>, &#x201C;<article-title>MALUNet: A multi-attention and light-weight UNet for skin lesion segmentation</article-title>,&#x201D; <source>Proc. 2022 IEEE Int. Conf. Bioinform. Biomed. (BIBM)</source>, vol. <volume>30</volume>, no. <issue>1</issue>, pp. <fpage>1150</fpage>&#x2013;<lpage>1156</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.1109/BIBM55620.2022.9995040</pub-id>.</mixed-citation></ref>
<ref id="ref-55"><label>[55]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Xu</surname></string-name> and <string-name><given-names>T. H.</given-names> <surname>Hwang</surname></string-name></person-group>, &#x201C;<article-title>Automatic skin lesion segmentation using deep fully convolutional networks</article-title>,&#x201D; <year>2018</year>, <comment><italic>arXiv:1807.06466</italic></comment>.</mixed-citation></ref>
<ref id="ref-56"><label>[56]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Popescu</surname></string-name>, <string-name><given-names>M.</given-names> <surname>El-khatib</surname></string-name>, and <string-name><given-names>L.</given-names> <surname>Ichim</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion classification using collective intelligence of multiple neural networks</article-title>,&#x201D; <source>Sensors</source>, vol. <volume>22</volume>, no. <issue>12</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>22</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-57"><label>[57]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T. H. H.</given-names> <surname>Aldhyani</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Verma</surname></string-name>, <string-name><given-names>M. H.</given-names> <surname>Al-Adhaileh</surname></string-name>, and <string-name><given-names>D.</given-names> <surname>Koundal</surname></string-name></person-group>, &#x201C;<article-title>Multi-class skin lesion classification using a lightweight dynamic kernel deep-learning-based convolutional neural network</article-title>,&#x201D; <source>Diagnostics</source>, vol. <volume>12</volume>, no. <issue>9</issue>, <year>2022</year>, Art. no. 2048. doi: <pub-id pub-id-type="doi">10.3390/diagnostics12092048</pub-id>; <pub-id pub-id-type="pmid">36140447</pub-id></mixed-citation></ref>
<ref id="ref-58"><label>[58]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V. D.</given-names> <surname>Nguyen</surname></string-name>, <string-name><given-names>N. D.</given-names> <surname>Bui</surname></string-name>, and <string-name><given-names>H. K.</given-names> <surname>Do</surname></string-name></person-group>, &#x201C;<article-title>Skin lesion classification on imbalanced data using deep learning with soft attention</article-title>,&#x201D; <source>Sensors</source>, vol. <volume>22</volume>, no. <issue>19</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>24</lpage>, <year>2022</year>. doi: <pub-id pub-id-type="doi">10.3390/s22197530</pub-id>; <pub-id pub-id-type="pmid">36236628</pub-id></mixed-citation></ref>
<ref id="ref-59"><label>[59]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Jiang</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Ding</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhao</surname></string-name>, and <string-name><given-names>J.</given-names> <surname>Liu</surname></string-name></person-group>, &#x201C;<article-title>Knowledge-aware deep framework for collaborative skin lesion segmentation and melanoma recognition</article-title>,&#x201D; <source>Pattern Recognit.</source>, vol. <volume>120</volume>, no. <issue>2</issue>, <year>2021</year>, Art. no. 108075. doi: <pub-id pub-id-type="doi">10.1016/j.patcog.2021.108075</pub-id>.</mixed-citation></ref>
<ref id="ref-60"><label>[60]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P. G.</given-names> <surname>Cavalcanti</surname></string-name> and <string-name><given-names>J.</given-names> <surname>Scharcanski</surname></string-name></person-group>, &#x201C;<article-title>Automated prescreening of pigmented skin lesions using standard cameras</article-title>,&#x201D; <source>Comput. Med. Imaging Graph.</source>, vol. <volume>35</volume>, no. <issue>6</issue>, pp. <fpage>481</fpage>&#x2013;<lpage>491</lpage>, <year>2011</year>. doi: <pub-id pub-id-type="doi">10.1016/j.compmedimag.2011.02.007</pub-id>; <pub-id pub-id-type="pmid">21489751</pub-id></mixed-citation></ref>
<ref id="ref-61"><label>[61]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Amelard</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Wong</surname></string-name>, and <string-name><given-names>D. A.</given-names> <surname>Clausi</surname></string-name></person-group>, &#x201C;<article-title>Extracting morphological high-level intuitive features (HLIF) for enhancing skin lesion classification</article-title>,&#x201D; in <conf-name>Proc. Annu. Int. Conf. IEEE Eng. Med. Biol. Soc. (EMBS)</conf-name>, <year>2012</year>, pp. <fpage>4458</fpage>&#x2013;<lpage>4461</lpage>. doi: <pub-id pub-id-type="doi">10.1109/EMBC.2012.6346956</pub-id>; <pub-id pub-id-type="pmid">23366917</pub-id></mixed-citation></ref>
<ref id="ref-62"><label>[62]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>P. G.</given-names> <surname>Cavalcanti</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Scharcanski</surname></string-name>, <string-name><given-names>L. E. Di</given-names> <surname>Persia</surname></string-name>, and <string-name><given-names>D. H.</given-names> <surname>Milone</surname></string-name></person-group>, &#x201C;<article-title>An ICA-based method for the segmentation of pigmented skin lesions in macroscopic images</article-title>,&#x201D; in <conf-name>Proc. Annu. Int. Conf. IEEE Eng. Med. Biol. Soc. (EMBS)</conf-name>, <year>2011</year>, pp. <fpage>5993</fpage>&#x2013;<lpage>5996</lpage>. doi: <pub-id pub-id-type="doi">10.1109/IEMBS.2011.6091481</pub-id>; <pub-id pub-id-type="pmid">22255705</pub-id></mixed-citation></ref>
<ref id="ref-63"><label>[63]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Amelard</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Wong</surname></string-name>, and <string-name><given-names>D. A.</given-names> <surname>Clausi</surname></string-name></person-group>, &#x201C;<article-title>Extracting high-level intuitive features (HLIF) for classifying skin lesions using standard camera images</article-title>,&#x201D; in <conf-name>2012 Ninth Conf. Comput. Robot Vis.</conf-name>, <year>2012</year>, pp. <fpage>396</fpage>&#x2013;<lpage>403</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CRV.2012.59</pub-id>.</mixed-citation></ref>
<ref id="ref-64"><label>[64]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>I.</given-names> <surname>Giotis</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Molders</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Land</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Biehl</surname></string-name>, <string-name><given-names>M. F.</given-names> <surname>Jonkman</surname></string-name> and <string-name><given-names>N.</given-names> <surname>Petkov</surname></string-name></person-group>, &#x201C;<article-title>MED-NODE: A computer-assisted melanoma diagnosis system using non-dermoscopic images</article-title>,&#x201D; <source>Expert. Syst. Appl.</source>, vol. <volume>42</volume>, no. <issue>19</issue>, pp. <fpage>6578</fpage>&#x2013;<lpage>6585</lpage>, <year>2015</year>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2015.04.034</pub-id>.</mixed-citation></ref>
<ref id="ref-65"><label>[65]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Sagar</surname></string-name> and <string-name><given-names>L. M.</given-names> <surname>Saini</surname></string-name></person-group>, &#x201C;<article-title>Color channel based segmentation of skin lesion from clinical images for the detection of melanoma</article-title>,&#x201D; in <conf-name>1st IEEE Int. Conf. Power Electron. Intell. Control Energy Syst., ICPEICES 2016</conf-name>, <year>2017</year>, pp. <fpage>1</fpage>&#x2013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICPEICES.2016.7853624</pub-id>.</mixed-citation></ref>
<ref id="ref-66"><label>[66]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Nasr-Esfahani</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Melanoma detection by analysis of clinical images using convolutional neural network</article-title>,&#x201D; <source>Proc. Annu. Int. Conf. IEEE Eng. Med. Biol. Soc. (EMBS)</source>, vol. <volume>2016</volume>, pp. <fpage>1373</fpage>&#x2013;<lpage>1376</lpage>, <year>2016</year>. doi: <pub-id pub-id-type="doi">10.1109/EMBC.2016.7590963</pub-id>; <pub-id pub-id-type="pmid">28268581</pub-id></mixed-citation></ref>
<ref id="ref-67"><label>[67]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. H.</given-names> <surname>Jafari</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Samavi</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Karimi</surname></string-name>, <string-name><given-names>S. M. R.</given-names> <surname>Soroushmehr</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Ward</surname></string-name> and <string-name><given-names>K.</given-names> <surname>Najarian</surname></string-name></person-group>, &#x201C;<article-title>Automatic detection of melanoma using broad extraction of features from digital images</article-title>,&#x201D; in <source>Proc. Annu. Int. Conf. IEEE Eng. Med. Biol. Soc. (EMBS)</source>, vol. <volume>2016</volume>, pp. <fpage>1357</fpage>&#x2013;<lpage>1360</lpage>, <year>2016</year>. doi: <pub-id pub-id-type="doi">10.1109/EMBC.2016.7590959</pub-id>; <pub-id pub-id-type="pmid">28268577</pub-id></mixed-citation></ref>
<ref id="ref-68"><label>[68]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>Karimi</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Najarian</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Jafari</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Samavi</surname></string-name>, <string-name><given-names>S. M. R.</given-names> <surname>Soroushmehr</surname></string-name>, and <string-name><given-names>H.</given-names> <surname>Mohaghegh</surname></string-name></person-group>, &#x201C;<article-title>Set of descriptors for skin cancer diagnosis using non-dermoscopic color images</article-title>,&#x201D; in <source>2016 IEEE Int. Conf. Image Process. (ICIP)</source>, <publisher-loc>Phoenix, AZ, USA</publisher-loc>, <year>2016</year>, pp. <fpage>2638</fpage>&#x2013;<lpage>2642</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICIP.2016.7532837</pub-id>.</mixed-citation></ref>
<ref id="ref-69"><label>[69]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Amelard</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Glaister</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Wong</surname></string-name>, and <string-name><given-names>D. A.</given-names> <surname>Clausi</surname></string-name></person-group>, &#x201C;<article-title>Melanoma decision support using lighting-corrected intuitive feature models</article-title>,&#x201D; in <conf-name>Comput. Vis. Tech. Diagnos. Skin Cancer Ser. Bioeng.</conf-name>, <year>2014</year>, pp. <fpage>193</fpage>&#x2013;<lpage>219</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-642-39608-3_7</pub-id>.</mixed-citation></ref>
<ref id="ref-70"><label>[70]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Glaister</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Amelard</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Wong</surname></string-name>, and <string-name><given-names>D. A.</given-names> <surname>Clausi</surname></string-name></person-group>, &#x201C;<article-title>Multistage illumination modeling of dermatological photographs for illumination-corrected skin lesion analysis</article-title>,&#x201D; <source>IEEE Trans. Biomed. Eng.</source>, vol. <volume>60</volume>, no. <issue>7</issue>, pp. <fpage>1873</fpage>&#x2013;<lpage>1883</lpage>, <year>2013</year>. doi: <pub-id pub-id-type="doi">10.1109/TBME.2013.2244596</pub-id>; <pub-id pub-id-type="pmid">23380843</pub-id></mixed-citation></ref>
<ref id="ref-71"><label>[71]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Glaister</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Wong</surname></string-name>, and <string-name><given-names>D. A.</given-names> <surname>Clausi</surname></string-name></person-group>, &#x201C;<article-title>Segmentation of skin lesions from digital images using joint statistical texture distinctiveness</article-title>,&#x201D; <source>IEEE Trans. Biomed. Eng.</source>, vol. <volume>61</volume>, no. <issue>4</issue>, pp. <fpage>1220</fpage>&#x2013;<lpage>1230</lpage>, <year>2014</year>. doi: <pub-id pub-id-type="doi">10.1109/TBME.2013.2297622</pub-id>; <pub-id pub-id-type="pmid">24658246</pub-id></mixed-citation></ref>
<ref id="ref-72"><label>[72]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Amelard</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Glaister</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Wong</surname></string-name>, and <string-name><given-names>D. A.</given-names> <surname>Clausi</surname></string-name></person-group>, &#x201C;<article-title>High-level intuitive features (HLIFs) for intuitive skin lesion description</article-title>,&#x201D; <source>IEEE Trans. Biomed. Eng.</source>, vol. <volume>62</volume>, no. <issue>3</issue>, pp. <fpage>820</fpage>&#x2013;<lpage>831</lpage>, <year>2015</year>. doi: <pub-id pub-id-type="doi">10.1109/TBME.2014.2365518</pub-id>; <pub-id pub-id-type="pmid">25361498</pub-id></mixed-citation></ref>
<ref id="ref-73"><label>[73]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Kamboj</surname></string-name></person-group>, &#x201C;<article-title>A color-based approach for melanoma skin cancer detection</article-title>,&#x201D; <source>2018 First Int. Conf. Secur. Cyber Comput. Commun.</source>, vol. <volume>181</volume>, pp. <fpage>508</fpage>&#x2013;<lpage>513</lpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.1109/ICSCCC.2018.8703309</pub-id>.</mixed-citation></ref>
<ref id="ref-74"><label>[74]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Barata</surname></string-name>, <string-name><given-names>J. S.</given-names> <surname>Marques</surname></string-name>, and <string-name><given-names>J.</given-names> <surname>Rozeira</surname></string-name></person-group>, &#x201C;<article-title>A system for the detection of pigment network in dermoscopy images using directional filters</article-title>,&#x201D; <source>IEEE Trans. Biomed. Eng.</source>, vol. <volume>59</volume>, no. <issue>10</issue>, pp. <fpage>2744</fpage>&#x2013;<lpage>2754</lpage>, <year>2012</year>. doi: <pub-id pub-id-type="doi">10.1109/TBME.2012.2209423</pub-id>; <pub-id pub-id-type="pmid">22829364</pub-id></mixed-citation></ref>
<ref id="ref-75"><label>[75]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Ramezani</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Karimian</surname></string-name>, and <string-name><given-names>P.</given-names> <surname>Moallem</surname></string-name></person-group>, &#x201C;<article-title>Automatic detection of malignant melanoma using macroscopic images</article-title>,&#x201D; <source>J. Med. Signals Sens.</source>, vol. <volume>4</volume>, no. <issue>4</issue>, pp. <fpage>281</fpage>&#x2013;<lpage>290</lpage>, <year>2014</year>. doi: <pub-id pub-id-type="doi">10.4103/2228-7477.144052</pub-id>.</mixed-citation></ref>
<ref id="ref-76"><label>[76]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Lee</surname></string-name>, <string-name><given-names>V.</given-names> <surname>Ng</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Gallagher</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Coldman</surname></string-name>, and <string-name><given-names>D.</given-names> <surname>McLean</surname></string-name></person-group>, &#x201C;<article-title>Dullrazor<sup>&#x00AE;</sup>: A software approach to hair removal from images</article-title>,&#x201D; <source>Comput. Biol. Med.</source>, vol. <volume>27</volume>, no. <issue>6</issue>, pp. <fpage>533</fpage>&#x2013;<lpage>543</lpage>, <year>1997</year>. doi: <pub-id pub-id-type="doi">10.1016/S0010-4825(97)00020-6</pub-id>; <pub-id pub-id-type="pmid">9437554</pub-id></mixed-citation></ref>
<ref id="ref-77"><label>[77]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Sabouri</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Gholamhosseini</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Larsson</surname></string-name>, and <string-name><given-names>J.</given-names> <surname>Collins</surname></string-name></person-group>, &#x201C;<article-title>A cascade classifier for diagnosis of melanoma in clinical images</article-title>,&#x201D; in <source>2014 36th Annu. Int. Conf. IEEE Eng. Med. Biol. Soc. (EMBC 2014)</source>, vol. <volume>42</volume>, pp. <fpage>6748</fpage>&#x2013;<lpage>6751</lpage>, <year>2014</year>. doi: <pub-id pub-id-type="doi">10.1109/EMBC.2014.6945177</pub-id>; <pub-id pub-id-type="pmid">25571545</pub-id></mixed-citation></ref>
<ref id="ref-78"><label>[78]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>S. Y.</given-names> <surname>Kwan</surname></string-name>, <string-name><given-names>W. Y.</given-names> <surname>Chang</surname></string-name>, <string-name><given-names>M. Y.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>M. H.</given-names> <surname>Chi</surname></string-name> and <string-name><given-names>G. S.</given-names> <surname>Chen</surname></string-name></person-group>, &#x201C;<article-title>A robust hair segmentation and removal approach for clinical images of skin lesions</article-title>,&#x201D; in <conf-name>Proc. Annu. Int. Conf. IEEE Eng. Med. Biol. Soc. (EMBS)</conf-name>, <year>2013</year>, pp. <fpage>3315</fpage>&#x2013;<lpage>3318</lpage>. doi: <pub-id pub-id-type="doi">10.1109/EMBC.2013.6610250</pub-id>; <pub-id pub-id-type="pmid">24110437</pub-id></mixed-citation></ref>
<ref id="ref-79"><label>[79]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R. B.</given-names> <surname>Oliveira</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Marranghello</surname></string-name>, <string-name><given-names>A. S.</given-names> <surname>Pereira</surname></string-name>, and <string-name><given-names>J. M. R. S.</given-names> <surname>Tavares</surname></string-name></person-group>, &#x201C;<article-title>A computational approach for detecting pigmented skin lesions in macroscopic images</article-title>,&#x201D; <source>Expert. Syst. Appl.</source>, vol. <volume>61</volume>, pp. <fpage>53</fpage>&#x2013;<lpage>63</lpage>, <year>2016</year>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2016.05.017</pub-id>.</mixed-citation></ref>
<ref id="ref-80"><label>[80]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. M.</given-names> <surname>Martinez</surname></string-name> and <string-name><given-names>A. C.</given-names> <surname>Kak</surname></string-name></person-group>, &#x201C;<article-title>PCA versus LDA</article-title>,&#x201D; <source>IEEE Trans. Pattern Anal. Mach. Intell.</source>, vol. <volume>23</volume>, no. <issue>2</issue>, pp. <fpage>228</fpage>&#x2013;<lpage>233</lpage>, <year>2001</year>. doi: <pub-id pub-id-type="doi">10.1109/34.908974</pub-id>.</mixed-citation></ref>
<ref id="ref-81"><label>[81]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Shihab</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Salah</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>Mocanu</surname></string-name></person-group>, &#x201C;<article-title>Detection and diagnosis of skin cancer based on K-means cluster and convolutional neural network</article-title>,&#x201D; in <conf-name>Proc. 2021 23rd Int. Conf. Control Syst. Comput. Sci. Technol., CSCS 2021</conf-name>, <year>2021</year>, pp. <fpage>143</fpage>&#x2013;<lpage>150</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CSCS52396.2021.00031</pub-id>.</mixed-citation></ref>
<ref id="ref-82"><label>[82]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. Q.</given-names> <surname>Khan</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Classification of melanoma and nevus in digital images for diagnosis of skin cancer</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>7</volume>, pp. <fpage>90132</fpage>&#x2013;<lpage>90144</lpage>, <year>2019</year>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2926837</pub-id>.</mixed-citation></ref>
<ref id="ref-83"><label>[83]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. H.</given-names> <surname>Jafari</surname></string-name>, <string-name><given-names>E. N.</given-names> <surname>Nader</surname></string-name>, <string-name><given-names>S. M. R.</given-names> <surname>Soroushmehr</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Samavi</surname></string-name>, and <string-name><given-names>K.</given-names> <surname>Najarian</surname></string-name></person-group>, &#x201C;<article-title>Extraction of skin lesions from non-dermoscopic images for surgical excision of melanoma</article-title>,&#x201D; <source>Int. J. CARS</source>, vol. <volume>12</volume>, no. <issue>6</issue>, pp. <fpage>1021</fpage>&#x2013;<lpage>1030</lpage>, <year>2017</year>. doi: <pub-id pub-id-type="doi">10.1007/s11548-017-1567-8</pub-id>; <pub-id pub-id-type="pmid">28342106</pub-id></mixed-citation></ref>
<ref id="ref-84"><label>[84]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>He</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Sun</surname></string-name>, and <string-name><given-names>X.</given-names> <surname>Tang</surname></string-name></person-group>, &#x201C;<article-title>Guided image filtering</article-title>,&#x201D; <source>IEEE Trans. Pattern Anal. Mach. Intell.</source>, vol. <volume>35</volume>, no. <issue>6</issue>, pp. <fpage>1397</fpage>&#x2013;<lpage>1409</lpage>, <year>2013</year>. doi: <pub-id pub-id-type="doi">10.1109/TPAMI.2012.213</pub-id>; <pub-id pub-id-type="pmid">23599054</pub-id></mixed-citation></ref>
<ref id="ref-85"><label>[85]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Fujisawa</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Deep-learning-based, computer-aided classifier developed with a small dataset of clinical images surpasses board-certified dermatologists in skin tumour diagnosis</article-title>,&#x201D; <source>Br. J. Dermatol.</source>, vol. <volume>180</volume>, no. <issue>2</issue>, pp. <fpage>373</fpage>&#x2013;<lpage>381</lpage>, <year>2019</year>. doi: <pub-id pub-id-type="doi">10.1111/bjd.16924</pub-id>; <pub-id pub-id-type="pmid">29953582</pub-id></mixed-citation></ref>
<ref id="ref-86"><label>[86]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. G. C.</given-names> <surname>Pacheco</surname></string-name> and <string-name><given-names>R. A.</given-names> <surname>Krohling</surname></string-name></person-group>, &#x201C;<article-title>An attention-based mechanism to combine images and metadata in deep learning models applied to skin cancer classification</article-title>,&#x201D; <source>IEEE J. Biomed. Health Inform.</source>, vol. <volume>25</volume>, no. <issue>9</issue>, pp. <fpage>3554</fpage>&#x2013;<lpage>3563</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.1109/JBHI.2021.3062002</pub-id>; <pub-id pub-id-type="pmid">33635800</pub-id></mixed-citation></ref>
<ref id="ref-87"><label>[87]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>B.</given-names> <surname>Krohling</surname></string-name>, <string-name><given-names>P. B. C.</given-names> <surname>Castro</surname></string-name>, <string-name><given-names>A. G. C.</given-names> <surname>Pacheco</surname></string-name>, and <string-name><given-names>R. A.</given-names> <surname>Krohling</surname></string-name></person-group>, &#x201C;<article-title>A smartphone based application for skin cancer classification using deep learning with clinical images and lesion information</article-title>,&#x201D; <year>2021</year>, <comment><italic>arXiv:2104.14353</italic></comment>.</mixed-citation></ref>
<ref id="ref-88"><label>[88]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Mukherjee</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Adhikari</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>Roy</surname></string-name></person-group>, &#x201C;<article-title>Malignant melanoma classification using cross-platform dataset with deep learning CNN architecture</article-title>,&#x201D; in <source>Recent Trends in Signal and Image Processing</source>. <publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer</publisher-name>, <year>2019</year>, pp. <fpage>31</fpage>&#x2013;<lpage>41</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-981-13-6783-0_4</pub-id>.</mixed-citation></ref>
<ref id="ref-89"><label>[89]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>B. A.</given-names> <surname>Albert</surname></string-name></person-group>, &#x201C;<article-title>Deep learning from limited training data: Novel segmentation and ensemble algorithms applied to automatic melanoma diagnosis</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, pp. <fpage>31254</fpage>&#x2013;<lpage>31269</lpage>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2020.2973188</pub-id>.</mixed-citation></ref>
<ref id="ref-90"><label>[90]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. R.</given-names> <surname>Jadhav</surname></string-name>, <string-name><given-names>A. G.</given-names> <surname>Ghontale</surname></string-name>, and <string-name><given-names>V. K.</given-names> <surname>Shrivastava</surname></string-name></person-group>, &#x201C;<article-title>Segmentation and border detection of melanoma lesions using convolutional neural network and SVM</article-title>,&#x201D; <source>Springer Singapore</source>, vol. <volume>798</volume>, <year>2019</year>. doi: <pub-id pub-id-type="doi">10.1007/978-981-13-1132-1</pub-id>.</mixed-citation></ref>
<ref id="ref-91"><label>[91]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. G. C.</given-names> <surname>Pacheco</surname></string-name> and <string-name><given-names>R. A.</given-names> <surname>Krohling</surname></string-name></person-group>, &#x201C;<article-title>The impact of patient clinical information on automated skin cancer detection</article-title>,&#x201D; <source>Comput. Biol. Med.</source>, vol. <volume>116</volume>, no. <issue>1</issue>, <year>2020</year>, Art. no. 103545. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2019.103545</pub-id>; <pub-id pub-id-type="pmid">31760271</pub-id></mixed-citation></ref>
<ref id="ref-92"><label>[92]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V.</given-names> <surname>Pomponiu</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Nejati</surname></string-name>, and <string-name><given-names>N. -M.</given-names> <surname>Cheung</surname></string-name></person-group>, &#x201C;<article-title>Deepmole: Deep neural networks for skin mole lesion classification</article-title>,&#x201D; in <source>2016 IEEE Int. Conf. Image Process. (ICIP)</source>, <publisher-loc>Phoenix, AZ, USA</publisher-loc>, <year>2016</year>, pp. <fpage>2623</fpage>&#x2013;<lpage>2627</lpage>.</mixed-citation></ref>
<ref id="ref-93"><label>[93]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Salvi</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>DermoCC-GAN: A new approach for standardizing dermatological images using generative adversarial networks</article-title>,&#x201D; <source>Comput. Methods Programs Biomed.</source>, vol. <volume>225</volume>, <year>2022</year>, Art. no. 107040. doi: <pub-id pub-id-type="doi">10.1016/j.cmpb.2022.107040</pub-id>.</mixed-citation></ref>
<ref id="ref-94"><label>[94]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V.</given-names> <surname>Venugopal</surname></string-name>, <string-name><given-names>M. K.</given-names> <surname>Nath</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Joseph</surname></string-name>, and <string-name><given-names>M. V.</given-names> <surname>Das</surname></string-name></person-group>, &#x201C;<article-title>A deep learning-based illumination transform for devignetting photographs of dermatological lesions</article-title>,&#x201D; <source>Image Vis. Comput.</source>, vol. <volume>142</volume>, no. <issue>1</issue>, <year>2024</year>, Art. no. 104909. doi: <pub-id pub-id-type="doi">10.1016/j.imavis.2024.104909</pub-id>.</mixed-citation></ref>
<ref id="ref-95"><label>[95]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>L. S.</given-names> <surname>Shapiro</surname></string-name></person-group>, <source>Computer Vision</source>, <publisher-loc>Hoboken, NJ, USA</publisher-loc>: <publisher-name>Prentice Hall</publisher-name>, <year>2001</year>, vol. <volume>9</volume>.</mixed-citation></ref>
<ref id="ref-96"><label>[96]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>Otsu</surname></string-name></person-group>, &#x201C;<article-title>A threshold selection method from gray-level histograms</article-title>,&#x201D; <source>IEEE Trans. Syst. Man. Cybern.</source>, vol. <volume>SMC-9</volume>, no. <issue>1</issue>, pp. <fpage>62</fpage>&#x2013;<lpage>66</lpage>, <year>1979</year>.</mixed-citation></ref>
<ref id="ref-97"><label>[97]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Mukherjee</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Adhikari</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>Roy</surname></string-name></person-group>, &#x201C;<article-title>Malignant melanoma detection using multi layer perceptron with optimized network parameter selection by PSO</article-title>,&#x201D; in <source>Contemporary Advances in Innovative and Applicable Information Technology Conference paper</source>. <publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer</publisher-name>, <year>2019</year>, pp. <fpage>101</fpage>&#x2013;<lpage>109</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-981-13-1540-4_11</pub-id>.</mixed-citation></ref>
<ref id="ref-98"><label>[98]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Tabassum</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Munia</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Alam</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Neubert</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Fazel-rezai</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Member</surname></string-name></person-group>, &#x201C;<article-title>Automatic diagnosis of melanoma using linear and nonlinear features from digital image</article-title>,&#x201D; <source>IEEE J. Biomed. Heal. Informat.</source>, vol. <volume>17</volume>, pp. <fpage>4281</fpage>&#x2013;<lpage>4284</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-99"><label>[99]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T. F.</given-names> <surname>Chan</surname></string-name>, <string-name><given-names>B. Yezrielev</given-names> <surname>Sandberg</surname></string-name>, and <string-name><given-names>L. A.</given-names> <surname>Vese</surname></string-name></person-group>, &#x201C;<article-title>Active contours without edges for vector-valued images</article-title>,&#x201D; <source>J. Vis. Commun. Image Represent.</source>, vol. <volume>11</volume>, no. <issue>2</issue>, pp. <fpage>130</fpage>&#x2013;<lpage>141</lpage>, <year>2000</year>. doi: <pub-id pub-id-type="doi">10.1006/jvci.1999.0442</pub-id>.</mixed-citation></ref>
<ref id="ref-100"><label>[100]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Hyvarinen</surname></string-name></person-group>, &#x201C;<article-title>Family of fixed-point algorithms for independent component analysis</article-title>,&#x201D; in <source>1997 IEEE Int. Conf. Acoust. Speech Signal Process.</source>, <publisher-loc>Munich, Germany</publisher-loc>, <year>1997</year>, pp. <fpage>3917</fpage>&#x2013;<lpage>3920</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICASSP.1997.604766</pub-id>.</mixed-citation></ref>
<ref id="ref-101"><label>[101]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T. T.</given-names> <surname>Do</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Accessible melanoma detection using smartphones and mobile image analysis</article-title>,&#x201D; <source>IEEE Trans. Multimed.</source>, vol. <volume>20</volume>, no. <issue>10</issue>, pp. <fpage>2849</fpage>&#x2013;<lpage>2864</lpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.1109/TMM.2018.2814346</pub-id>.</mixed-citation></ref>
<ref id="ref-102"><label>[102]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Sabouri</surname></string-name> and <string-name><given-names>H.</given-names> <surname>Gholamhosseini</surname></string-name></person-group>, &#x201C;<article-title>Lesion border detection using deep learning</article-title>,&#x201D; in <conf-name>2016 IEEE Congr. Evol. Comput. CEC 2016</conf-name>, <year>2016</year>, pp. <fpage>1416</fpage>&#x2013;<lpage>1421</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CEC.2016.7743955</pub-id>.</mixed-citation></ref>
<ref id="ref-103"><label>[103]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Udrea</surname></string-name> and <string-name><given-names>G. D.</given-names> <surname>Mitra</surname></string-name></person-group>, &#x201C;<article-title>Generative adversarial neural networks for pigmented and non-pigmented skin lesions detection in clinical images</article-title>,&#x201D; in <conf-name>Proc. 2017 21st Int. Conf. Control Syst. Comput. CSCS 2017</conf-name>, <year>2017</year>, pp. <fpage>364</fpage>&#x2013;<lpage>368</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CSCS.2017.56</pub-id>.</mixed-citation></ref>
<ref id="ref-104"><label>[104]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>R. L.</given-names> <surname>Ara</surname></string-name> and <string-name><given-names>R. D. A. L.</given-names> <surname>Rab&#x005E;</surname></string-name></person-group>, &#x201C;<article-title>Automatic segmentation of melanoma skin cancer using deep learning</article-title>,&#x201D; in <conf-name>2020 IEEE Int. Conf. E-health Netw., App. Serv. (HEALTHCOM) Groups</conf-name>, <year>2021</year>, pp. <fpage>1</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation></ref>
<ref id="ref-105"><label>[105]</label><mixed-citation publication-type="book"><person-group person-group-type="author"><string-name><given-names>O.</given-names> <surname>Ronneberger</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Fischer</surname></string-name>, and <string-name><given-names>T.</given-names> <surname>Brox</surname></string-name></person-group>, &#x201C;<chapter-title>U-Net: Convolutional networks for biomedical image segmentation</chapter-title>,&#x201D; in <source>Medical Image Computing and Computer-Assisted Intervention-MICCAI</source>, <year>2015</year>, vol. <volume>9351</volume>, pp. <fpage>234</fpage>&#x2013;<lpage>241</lpage>.</mixed-citation></ref>
<ref id="ref-106"><label>[106]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. S.</given-names> <surname>Devi</surname></string-name>, <string-name><given-names>N. H.</given-names> <surname>Singh</surname></string-name>, and <string-name><given-names>R. H.</given-names> <surname>Laskar</surname></string-name></person-group>, &#x201C;<article-title>Fuzzy c-means clustering with histogram based cluster selection for skin lesion segmentation using non-dermoscopic images</article-title>,&#x201D; <source>Int. J. Interact. Multimed. Artif. Intell.</source>, vol. <volume>6</volume>, no. <issue>1</issue>, pp. <fpage>26</fpage>&#x2013;<lpage>31</lpage>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.9781/ijimai.2020.01.001</pub-id>.</mixed-citation></ref>
<ref id="ref-107"><label>[107]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>G.</given-names> <surname>Friedland</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Jantz</surname></string-name>, and <string-name><given-names>R.</given-names> <surname>Rojas</surname></string-name></person-group>, &#x201C;<article-title>SIOX: Simple interactive object extraction in still images</article-title>,&#x201D; in <conf-name>Proc.-Seventh IEEE Int. Symp. Multimedia, ISM 2005</conf-name>, <year>2005</year>, vol. <volume>2005</volume>, pp. <fpage>253</fpage>&#x2013;<lpage>259</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ISM.2005.106</pub-id>.</mixed-citation></ref>
<ref id="ref-108"><label>[108]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Moussa</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Gerges</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Salem</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Akiki</surname></string-name>, <string-name><given-names>O.</given-names> <surname>Falou</surname></string-name> and <string-name><given-names>D.</given-names> <surname>Azar</surname></string-name></person-group>, &#x201C;<article-title>Computer-aided detection of Melanoma using geometric features</article-title>,&#x201D; in <source>2016 3rd Middle East Conf. Biomed. Eng. (MECBME)</source>, <publisher-loc>Beirut, Lebanon</publisher-loc>, <year>2016</year>, pp. <fpage>125</fpage>&#x2013;<lpage>128</lpage>. doi: <pub-id pub-id-type="doi">10.1109/MECBME.2016.7745423</pub-id>.</mixed-citation></ref>
<ref id="ref-109"><label>[109]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V.</given-names> <surname>Venugopal</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Joseph</surname></string-name>, <string-name><given-names>M. V.</given-names> <surname>Das</surname></string-name>, and <string-name><given-names>M. K.</given-names> <surname>Nath</surname></string-name></person-group>, &#x201C;<article-title>DTP-Net: A convolutional neural network model to predict threshold for localizing the lesions on dermatological macro-images</article-title>,&#x201D; <source>Comput. Biol. Med.</source>, vol. <volume>148</volume>, no. <issue>3</issue>, <year>2022, Art. no. 105852</year>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.105852</pub-id>; <pub-id pub-id-type="pmid">35853397</pub-id></mixed-citation></ref>
<ref id="ref-110"><label>[110]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. M.</given-names> <surname>Shaw</surname></string-name>, <string-name><given-names>D. S.</given-names> <surname>Rigel</surname></string-name>, <string-name><given-names>R. J.</given-names> <surname>Friedman</surname></string-name>, <string-name><given-names>W. H.</given-names> <surname>Mccarthy</surname></string-name>, and <string-name><given-names>A. W.</given-names> <surname>Kopf</surname></string-name></person-group>, &#x201C;<article-title>Early diagnosis of cutaneous melanoma</article-title>,&#x201D; <source>JAMA</source>, vol. <volume>292</volume>, no. <issue>22</issue>, pp. <fpage>2771</fpage>&#x2013;<lpage>2776</lpage>, <year>2004</year>. doi: <pub-id pub-id-type="doi">10.1001/jama.292.22.2771</pub-id>; <pub-id pub-id-type="pmid">15585738</pub-id></mixed-citation></ref>
<ref id="ref-111"><label>[111]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. E.</given-names> <surname>Celebi</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>A methodological approach to the classification of dermoscopy images</article-title>,&#x201D; <source>Comput. Med. Imaging Graph.</source>, vol. <volume>31</volume>, no. <issue>6</issue>, pp. <fpage>362</fpage>&#x2013;<lpage>373</lpage>, <year>2007</year>. doi: <pub-id pub-id-type="doi">10.1016/j.compmedimag.2007.01.003</pub-id>; <pub-id pub-id-type="pmid">17387001</pub-id></mixed-citation></ref>
<ref id="ref-112"><label>[112]</label><mixed-citation publication-type="book"><person-group person-group-type="author"><string-name><given-names>R. C.</given-names> <surname>Gonzalez</surname></string-name> and <string-name><given-names>R. E.</given-names> <surname>Woods</surname></string-name></person-group>, <source>Digital Image Processing</source>. <publisher-loc>India</publisher-loc>: <publisher-name>Pearson</publisher-name>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-113"><label>[113]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. G.</given-names> <surname>Manousaki</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>A simple digital image processing system to aid in melanoma diagnosis in an everyday melanocytic skin lesion unit. A preliminary report</article-title>,&#x201D; <source>Int. J. Dermatol.</source>, vol. <volume>45</volume>, no. <issue>4</issue>, pp. <fpage>402</fpage>&#x2013;<lpage>410</lpage>, <year>2006</year>. doi: <pub-id pub-id-type="doi">10.1111/j.1365-4632.2006.02726.x</pub-id>; <pub-id pub-id-type="pmid">16650167</pub-id></mixed-citation></ref>
<ref id="ref-114"><label>[114]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Kjoelen</surname></string-name>, <string-name><given-names>M. J.</given-names> <surname>Thompson</surname></string-name>, <string-name><given-names>S. E.</given-names> <surname>Umbaugh</surname></string-name>, <string-name><given-names>R. H.</given-names> <surname>Moss</surname></string-name>, and <string-name><given-names>W. V.</given-names> <surname>Stoecker</surname></string-name></person-group>, &#x201C;<article-title>Performance of AI methods in detecting melanoma</article-title>,&#x201D; <source>IEEE Eng. Med. Biol. Mag.</source>, vol. <volume>14</volume>, no. <issue>4</issue>, pp. <fpage>411</fpage>&#x2013;<lpage>416</lpage>, <year>1995</year>. doi: <pub-id pub-id-type="doi">10.1109/51.395323</pub-id>.</mixed-citation></ref>
<ref id="ref-115"><label>[115]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Parolin</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Herzer</surname></string-name>, and <string-name><given-names>C. R.</given-names> <surname>Jung</surname></string-name></person-group>, &#x201C;<article-title>Semi-automated diagnosis of melanoma through the analysis of dermatological images</article-title>,&#x201D; in <conf-name>2010 23rd SIBGRAPI Conf. Graph. Patterns Images</conf-name>, <year>2010</year>, pp. <fpage>71</fpage>&#x2013;<lpage>78</lpage>. doi: <pub-id pub-id-type="doi">10.1109/SIBGRAPI.2010.18</pub-id>.</mixed-citation></ref>
<ref id="ref-116"><label>[116]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J. H.</given-names> <surname>Christensen</surname></string-name>, <string-name><given-names>M. B. T.</given-names> <surname>Soerensen</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Linghui</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Chen</surname></string-name>, and <string-name><given-names>M. O.</given-names> <surname>Jensen</surname></string-name></person-group>, &#x201C;<article-title>Pre-diagnostic digital imaging prediction model to discriminate between malignant melanoma and benign pigmented skin lesion</article-title>,&#x201D; <source>Ski. Res. Technol.</source>, vol. <volume>16</volume>, no. <issue>1</issue>, pp. <fpage>98</fpage>&#x2013;<lpage>108</lpage>, <year>2010</year>. doi: <pub-id pub-id-type="doi">10.1111/j.1600-0846.2009.00408.x</pub-id>; <pub-id pub-id-type="pmid">20384888</pub-id></mixed-citation></ref>
<ref id="ref-117"><label>[117]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Chang</surname></string-name>, <string-name><given-names>R. J.</given-names> <surname>Stanley</surname></string-name>, <string-name><given-names>R. H.</given-names> <surname>Moss</surname></string-name>, and <string-name><given-names>W. Van</given-names> <surname>Stoecker</surname></string-name></person-group>, &#x201C;<article-title>A systematic heuristic approach for feature selection for melanoma discrimination using clinical images</article-title>,&#x201D; <source>Ski Res. Technol.</source>, vol. <volume>11</volume>, no. <issue>3</issue>, pp. <fpage>165</fpage>&#x2013;<lpage>178</lpage>, <year>2005</year>. doi: <pub-id pub-id-type="doi">10.1111/j.1600-0846.2005.00116.x</pub-id>; <pub-id pub-id-type="pmid">15998327</pub-id></mixed-citation></ref>
<ref id="ref-118"><label>[118]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Claridge</surname></string-name>, <string-name><given-names>P. N.</given-names> <surname>Hall</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Keefe&#x2019;</surname></string-name>, and <string-name><given-names>J. P.</given-names> <surname>Allen</surname></string-name></person-group>, &#x201C;<article-title>Shape analysis for classification melanoma</article-title>,&#x201D; <source>J. Biomed. Eng. 1992</source>, vol. <volume>14</volume>, no. <issue>3</issue>, pp. <fpage>229</fpage>&#x2013;<lpage>234</lpage>, <year>1992</year>. doi: <pub-id pub-id-type="doi">10.1016/0141-5425(92)90057-R</pub-id>; <pub-id pub-id-type="pmid">1588780</pub-id></mixed-citation></ref>
<ref id="ref-119"><label>[119]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Fernandez Alc&#x00F3;n</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Automatic imaging system with decision support for inspection of pigmented skin lesions and melanoma diagnosis</article-title>,&#x201D; <source>IEEE J. Sel. Top. Signal Process</source>, vol. <volume>3</volume>, no. <issue>1</issue>, pp. <fpage>14</fpage>&#x2013;<lpage>25</lpage>, <year>2009</year>. doi: <pub-id pub-id-type="doi">10.1109/JSTSP.2008.2011156</pub-id>.</mixed-citation></ref>
<ref id="ref-120"><label>[120]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R. M.</given-names> <surname>Haralick</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Shanmugam</surname></string-name>, and <string-name><given-names>I.</given-names> <surname>Dinstein</surname></string-name></person-group>, &#x201C;<article-title>Textural features for image classification</article-title>,&#x201D; <source>IEEE Trans. Syst., Man, Cybernet.</source>, vol. <volume>SMC-3</volume>, no. <issue>6</issue>, pp. <fpage>610</fpage>&#x2013;<lpage>621</lpage>, <year>1973</year>.</mixed-citation></ref>
<ref id="ref-121"><label>[121]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D. A.</given-names> <surname>Clausi</surname></string-name></person-group>, &#x201C;<article-title>An analysis of co-occurrence texture statistics as a function of grey level quantization</article-title>,&#x201D; <source>Can J. Remote Sens.</source>, vol. <volume>28</volume>, no. <issue>1</issue>, pp. <fpage>45</fpage>&#x2013;<lpage>62</lpage>, <year>2002</year>. doi: <pub-id pub-id-type="doi">10.5589/m02-004</pub-id>; <pub-id pub-id-type="pmid">17060990</pub-id></mixed-citation></ref>
<ref id="ref-122"><label>[122]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Mar&#x00ED;n</surname></string-name>, <string-name><given-names>G. H.</given-names> <surname>Alf&#x00E9;rez</surname></string-name>, <string-name><given-names>J.</given-names> <surname>C&#x00F3;rdova</surname></string-name>, and <string-name><given-names>V.</given-names> <surname>Gonz&#x00E1;lez</surname></string-name></person-group>, &#x201C;<article-title>Detection of melanoma through image recognition and artificial neural networks</article-title>,&#x201D; in <source>World Cong. Med. Phys. Biomed. Eng.</source>, <publisher-loc>Toronto, Canada</publisher-loc>, <year>2015</year>, pp. <fpage>832</fpage>&#x2013;<lpage>835</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-319-19387-8</pub-id>.</mixed-citation></ref>
<ref id="ref-123"><label>[123]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>X.</given-names> <surname>Tang</surname></string-name></person-group>, &#x201C;<article-title>Texture information in run-length matrices</article-title>,&#x201D; <source>IEEE Trans. Image Process</source>, vol. <volume>7</volume>, no. <issue>11</issue>, pp. <fpage>1602</fpage>&#x2013;<lpage>1609</lpage>, <year>1998</year>. doi: <pub-id pub-id-type="doi">10.1109/83.725367</pub-id>; <pub-id pub-id-type="pmid">18276225</pub-id></mixed-citation></ref>
<ref id="ref-124"><label>[124]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Guo</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Zhang</surname></string-name>, and <string-name><given-names>D.</given-names> <surname>Zhang</surname></string-name></person-group>, &#x201C;<article-title>A completed modeling of local binary pattern operator for texture classification</article-title>,&#x201D; <source>IEEE Trans. Image Process</source>, vol. <volume>19</volume>, no. <issue>6</issue>, pp. <fpage>1657</fpage>&#x2013;<lpage>1663</lpage>, <year>2010</year>. doi: <pub-id pub-id-type="doi">10.1109/TIP.2010.2044957</pub-id>; <pub-id pub-id-type="pmid">20215079</pub-id></mixed-citation></ref>
<ref id="ref-125"><label>[125]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>I.</given-names> <surname>Giotis</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Bunte</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Petkov</surname></string-name>, and <string-name><given-names>M.</given-names> <surname>Biehl</surname></string-name></person-group>, &#x201C;<article-title>Adaptive matrices and filters for color texture classification</article-title>,&#x201D; <source>J. Math. Imaging Vis.</source>, vol. <volume>47</volume>, no. <issue>1&#x2013;2</issue>, pp. <fpage>79</fpage>&#x2013;<lpage>92</lpage>, <year>2013</year>. doi: <pub-id pub-id-type="doi">10.1007/s10851-012-0356-9</pub-id>.</mixed-citation></ref>
<ref id="ref-126"><label>[126]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D. G.</given-names> <surname>Lowe</surname></string-name></person-group>, &#x201C;<article-title>Object recognition from local scale-invariant features</article-title>,&#x201D; <source>Seventh IEEE Int. Conf. Comput. Vis.</source>, <publisher-loc>Kerkyra, Greece</publisher-loc>, vol. <volume>2</volume>, pp. <fpage>1150</fpage>&#x2013;<lpage>1157</lpage>, <year>1999</year>. doi: <pub-id pub-id-type="doi">10.1109/ICCV.1999.790410</pub-id>.</mixed-citation></ref>
<ref id="ref-127"><label>[127]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L. M. De</given-names> <surname>Lima</surname></string-name> and <string-name><given-names>R. A.</given-names> <surname>Krohling</surname></string-name></person-group>, &#x201C;<article-title>Exploring advances in transformers and CNN for skin lesion diagnosis on small datasets</article-title>,&#x201D; <source>Intell. Syst. Springer Int. Publ. Berlin/Heidelberg, Ger.</source>, vol. <volume>13654</volume>, pp. <fpage>282</fpage>&#x2013;<lpage>296</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-128"><label>[128]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Yang</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Self-paced balance learning for clinical skin disease recognition</article-title>,&#x201D; <source>IEEE Trans. Neural Netw. Learn. Syst.</source>, vol. <volume>31</volume>, no. <issue>8</issue>, pp. <fpage>2832</fpage>&#x2013;<lpage>2846</lpage>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.1109/TNNLS.2019.2917524</pub-id>; <pub-id pub-id-type="pmid">31199274</pub-id></mixed-citation></ref>
<ref id="ref-129"><label>[129]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Deng</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Dong</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Socher</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>K.</given-names> <surname>Li</surname></string-name> and <string-name><given-names>L.</given-names> <surname>Fei-Fei</surname></string-name></person-group>, &#x201C;<article-title>ImageNet: A large-scale hierarchical image database</article-title>,&#x201D; in <conf-name>IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recognit.</conf-name>, <year>2009</year>, pp. <fpage>248</fpage>&#x2013;<lpage>255</lpage>.</mixed-citation></ref>
<ref id="ref-130"><label>[130]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>C.</given-names> <surname>Szegedy</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Jia</surname></string-name>, and <string-name><given-names>P.</given-names> <surname>Sermanet</surname></string-name></person-group>, &#x201C;<article-title>Going deeper with convolutions</article-title>,&#x201D; in <conf-name>Proc. IEEE Conf. Comput. Vis. Pattern Recognit.</conf-name>, <year>2015</year>, pp. <fpage>1</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CVPR.2015.7298594</pub-id>.</mixed-citation></ref>
<ref id="ref-131"><label>[131]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K.</given-names> <surname>He</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Ren</surname></string-name>, and <string-name><given-names>J.</given-names> <surname>Sun</surname></string-name></person-group>, &#x201C;<article-title>Deep residual learning for image recognition</article-title>,&#x201D; in <source>Proc. IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recognit.</source>, vol. <volume>2016</volume>, pp. <fpage>770</fpage>&#x2013;<lpage>778</lpage>, <year>2016</year>. doi: <pub-id pub-id-type="doi">10.1109/CVPR.2016.90</pub-id>.</mixed-citation></ref>
<ref id="ref-132"><label>[132]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. S.</given-names> <surname>Han</surname></string-name>, <string-name><given-names>M. S.</given-names> <surname>Kim</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Lim</surname></string-name>, <string-name><given-names>G. H.</given-names> <surname>Park</surname></string-name>, and <string-name><given-names>I.</given-names> <surname>Park</surname></string-name></person-group>, &#x201C;<article-title>Classification of the clinical images for benign and malignant cutaneous tumors using a deep learning algorithm</article-title>,&#x201D; <source>J. Invest. Dermatol.</source>, vol. <volume>138</volume>, no. <issue>7</issue>, pp. <fpage>1529</fpage>&#x2013;<lpage>1538</lpage>, <year>2018</year>. doi: <pub-id pub-id-type="doi">10.1016/j.jid.2018.01.028</pub-id>; <pub-id pub-id-type="pmid">29428356</pub-id></mixed-citation></ref>
<ref id="ref-133"><label>[133]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. S.</given-names> <surname>Han</surname></string-name>, <string-name><given-names>I.</given-names> <surname>Park</surname></string-name>, <string-name><given-names>S. E.</given-names> <surname>Chang</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Lim</surname></string-name>, and <string-name><given-names>M. S.</given-names> <surname>Kim</surname></string-name></person-group>, &#x201C;<article-title>Augmented intelligence dermatology: deep neural networks empower medical professionals in diagnosing skin cancer and predicting treatment options for 134 skin disorders</article-title>,&#x201D; <source>J. Invest. Dermatol.</source>, vol. <volume>140</volume>, no. <issue>9</issue>, pp. <fpage>1753</fpage>&#x2013;<lpage>1761</lpage>, <year>2020</year>. doi: <pub-id pub-id-type="doi">10.1016/j.jid.2020.01.019</pub-id>; <pub-id pub-id-type="pmid">32243882</pub-id></mixed-citation></ref>
<ref id="ref-134"><label>[134]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>G. Di</given-names> <surname>Leo</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Paolillo</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Sommella</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Fabbrocini</surname></string-name>, and <string-name><given-names>O.</given-names> <surname>Rescigno</surname></string-name></person-group>, &#x201C;<article-title>A software tool for the diagnosis of melanomas</article-title>,&#x201D; in <conf-name>2010 IEEE Instrument. Measurem. Technol. Conf. Proc.</conf-name>, <year>2010</year>, pp. <fpage>886</fpage>&#x2013;<lpage>891</lpage>. doi: <pub-id pub-id-type="doi">10.1109/imtc.2010.5488165</pub-id>.</mixed-citation></ref>
<ref id="ref-135"><label>[135]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>A. I.</given-names> <surname>Gola</surname></string-name>, <string-name><given-names>B. G.</given-names> <surname>Zapirain</surname></string-name>, <string-name><given-names>A. M.</given-names> <surname>Zorrilla</surname></string-name>, and <string-name><given-names>I. R.</given-names> <surname>Oleagordia</surname></string-name></person-group>, &#x201C;<article-title>Automated diagnosis of melanomas based on globular and reticular pattern recognition algorithms for epiluminiscence images</article-title>,&#x201D; in <conf-name>Eur. Signal Process. Conf.</conf-name>, <year>2010</year>, pp. <fpage>264</fpage>&#x2013;<lpage>268</lpage>.</mixed-citation></ref>
<ref id="ref-136"><label>[136]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Jaworek-Korjakowska</surname></string-name></person-group>, &#x201C;<article-title>Automatic detection of melanomas: An application based on the ABCD criteria</article-title>,&#x201D; in <conf-name>Information Technologies in Biomedicine</conf-name>. <publisher-loc>Berlin, Heidelberg</publisher-loc>: <publisher-name>Springer</publisher-name>, <year>2012</year>, pp. <fpage>67</fpage>&#x2013;<lpage>76</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-642-31196-3_7</pub-id>.</mixed-citation></ref>
<ref id="ref-137"><label>[137]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R. P.</given-names> <surname>Braun</surname></string-name>, <string-name><given-names>H. S.</given-names> <surname>Rabinovitz</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Oliviero</surname></string-name>, <string-name><given-names>A. W.</given-names> <surname>Kopf</surname></string-name>, and <string-name><given-names>J. H.</given-names> <surname>Saurat</surname></string-name></person-group>, &#x201C;<article-title>Dermoscopy of pigmented skin lesions</article-title>,&#x201D; <source>J. Am. Acad. Dermatol.</source>, vol. <volume>52</volume>, no. <issue>1</issue>, pp. <fpage>109</fpage>&#x2013;<lpage>121</lpage>, <year>2005</year>. doi: <pub-id pub-id-type="doi">10.1016/j.jaad.2001.11.001</pub-id>; <pub-id pub-id-type="pmid">15627088</pub-id></mixed-citation></ref>
<ref id="ref-138"><label>[138]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>I.</given-names> <surname>Maglogiannis</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Pavlopoulos</surname></string-name>, and <string-name><given-names>D.</given-names> <surname>Koutsouris</surname></string-name></person-group>, &#x201C;<article-title>An integrated computer supported acquisition, handling, and characterization system for pigmented skin lesions in dermatological images</article-title>,&#x201D; <source>IEEE Trans. Inf. Technol. Biomed.</source>, vol. <volume>9</volume>, no. <issue>1</issue>, pp. <fpage>86</fpage>&#x2013;<lpage>98</lpage>, <year>2005</year>. doi: <pub-id pub-id-type="doi">10.1109/TITB.2004.837859</pub-id>; <pub-id pub-id-type="pmid">15787011</pub-id></mixed-citation></ref>
<ref id="ref-139"><label>[139]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Iyatomi</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>An improved Internet-based melanoma screening system with dermatologist-like tumor area extraction algorithm</article-title>,&#x201D; <source>Comput. Med. Imaging Graph.</source>, vol. <volume>32</volume>, no. <issue>7</issue>, pp. <fpage>566</fpage>&#x2013;<lpage>579</lpage>, <year>2008</year>. doi: <pub-id pub-id-type="doi">10.1016/j.compmedimag.2008.06.005</pub-id>; <pub-id pub-id-type="pmid">18703311</pub-id></mixed-citation></ref>
<ref id="ref-140"><label>[140]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P. P.</given-names> <surname>Tumpa</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Kabir</surname></string-name></person-group>, &#x201C;<article-title>An artificial neural network based detection and classification of melanoma skin cancer using hybrid texture features</article-title>,&#x201D; <source>Sens. Int.</source>, vol. <volume>2</volume>, no. <issue>1</issue>, <year>2021</year>, Art. no. 100128. doi: <pub-id pub-id-type="doi">10.1016/j.sintl.2021.100128</pub-id>.</mixed-citation></ref>
<ref id="ref-141"><label>[141]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. R. F.</given-names> <surname>Santos</surname></string-name>, <string-name><given-names>K. R. T.</given-names> <surname>Aires</surname></string-name>, and <string-name><given-names>R. M. S.</given-names> <surname>Veras</surname></string-name></person-group>, &#x201C;<article-title>Aspects of lighting and color in classifying malignant skin cancer with deep learning</article-title>,&#x201D; <source>Appl. Sci.</source>, vol. <volume>14</volume>, no. <issue>8</issue>, <year>2024</year>, Art. no. 3297. doi: <pub-id pub-id-type="doi">10.3390/app14083297</pub-id>.</mixed-citation></ref>
<ref id="ref-142"><label>[142]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Rey-Barroso</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Pe&#x00F1;a-Guti&#x00E9;rrez</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Y&#x00E1;&#x00F1;ez</surname></string-name>, <string-name><given-names>F. J.</given-names> <surname>Burgos-Fern&#x00E1;ndez</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Vilaseca</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Royo</surname></string-name></person-group>, &#x201C;<article-title>Optical technologies for the improvement of skin cancer diagnosis: A review</article-title>,&#x201D; <source>Sensors</source>, vol. <volume>21</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>31</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.3390/s21010252</pub-id>; <pub-id pub-id-type="pmid">33401739</pub-id></mixed-citation></ref>
<ref id="ref-143"><label>[143]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Naji</surname></string-name>, <string-name><given-names>H. A.</given-names> <surname>Jalab</surname></string-name>, and <string-name><given-names>S. A.</given-names> <surname>Kareem</surname></string-name></person-group>, &#x201C;<article-title>A survey on skin detection in colored images</article-title>,&#x201D; <source>Artif. Intell. Rev.</source>, vol. <volume>52</volume>, no. <issue>2</issue>, pp. <fpage>1041</fpage>&#x2013;<lpage>1087</lpage>, <year>2019</year>. doi: <pub-id pub-id-type="doi">10.1007/s10462-018-9664-9</pub-id>.</mixed-citation></ref>
<ref id="ref-144"><label>[144]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Hao</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Salp swarm algorithm with iterative mapping and local escaping for multi-level threshold image segmentation: A skin cancer dermoscopic case study</article-title>,&#x201D; <source>J. Comput. Des. Eng.</source>, vol. <volume>10</volume>, no. <issue>2</issue>, pp. <fpage>655</fpage>&#x2013;<lpage>693</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.1093/jcde/qwad006</pub-id>.</mixed-citation></ref>
<ref id="ref-145"><label>[145]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Olmez</surname></string-name>, <string-name><given-names>G. O.</given-names> <surname>Koca</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Sengur</surname></string-name>, <string-name><given-names>U. R.</given-names> <surname>Acharya</surname></string-name>, and <string-name><given-names>H.</given-names> <surname>Mir</surname></string-name></person-group>, &#x201C;<article-title>Improved PSO with visit table and multiple direction search strategies for skin cancer image segmentation</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>12</volume>, no. <issue>1</issue>, pp. <fpage>840</fpage>&#x2013;<lpage>867</lpage>, <year>2024</year>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2023.3347587</pub-id>.</mixed-citation></ref>
<ref id="ref-146"><label>[146]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Cho</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Haralick</surname></string-name>, and <string-name><given-names>S.</given-names> <surname>Yi</surname></string-name></person-group>, &#x201C;<article-title>Improvement of kittler and illingworth&#x2019;s minimum error thresholding</article-title>,&#x201D; <source>Pattern Recognit.</source>, vol. <volume>22</volume>, no. <issue>5</issue>, pp. <fpage>609</fpage>&#x2013;<lpage>617</lpage>, <year>1989</year>. doi: <pub-id pub-id-type="doi">10.1016/0031-3203(89)90029-0</pub-id>.</mixed-citation></ref>
<ref id="ref-147"><label>[147]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J. N.</given-names> <surname>Kapur</surname></string-name>, <string-name><given-names>P. K.</given-names> <surname>Sahoo</surname></string-name>, and <string-name><given-names>A. K. C.</given-names> <surname>Wong</surname></string-name></person-group>, &#x201C;<article-title>A new method for gray-level picture thresholding using the entropy of the histogram</article-title>,&#x201D; <source>Comput. Vision, Graph. Image Process</source>, vol. <volume>29</volume>, no. <issue>3</issue>, pp. <fpage>273</fpage>&#x2013;<lpage>285</lpage>, <year>1985</year>. doi: <pub-id pub-id-type="doi">10.1016/0734-189X(85)90125-2</pub-id>.</mixed-citation></ref>
<ref id="ref-148"><label>[148]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. H.</given-names> <surname>Kwon</surname></string-name></person-group>, &#x201C;<article-title>Threshold selection based on cluster analysis</article-title>,&#x201D; <source>Pattern Recognit. Lett.</source>, vol. <volume>25</volume>, no. <issue>9</issue>, pp. <fpage>1045</fpage>&#x2013;<lpage>1050</lpage>, <year>2004</year>. doi: <pub-id pub-id-type="doi">10.1016/j.patrec.2004.03.001</pub-id>.</mixed-citation></ref>
<ref id="ref-149"><label>[149]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Jiang</surname></string-name>, and <string-name><given-names>H.</given-names> <surname>Feng</surname></string-name></person-group>, &#x201C;<article-title>A novel fuzzy classification entropy approach to image thresholding</article-title>,&#x201D; <source>Pattern Recognit. Lett.</source>, vol. <volume>27</volume>, no. <issue>16</issue>, pp. <fpage>1968</fpage>&#x2013;<lpage>1975</lpage>, <year>2006</year>. doi: <pub-id pub-id-type="doi">10.1016/j.patrec.2006.05.006</pub-id>.</mixed-citation></ref>
<ref id="ref-150"><label>[150]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K. P. Baby</given-names> <surname>Resma</surname></string-name> and <string-name><given-names>M. S.</given-names> <surname>Nair</surname></string-name></person-group>, &#x201C;<article-title>Multilevel thresholding for image segmentation using Krill Herd Optimization algorithm</article-title>,&#x201D; <source>J. King Saud Univ.-Comput. Inf. Sci.</source>, vol. <volume>33</volume>, no. <issue>5</issue>, pp. <fpage>528</fpage>&#x2013;<lpage>541</lpage>, <year>2021</year>. doi: <pub-id pub-id-type="doi">10.1016/j.jksuci.2018.04.007</pub-id>.</mixed-citation></ref>
<ref id="ref-151"><label>[151]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Q.</given-names> <surname>Huang</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Gao</surname></string-name>, and <string-name><given-names>W.</given-names> <surname>Cai</surname></string-name></person-group>, &#x201C;<article-title>Thresholding technique with adaptive window selection for uneven lighting image</article-title>,&#x201D; <source>Pattern Recognit. Lett.</source>, vol. <volume>26</volume>, no. <issue>6</issue>, pp. <fpage>801</fpage>&#x2013;<lpage>808</lpage>, <year>2005</year>. doi: <pub-id pub-id-type="doi">10.1016/j.patrec.2004.09.035</pub-id>.</mixed-citation></ref>
<ref id="ref-152"><label>[152]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. S. N.</given-names> <surname>Alwerfali</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>A multilevel image thresholding based on hybrid salp swarm algorithm and fuzzy entropy</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>7</volume>, pp. <fpage>181405</fpage>&#x2013;<lpage>181422</lpage>, <year>2019</year>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2959325</pub-id>.</mixed-citation></ref>
<ref id="ref-153"><label>[153]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Niaz</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Iqbal</surname></string-name>, <string-name><given-names>A. A.</given-names> <surname>Memon</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Munir</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Kim</surname></string-name> and <string-name><given-names>K. N.</given-names> <surname>Choi</surname></string-name></person-group>, &#x201C;<article-title>Edge-based local and global energy active contour model driven by signed pressure force for image segmentation</article-title>,&#x201D; <source>IEEE Trans. Instrum. Meas.</source>, vol. <volume>72</volume>, pp. <fpage>1</fpage>&#x2013;<lpage>14</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.1109/TIM.2023.3317481</pub-id>.</mixed-citation></ref>
<ref id="ref-154"><label>[154]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Ge</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Wang</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Weng</surname></string-name>, and <string-name><given-names>H.</given-names> <surname>Chen</surname></string-name></person-group>, &#x201C;<article-title>An overview of intelligent image segmentation using active contour models</article-title>,&#x201D; <source>Intell. Robot.</source>, vol. <volume>3</volume>, no. <issue>1</issue>, pp. <fpage>23</fpage>&#x2013;<lpage>55</lpage>, <year>2023</year>. doi: <pub-id pub-id-type="doi">10.20517/ir.2023.02</pub-id>.</mixed-citation></ref>
<ref id="ref-155"><label>[155]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Yuan</surname></string-name> and <string-name><given-names>C.</given-names> <surname>He</surname></string-name></person-group>, &#x201C;<article-title>Adaptive active contours without edges</article-title>,&#x201D; <source>Math. Comput. Model.</source>, vol. <volume>55</volume>, no. <issue>5&#x2013;6</issue>, pp. <fpage>1705</fpage>&#x2013;<lpage>1721</lpage>, <year>2012</year>. doi: <pub-id pub-id-type="doi">10.1016/j.mcm.2011.11.014</pub-id>.</mixed-citation></ref>
<ref id="ref-156"><label>[156]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Mendonca</surname></string-name>, <string-name><given-names>P. M.</given-names> <surname>Ferreira</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Marques</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Marcal</surname></string-name>, and <string-name><given-names>J.</given-names> <surname>Rozeira</surname></string-name></person-group>, &#x201C;<article-title>PH<sup>2</sup>&#x2014;A dermoscopic image database for research and benchmarking</article-title>,&#x201D; in <conf-name>2013 35th Annu. Int. Conf. IEEE Eng. Med. Biol. Soc. (EMBC)</conf-name>, <publisher-loc>Osaka, Japan</publisher-loc>, <year>2013</year>, pp. <fpage>5437</fpage>&#x2013;<lpage>5440</lpage>. doi: <pub-id pub-id-type="doi">10.1109/EMBC.2013.6610779</pub-id>; <pub-id pub-id-type="pmid">24110966</pub-id></mixed-citation></ref>
</ref-list>
</back></article>