<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CSSE</journal-id>
<journal-id journal-id-type="nlm-ta">CSSE</journal-id>
<journal-id journal-id-type="publisher-id">CSSE</journal-id>
<journal-title-group>
<journal-title>Computer Systems Science &#x0026; Engineering</journal-title>
</journal-title-group>
<issn pub-type="ppub">0267-6192</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">38959</article-id>
<article-id pub-id-type="doi">10.32604/csse.2023.038959</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Computational Intelligence Driven Secure Unmanned Aerial Vehicle Image Classification in Smart City Environment</article-title>
<alt-title alt-title-type="left-running-head">Computational Intelligence Driven Secure Unmanned Aerial Vehicle Image Classification in Smart City Environment</alt-title>
<alt-title alt-title-type="right-running-head">Computational Intelligence Driven Secure Unmanned Aerial Vehicle Image Classification in Smart City Environment</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Abedi</surname><given-names>Firas</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Ghanimi</surname><given-names>Hayder M. A.</given-names></name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Algarni</surname><given-names>Abeer D.</given-names></name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-4" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Soliman</surname><given-names>Naglaa F.</given-names></name><xref ref-type="aff" rid="aff-3">3</xref><email>nfsoliman@pnu.edu.sa</email></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>El-Shafai</surname><given-names>Walid</given-names></name><xref ref-type="aff" rid="aff-4">4</xref><xref ref-type="aff" rid="aff-5">5</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Abbas</surname><given-names>Ali Hashim</given-names></name><xref ref-type="aff" rid="aff-6">6</xref></contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western"><surname>Kareem</surname><given-names>Zahraa H.</given-names></name><xref ref-type="aff" rid="aff-7">7</xref></contrib>
<contrib id="author-8" contrib-type="author">
<name name-style="western"><surname>Hariz</surname><given-names>Hussein Muhi</given-names></name><xref ref-type="aff" rid="aff-8">8</xref></contrib>
<contrib id="author-9" contrib-type="author">
<name name-style="western"><surname>Alkhayyat</surname><given-names>Ahmed</given-names></name><xref ref-type="aff" rid="aff-9">9</xref></contrib>
<aff id="aff-1"><label>1</label><institution>Department of Mathematics, College of Education, Al-Zahraa University for Women</institution>, <addr-line>Karbala</addr-line>, <country>Iraq</country></aff>
<aff id="aff-2"><label>2</label><institution>Biomedical Engineering Department, College of Engineering, University of Warith Al-Anbiyaa</institution>, <addr-line>Karbala</addr-line>, <country>Iraq</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Information Technology, College of Computer and Information Sciences, Princess Nourah bint Abdulrahman University</institution>, <addr-line>P.O. Box 84428, Riyadh, 11671</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-4"><label>4</label><institution>Security Engineering Lab, Computer Science Department, Prince Sultan University</institution>, <addr-line>Riyadh, 11586</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Electronics and Electrical Communications Engineering, Faculty of Electronic Engineering, Menoufia University</institution>, <addr-line>Menouf, 32952</addr-line>, <country>Egypt</country></aff>
<aff id="aff-6"><label>6</label><institution>College of Information Technology, Imam Jaafar Al-Sadiq University</institution>, <addr-line>Al-Muthanna, 66002</addr-line>, <country>Iraq</country></aff>
<aff id="aff-7"><label>7</label><institution>Department of Medical Instrumentation Techniques Engineering, Al-Mustaqbal University College</institution>, <addr-line>Hillah, 51001</addr-line>, <country>Iraq</country></aff>
<aff id="aff-8"><label>8</label><institution>Computer Engineering Department, Mazaya University College</institution>, <addr-line>Dhi Qar</addr-line>, <country>Iraq</country></aff>
<aff id="aff-9"><label>9</label><institution>College of Technical Engineering, The Islamic University</institution>, <addr-line>Najaf</addr-line>, <country>Iraq</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Naglaa F. Soliman. Email: <email>nfsoliman@pnu.edu.sa</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic"><year>2023</year></pub-date>
<pub-date date-type="pub" publication-format="electronic"><day>09</day><month>11</month><year>2023</year></pub-date>
<volume>47</volume>
<issue>3</issue>
<fpage>3127</fpage>
<lpage>3144</lpage>
<history>
<date date-type="received"><day>05</day><month>1</month><year>2023</year></date>
<date date-type="accepted"><day>11</day><month>4</month><year>2023</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Abedi et al.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Abedi et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CSSE_38959.pdf"></self-uri>
<abstract>
<p>Computational intelligence (CI) is a group of nature-simulated computational models and processes for addressing difficult real-life problems. The CI is useful in the UAV domain as it produces efficient, precise, and rapid solutions. Besides, unmanned aerial vehicles (UAVs) have become a hot research topic in the smart city environment. Despite the benefits of UAVs, security remains a major challenging issue. In addition, deep learning (DL) enabled image classification is useful for several applications such as land cover classification, smart buildings, etc. This paper proposes novel meta-heuristics with a deep learning-driven secure UAV image classification (MDLS-UAVIC) model in a smart city environment. The major purpose of the MDLS-UAVIC algorithm is to securely encrypt the images and classify them into distinct class labels. The proposed MDLS-UAVIC model follows a two-stage process: encryption and image classification. The encryption technique for image encryption effectively encrypts the UAV images. Next, the image classification process involves an Xception-based deep convolutional neural network for the feature extraction process. Finally, shuffled shepherd optimization (SSO) with a recurrent neural network (RNN) model is applied for UAV image classification, showing the novelty of the work. The experimental validation of the MDLS-UAVIC approach is tested utilizing a benchmark dataset, and the outcomes are examined in various measures. It achieved a high accuracy of 98&#x0025;.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Computational intelligence</kwd>
<kwd>unmanned aerial vehicles</kwd>
<kwd>deep learning</kwd>
<kwd>metaheuristics</kwd>
<kwd>smart city</kwd>
<kwd>image encryption</kwd>
<kwd>image classification</kwd>
</kwd-group>
<funding-group>
<award-group id="awg1">
<funding-source>Deputyship for Research &#x0026; Innovation, Ministry of Education in Saudi Arabia</funding-source>
<award-id>RI-44-0446</award-id>
</award-group>
</funding-group>
</article-meta>
</front>
<body>
<sec id="s1"><label>1</label><title>Introduction</title>
<p>The concept of a smart city has become a prominent research field worldwide. The number of stationary sensors and the amount of data gathered by a surveillance camera and other devices placed in a smart city are massive. Using a mobile platform to replace them might decrease resource and energy costs [<xref ref-type="bibr" rid="ref-1">1</xref>]. The smart city paradigm directly connects the telecommunication industry to sustainable economic growth and better living standards with advanced techniques such as unmanned aerial vehicles (UAVs). Over the last few years, the advancement of UAVs has gained considerable attention due to essential characteristics like the ability to establish line of sight (LOS) links with the user, mobility, and easy deployment [<xref ref-type="bibr" rid="ref-2">2</xref>]. Generally, UAVs are categorized into fixed-wing and rotary-wing UAVs. All kinds of UAV are adapted to a particular kind of application. For instance, fixed-wing UAV is best suited for the type of mission whereby stationarity is not needed, for example, military applications, namely surveillance and attack. But rotary-wing UAV has increasingly complex aerodynamics [<xref ref-type="bibr" rid="ref-3">3</xref>]. Also, they can remain stationary at a specified location, but they cannot implement long-range missions [<xref ref-type="bibr" rid="ref-4">4</xref>]. Affordability and ease of use are two major elements for the extensive usage of UAVs in military and civilian applications [<xref ref-type="bibr" rid="ref-5">5</xref>]. Images taken through UAVs are utilized for geographical information system databases, data collection for agricultural mapping, land use, automatic decision-making, urban planning, environmental monitoring and assessment, and land cover detection [<xref ref-type="bibr" rid="ref-6">6</xref>]. Because of the quality of UAV images at present, abstracting reliable characteristics for forming data collection is less of a problem. 
Illustrations of these features are land cover characteristics (spectral and geometrical) from hyperspectral data and Light Detection and Ranging (LiDAR) [<xref ref-type="bibr" rid="ref-7">7</xref>]. In addition, the amalgamation of various sources (passive or active sensors) or multimodal data (data with distinct features) is suggested for improving land cover classification.</p>
<p>Over the past few years, the arrival of deep learning (DL) methods has provided strong and brilliant techniques for enhancing the mapping of the earth&#x2019;s surface [<xref ref-type="bibr" rid="ref-8">8</xref>]. DL is an artificial neural network (ANN) technique of deeper combinations and numerous hidden layers accountable for maximizing and returning superior learning models over a general ANN. A splendid volume of revision materials exists in the scientific chronicles describing DL-related methods, common usage, and its historical evolution, along with that briefing functions and networks [<xref ref-type="bibr" rid="ref-9">9</xref>]. In recent years, as computer processing and labeled instances (i.e., samples) have become much more accessible, the outcomes of deep neural networks (DNNs) have risen in image-processing applications. DNN has been implemented in data-driven approaches successfully. But more should be covered under this to understand its efficiency and restrictions [<xref ref-type="bibr" rid="ref-10">10</xref>]. From this point of view, various studies on the application of DL in remote sensing have been advanced in general as well as in specific contexts to describe its significance in a better way.</p>
<p>There are some restrictions that can be associated with the use of computational intelligence techniques for secure unmanned aerial vehicle (UAV) image classification in a smart city environment:
<list list-type="simple">
<list-item><label>&#x2010;</label><p>Limited availability of UAVs: UAVs may not be readily available or accessible in all smart city environments, which can limit the effectiveness and feasibility of implementing a UAV image classification system.</p></list-item>
<list-item><label>&#x2010;</label><p>Cost: The cost of acquiring and maintaining UAVs and associated equipment can be prohibitive for some cities and organizations, especially for those with limited budgets.</p></list-item>
<list-item><label>&#x2010;</label><p>Limited battery life and range: UAVs have limited battery life and range, which can restrict the amount of time they can be used for image classification and the distance they can travel to collect data.</p></list-item>
<list-item><label>&#x2010;</label><p>Technical expertise: Implementing a UAV image classification system requires technical expertise in areas such as machine learning, computer vision, and UAV operation. This expertise may not be available or accessible to all organizations.</p></list-item>
<list-item><label>&#x2010;</label><p>Public perception: The use of UAVs in a smart city environment may be perceived as intrusive or invasive by members of the public, which can lead to opposition and negative public sentiment.</p></list-item>
<list-item><label>&#x2010;</label><p>Weather conditions: Adverse weather conditions such as strong winds, heavy rain, and low visibility can restrict the use of UAVs, limiting the reliability and effectiveness of image classification systems.</p></list-item>
<list-item><label>&#x2010;</label><p>Regulatory challenges: The use of UAVs is subject to regulatory challenges, including airspace regulations, licensing requirements, and data privacy laws. Compliance with these regulations can be time-consuming and complex.</p></list-item>
</list></p>
<p>This study designs a novel metaheuristic with a deep learning-driven secure UAV image classification (MDLS-UAVIC) model in a smart city environment. The proposed MDLS-UAVIC model uses the signcryption technique to encrypt UAV images effectively. Next, the image classification process involves an Xception-based deep convolutional neural network for the feature extraction process. Finally, shuffled shepherd optimization with a recurrent neural network model is applied for UAV image classification. The experimental validation of the MDLS-UAVIC approach was tested employing a benchmark dataset, and the outcomes are examined in various measures.</p>
</sec>
<sec id="s2"><label>2</label><title>Related Works</title>
<p>This section offers a brief review of existing UAV-based image classifier approaches. Raj [<xref ref-type="bibr" rid="ref-11">11</xref>] presented a blockchain method to gather healthcare information from the user and save it on a nearby server. The UAV communicates with body sensor hives (BSH) via a low power secured method. This technique can be recognized by a token where the UAV establishes relationships with the BSH. Shibli et al. [<xref ref-type="bibr" rid="ref-12">12</xref>] introduced an AI drone-based encrypted ML of image classifier with a pretrained CNN and image encrypt-decrypt using XOR-Secret-Key block cipher cryptology and singular value decomposition (SVD). Firstly, a pre-trained convolution neural network (CNN) is widely employed for extracting and classifying image features exploiting ML training tool features.</p>
<p>The researchers in [<xref ref-type="bibr" rid="ref-13">13</xref>] focused on the structure of the share creation (SC) system using the social spider optimization based ECC method named SC-SSOECC for a secured image communication system in UAV. Initially, the presented method separates the color bands (RGB) for all the images. Next, the generation of the SC system occurs for all the images, making it difficult for the hacker to retrieve the original images. Mardiyanto et al. [<xref ref-type="bibr" rid="ref-14">14</xref>] proposed an analogue video communication security for UAVs using assembling arbitrary image pieces using the Linear Feedback Shift Register approach and image encryption technique with Pseudo Random Number Generator. The LFSR is a seed that acts as a key to the randomization pattern from the image processed by software and taken by the camera on Raspberry Pi.</p>
<p>Abualsauod [<xref ref-type="bibr" rid="ref-15">15</xref>] classified and analyzed the study on the UAV IoT framework and recognized the solution to the problem associated with the security comprising privacy of the framework. In this study, an optimal solution for different reliability and security problems in UAV-assisted IoT applications is presented that uses the combination of different techniques merging blockchain-based techniques. Punithavathi et al. [<xref ref-type="bibr" rid="ref-16">16</xref>] introduced an optimum dense convolution network (DenseNet) using a BiLSTM-based image classifier method named optimum DenseNet (ODN)-Bi-LSTM for UAV-based ad-hoc network. Kumar et al. [<xref ref-type="bibr" rid="ref-17">17</xref>] projected a secure privacy-preserving framework (SP2F) for intelligent agriculture UAV technique. The presented architecture contains a DL-based anomaly detection technique and a two-level privacy system.</p>
</sec>
<sec id="s3"><label>3</label><title>The Proposed Model</title>
<p>This study establishes a novel MDLS-UAVIC algorithm to securely encrypt the images and classify them into distinct class labels in the smart city environment. The MDLS-UAVIC model encompasses a series of processes: signcryption, Xception-based feature extraction, RNN classification, and SSO-based hyperparameter optimization. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> illustrates the overall MDLS-UAVIC technique.</p>
<fig id="fig-1"><label>Figure 1</label><caption><title>Working process of MDLS-UAVIC technique</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_38959-fig-1.tif"/></fig>
<sec id="s3_1"><label>3.1</label><title>Image Encryption Module</title>
<p>At the primary level, the proposed MDLS-UAVIC approach uses encryption techniques to encrypt the UAV images effectively. The security system is a public key encryption technique using a digital signature that might increase availability, confidentiality, integrity, authenticity, and nonrepudiation [<xref ref-type="bibr" rid="ref-18">18</xref>]. A single session key is used again for encryption to obtain an effective performance for signcryption compared to the conventional encryption technique. The encryption technique has three phases: encryption, unsigncryption, and key generation. A signature provides authenticity, and encryption provides confidentiality simultaneously. It includes encryption, unsigncryption, generation of keys, and parameter initialization phase. Initially, the signature-based security analysis assigns certain parameters, namely large prime numbers for sender and receiver keys, key generation, and hash values. The initial parameter is <inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mi>S</mml:mi><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mspace width="thinmathspace" /><mml:mi>S</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mspace width="thinmathspace" /><mml:mi>R</mml:mi><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:mi>R</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula>.
<list list-type="bullet">
<list-item><p>Signcryption Phase</p>
<p>(i) This encryption technique transfers the information to the receiver after security analysis; at this point, motion vectors, hash, and one-key hash value-based encryption data are considered. The transformation of plain data to ciphered data can be defined as follows. Firstly, the sender transmits the data with a proper value <italic>A</italic> ranging from [<inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:mn>1</mml:mn><mml:mo>&#x2026;</mml:mo><mml:mi>P</mml:mi><mml:mi>F</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mtext>I</mml:mtext></mml:mrow><mml:mo stretchy="false">]</mml:mo><mml:mo>.</mml:mo></mml:math></inline-formula></p>
<p>(ii) Evaluate hash values of the sender applied by the receiver <inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:mi>R</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <italic>A</italic>. The output <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of the hash value is 128-bit. The mathematical expression of the hash value has been given in the following:
<disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>H</mml:mi><mml:mi>A</mml:mi><mml:mi>S</mml:mi><mml:mi>H</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>R</mml:mi><mml:msubsup><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mrow><mml:mi>A</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x2217;</mml:mo><mml:mi>m</mml:mi><mml:mi>o</mml:mi><mml:mi>d</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>P</mml:mi><mml:mi>N</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:math></disp-formula>iii) The resultant value of 128 bits is divided into 2 bits of sixty-four bits, as <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>1</mml:mn></mml:math></inline-formula> and <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>2.</mml:mn></mml:math></inline-formula></p>
<p>iv) The sender encrypts the data for encryption <italic>E</italic> and <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>1</mml:mn></mml:math></inline-formula>. The cipher information <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is defined in the following:
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>E</mml:mi><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>1</mml:mn><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>f</mml:mi><mml:mi>o</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:math></disp-formula>v) Next, the <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>2</mml:mn></mml:math></inline-formula> value is employed efficiently from the one-key hash function <inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:msub><mml:mi>K</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> to hash the information that results in a 128-bit hash as follows:
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:mi>F</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mi>K</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>2</mml:mn><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>f</mml:mi><mml:mi>o</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:math></disp-formula>vi) Lastly, the encryption of the information is evaluated, and the cipher data are given in the following:
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:mi>S</mml:mi><mml:mo>=</mml:mo><mml:mi>A</mml:mi><mml:msub><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>F</mml:mi><mml:mo>+</mml:mo><mml:msub><mml:mi>A</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>O</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msub><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mi>m</mml:mi><mml:mi>o</mml:mi><mml:mi>d</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext>PF</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></disp-formula>vii) From the calculation, three different values, F and <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, are transmitted to the sender as well as the receiver.</p></list-item>
<list-item><p>Unsigncryption Phase</p>
<p>i) At the receiver end, the decryption method, that is, the unsigncryption technique, is implemented after receiving the encrypted information, that is, <italic>F</italic> and <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. The receiver is capable of decrypting it using the subsequent steps.</p>
<p>ii) The receiver keys <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:mi>S</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:mi>S</mml:mi><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> with encryption data are transformed to 128-bit output decrypted data.
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>H</mml:mi><mml:mi>A</mml:mi><mml:mi>S</mml:mi><mml:mi>H</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>R</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x2217;</mml:mo><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mi>F</mml:mi></mml:mrow></mml:msup><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2217;</mml:mo><mml:mi>S</mml:mi><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mi>m</mml:mi><mml:mi>o</mml:mi><mml:mi>d</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>P</mml:mi><mml:mi>N</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:math></disp-formula>iii) The inverse operation of encryption can be executed; that is, 128-bit data is separated into sixty-four bits of 2 key pairs.
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>1</mml:mn><mml:mtext>&#x00A0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>2.</mml:mn></mml:math></disp-formula></p> 
<p>iv) The receiver uses the output key <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>1</mml:mn></mml:math></inline-formula> for decrypting the cipher data <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and then the decrypted data is described by&#x2009;<inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:mo>=</mml:mo><mml:mi>D</mml:mi><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>.</p> 
<p>v) After completing the abovementioned process, valid data is attained as:
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:msub><mml:mi>K</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow></mml:msub><mml:msub><mml:mi>H</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub><mml:mn>2</mml:mn><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>f</mml:mi><mml:mi>o</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>F</mml:mi><mml:mo>.</mml:mo></mml:math></disp-formula></p> 
<p>vi) If it is equal, the message is considered a verified message; otherwise, it is invalid.</p></list-item>
</list></p>
</sec>
<sec id="s3_2"><label>3.2</label><title>Image Classification Module</title>
<p>For the image classification process, the MDLS-UAVIC approach comprises feature extraction, classification, and parameter optimization. The working process of each module is elaborated in the following sections.</p>
<sec id="s3_2_1"><label>3.2.1</label><title>Feature Extraction</title>
<p>The Xception approach is used in this work to develop a helpful feature vector group. Xception [<xref ref-type="bibr" rid="ref-19">19</xref>] represents &#x201C;extreme inception&#x201D;. Xception was proposed in 2016. The Xception module is thirty-six layers deep, except for the fully connected (FC) layer at the end. Different from InceptionV3, Xception processes the input as a compact block and maps the spatial correlations for each channel separately, and a 1&#x2009;&#x00D7;&#x2009;1 depthwise convolutional layer is implemented for capturing cross-channel relationships. This work introduces a pre-trained Xception method (trained on ImageNet data). Then, the module is fine-tuned on UAV images. During fine-tuning, Xception gives an input image of 224&#x2009;&#x00D7;&#x2009;224&#x2009;&#x00D7;&#x2009;3 that goes with shortcuts and a depthwise separable layer.</p>
</sec>
<sec id="s3_2_2"><label>3.2.2</label><title>Image Classification Using RNN Model</title>
<p>For the image classification procedure, the derived features are passed into the RNN approach, which assigns proper class labels. RNN is extensively utilized for analyzing sequence datasets, namely machine translation and speech recognition, considering that sequential dataset <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:mi>x</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x22EF;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>, whereas <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>t</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x22EF;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>T</mml:mi><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula> denotes the data at <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:mi>t</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math></inline-formula> time step. 
While using RNN to HSI classifications, <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> corresponds to the spectral values at <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:mi>t</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math></inline-formula> band. In RNN, the output of the hidden state at time <italic>t</italic> can be expressed as follows [<xref ref-type="bibr" rid="ref-20">20</xref>]:
<disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>&#x03C6;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>whereas <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> represents a bias vector, <inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:mi>&#x03C6;</mml:mi></mml:math></inline-formula> denotes a non-linear activation function, namely hyperbolic tangent or logistic sigmoid functions,<inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>h</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denote the weight matrix from the existing input to hidden layers and preceding hidden layers to existing hidden layers, <inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> indicates the output of hidden state at the prior time, correspondingly. 
We observed from the formula that the context relationship in the time domain would be created using a recurrent connection. Usually, <inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> captures maximum time data for the sequential dataset. For classifier tasks, <inline-formula id="ieqn-30"><mml:math id="mml-ieqn-30"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is frequently fed into the output layer, in addition to the probability that a softmax function derives the sequence belonging to <inline-formula id="ieqn-31"><mml:math id="mml-ieqn-31"><mml:mi>i</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math></inline-formula> class. This process is expressed in the following:
<disp-formula id="ueqn-1">
<mml:math id="mml-ueqn-1" display="block"><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula>
<disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:mi>P</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mspace width="thinmathspace" /><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo>&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mo>=</mml:mo><mml:mi>i</mml:mi><mml:mo fence="false" stretchy="false">|</mml:mo><mml:mi>&#x03B8;</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>b</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msup><mml:mrow><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msubsup><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>O</mml:mi><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
<p>Now <inline-formula id="ieqn-32"><mml:math id="mml-ieqn-32"><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>o</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the weight matrixes from hidden to output layers, <inline-formula id="ieqn-33"><mml:math id="mml-ieqn-33"><mml:msub><mml:mi>b</mml:mi><mml:mrow><mml:mi>o</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates a bias vector, <inline-formula id="ieqn-34"><mml:math id="mml-ieqn-34"><mml:mi>&#x03B8;</mml:mi></mml:math></inline-formula> and <italic>b</italic> represent variables of softmax function, <italic>C</italic> signifies the class count to differentiate. The succeeding loss function is given as follows:
<disp-formula id="eqn-10"><label>(10)</label><mml:math id="mml-eqn-10" display="block"><mml:mrow><mml:mrow><mml:mi>&#x02112;</mml:mi></mml:mrow></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>N</mml:mi></mml:mrow></mml:msubsup><mml:mrow><mml:mo>[</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext>log</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo>&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext>log</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo>&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Here, <italic>N</italic> indicates the number of trained instances,<inline-formula id="ieqn-35"><mml:math id="mml-ieqn-35"><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mrow><mml:mover><mml:mi>y</mml:mi><mml:mo>&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-36"><mml:math id="mml-ieqn-36"><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> corresponding to the predicted and true labels of the <inline-formula id="ieqn-37"><mml:math id="mml-ieqn-37"><mml:mi>i</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo></mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math></inline-formula> trained instance.</p>
</sec>
<sec id="s3_2_3"><label>3.2.3</label><title>Hyperparameter Optimization</title>
<p>Finally, the SSO technique has been employed to adjust the RNN model&#x2019;s hyperparameters properly. SSO technique is a multi-community (MC) population-based metaheuristic algorithm that imitates the nature of a shepherd. The steps included in the SSO approach are shown in [<xref ref-type="bibr" rid="ref-21">21</xref>]. Initially, the SSO approach starts by arbitrarily creating members of the community (MOC) in the search space as follows:
<disp-formula id="ueqn-2">
<mml:math id="mml-ueqn-2" display="block"><mml:msubsup><mml:mrow><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>&#x00A0;min&#x00A0;</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mo movablelimits="true" form="prefix">max</mml:mo></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mo movablelimits="true" form="prefix">min</mml:mo></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>;</mml:mo></mml:math></disp-formula>
<disp-formula id="eqn-11"><label>(11)</label><mml:math id="mml-eqn-11" display="block"><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>m</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>n</mml:mi></mml:math></disp-formula></p>
<p>Here, <inline-formula id="ieqn-38"><mml:math id="mml-ieqn-38"><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:math></inline-formula> indicates arbitrary number lies within [0, 1]; <inline-formula id="ieqn-39"><mml:math id="mml-ieqn-39"><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mo movablelimits="true" form="prefix">min</mml:mo></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-40"><mml:math id="mml-ieqn-40"><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mo movablelimits="true" form="prefix">max</mml:mo></mml:mrow></mml:msub></mml:math></inline-formula> correspondingly signify lower and upper limits; <italic>m</italic> denotes community amount, and <italic>n</italic> indicates the number of members. With this regard, it is considered that the total amount of members of the community can be obtained using the following equation:
<disp-formula id="eqn-12"><label>(12)</label><mml:math id="mml-eqn-12" display="block"><mml:mi>n</mml:mi><mml:mi>M</mml:mi><mml:mi>C</mml:mi><mml:mo>=</mml:mo><mml:mi>m</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>n</mml:mi></mml:math></disp-formula></p>
<p>In the shuffling method, the <italic>m</italic> members of the community are selected based on their objective function and arranged randomly in the first column of the MC matrix as the first members of the community. Next, to generate the second column of MC, the <italic>m</italic> members are chosen corresponding to the preceding step and arranged randomly in the column. The process is implemented <italic>n</italic> times separately until the MC matrix is generated as:
<disp-formula id="eqn-13"><label>(13)</label><mml:math id="mml-eqn-13" display="block"><mml:mi>M</mml:mi><mml:mi>C</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mtable columnalign="left left left left left left" rowspacing="4pt" columnspacing="1em">
<mml:mtr><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr>
<mml:mtr><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr>
<mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr>
<mml:mtr><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr>
<mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr>
<mml:mtr><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>m</mml:mi><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>m</mml:mi><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>m</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x22EF;</mml:mo></mml:mtd><mml:mtd><mml:mi>M</mml:mi><mml:mi>O</mml:mi><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>m</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr>
</mml:mtable><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>A step size of motion for each community member can be estimated based on two vectors. It is expressed in the following:
<disp-formula id="eqn-14"><label>(14)</label><mml:math id="mml-eqn-14" display="block"><mml:msub><mml:mrow><mml:mtext>stepsise</mml:mtext></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mtext>stepsize</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mtext>Worse</mml:mtext></mml:mrow></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msubsup><mml:mrow><mml:mrow><mml:mtext>stepsize</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mtext>Better</mml:mtext></mml:mrow></mml:mrow></mml:msubsup><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>m</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>n</mml:mi></mml:math></disp-formula>whereas <inline-formula id="ieqn-41"><mml:math id="mml-ieqn-41"><mml:msubsup><mml:mrow><mml:mrow><mml:mtext>stepse</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mtext>Worse</mml:mtext></mml:mrow></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula id="ieqn-42"><mml:math 
id="mml-ieqn-42"><mml:msubsup><mml:mrow><mml:mrow><mml:mtext>stepsise</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mtext>Better</mml:mtext></mml:mrow></mml:mrow></mml:msubsup></mml:math></inline-formula> are defined as follows:
<disp-formula id="eqn-15"><label>(15)</label><mml:math id="mml-eqn-15" display="block"><mml:msubsup><mml:mrow><mml:mrow><mml:mtext>stepsize</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mtext>Worse</mml:mtext></mml:mrow></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>&#x03B1;</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>w</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="eqn-16"><label>(16)</label><mml:math id="mml-eqn-16" display="block"><mml:msubsup><mml:mrow><mml:mrow><mml:mtext>stepsize</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mtext>Better</mml:mtext></mml:mrow></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>&#x03B2;</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Now, <inline-formula id="ieqn-43"><mml:math id="mml-ieqn-43"><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-44"><mml:math id="mml-ieqn-44"><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> characterize arbitrary vectors;<inline-formula id="ieqn-45"><mml:math id="mml-ieqn-45"><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>w</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> (chosen sheep) and <inline-formula id="ieqn-46"><mml:math id="mml-ieqn-46"><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>(chosen horse) denote optimum and worse member-based objective function values associated with <inline-formula id="ieqn-47"><mml:math id="mml-ieqn-47"><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula>(shepherd). It is worth declaring that the primary member of <inline-formula id="ieqn-48"><mml:math id="mml-ieqn-48"><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> community <inline-formula id="ieqn-49"><mml:math id="mml-ieqn-49"><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> does not take a member better than itself. 
Hence, <inline-formula id="ieqn-50"><mml:math id="mml-ieqn-50"><mml:msubsup><mml:mrow><mml:mrow><mml:mtext>stepsize</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mrow><mml:mtext>Better</mml:mtext></mml:mrow></mml:mrow></mml:msubsup></mml:math></inline-formula> is equal to zero. On the other hand, <inline-formula id="ieqn-51"><mml:math id="mml-ieqn-51"><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> does not have a member worse than itself because it is the last member of the <italic>i-th</italic> community. Therefore, <inline-formula id="ieqn-52"><mml:math id="mml-ieqn-52"><mml:msubsup><mml:mrow><mml:mrow><mml:mtext>stepsize</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mtext>Worse</mml:mtext></mml:mrow></mml:mrow></mml:msubsup></mml:math></inline-formula> is equal to zero. Moreover, <inline-formula id="ieqn-53"><mml:math id="mml-ieqn-53"><mml:mi>&#x03B1;</mml:mi></mml:math></inline-formula> and <inline-formula id="ieqn-54"><mml:math id="mml-ieqn-54"><mml:mi>&#x03B2;</mml:mi></mml:math></inline-formula> indicate factors that manage the exploitation and exploration stages. They are defined as follows:
<disp-formula id="eqn-17"><label>(17)</label><mml:math id="mml-eqn-17" display="block"><mml:mi>&#x03B1;</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x03B1;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>&#x03B1;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:mi>t</mml:mi><mml:mo>;</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mtext>iteration</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>Max</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext>iteration</mml:mtext></mml:mrow></mml:mrow></mml:mfrac></mml:math></disp-formula>
<disp-formula id="eqn-18"><label>(18)</label><mml:math id="mml-eqn-18" display="block"><mml:mi>&#x03B2;</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mrow><mml:mtext>max</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>t</mml:mi></mml:math></disp-formula></p>
<p>According to the previous step, the novel position of <inline-formula id="ieqn-55"><mml:math id="mml-ieqn-55"><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> can be estimated as follows. Later, the position of <inline-formula id="ieqn-56"><mml:math id="mml-ieqn-56"><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> is upgraded, or else the objective old function value can be given as:
<disp-formula id="eqn-19"><label>(19)</label><mml:math id="mml-eqn-19" display="block"><mml:msub><mml:mrow><mml:mtext>newMOC</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mtext>MOC</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mtext>stepsize</mml:mtext></mml:mrow><mml:mrow><mml:mrow><mml:mtext>i</mml:mtext></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>j</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></disp-formula></p>
<p>The optimization process would be terminated when the predetermined iteration number is reached, or the ending conditions are accomplished. Otherwise, it returns to step 2 for a new iteration. The SSO method progresses a fitness function (FF) for attaining effective classification efficiency. It assigns a positive integer to represent the better performance of the candidate solution.
<disp-formula id="ueqn-3">
<mml:math id="mml-ueqn-3" display="block"><mml:mrow><mml:mtext mathvariant="italic">fitness</mml:mtext></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mtext mathvariant="italic">ClassifierErrorRate</mml:mtext></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="eqn-20"><label>(20)</label><mml:math id="mml-eqn-20" display="block"><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">number</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mi>o</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext mathvariant="italic">misclassified</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext mathvariant="italic">samples</mml:mtext></mml:mrow></mml:mrow><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">Total</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext mathvariant="italic">number</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mi>o</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext mathvariant="italic">samples</mml:mtext></mml:mrow></mml:mrow></mml:mfrac><mml:mo>&#x2217;</mml:mo><mml:mn>100</mml:mn></mml:math></disp-formula>
</p>
<fig id="fig-9">
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_38959-fig-9.tif"/>
</fig>
</sec>
</sec>
</sec>
<sec id="s4"><label>4</label><title>Experimental Validation</title>
<p>The performance validation of the MDLS-UAVIC approach is tested utilizing a benchmark dataset, namely UCM dataset (<ext-link ext-link-type="uri" xlink:href="http://weegee.vision.ucmerced.edu/datasets/landuse.html">http://weegee.vision.ucmerced.edu/datasets/landuse.html</ext-link>) and the AID dataset (<ext-link ext-link-type="uri" xlink:href="https://captain-whu.github.io/AID/">https://captain-whu.github.io/AID/</ext-link>). The results are investigated under two aspects such as security and image classification. A few sample images are displayed in <xref ref-type="fig" rid="fig-2">Fig. 2</xref>.</p>
<fig id="fig-2"><label>Figure 2</label><caption><title>Sample UAVs images</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_38959-fig-2.tif"/></fig>
<p><xref ref-type="table" rid="table-1">Table 1</xref> provides a qualitative result analysis of the MDLS-UAVIC model on distinct sample test images. The outcomes showed that the MDLS-UAVIC approach has achieved an effective encryption process with maximal values of PSNR and CC under all images. At the same time, the MDLS-UAVIC approach has resulted in lower values of MSE under every image.</p>
<table-wrap id="table-1"><label>Table 1</label><caption><title>Visualization of proposed MDLS-UAVIC methodology on sample images</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Input images</th>
<th align="left">Encrypted images</th>
<th align="left">Decrypted images</th>
<th align="left">MSE</th>
<th align="left">PSNR</th>
<th align="left">CC</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-1.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-2.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-3.tif"/></td>
<td align="left">0.047</td>
<td align="left">61.410</td>
<td align="left">99.910</td>
</tr>
<tr>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-4.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-5.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-6.tif"/></td>
<td align="left">0.059</td>
<td align="left">60.422</td>
<td align="left">99.990</td>
</tr>
<tr>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-7.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-8.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-9.tif"/></td>
<td align="left">0.035</td>
<td align="left">62.690</td>
<td align="left">99.910</td>
</tr>
<tr>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-10.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-11.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-12.tif"/></td>
<td align="left">0.105</td>
<td align="left">57.919</td>
<td align="left">99.920</td>
</tr>
<tr>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-13.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-14.tif"/></td>
<td align="left"><inline-graphic xlink:href="CSSE_38959-inline-15.tif"/></td>
<td align="left">0.108</td>
<td align="left">57.797</td>
<td align="left">99.930</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="table-2">Table 2</xref> illustrates a detailed MSE and PSNR inspection of the MDLS-UAVIC model with existing models under distinct sample images [<xref ref-type="bibr" rid="ref-22">22</xref>]. <xref ref-type="fig" rid="fig-3">Fig. 3</xref> reports a comparative MSE assessment of the MDLS-UAVIC approach with recent algorithms under dissimilar images. The figure depicted that the MDLS-UAVIC algorithm has obtained enhanced performance with lower values of MSE. For instance, with sample image 1, the MDLS-UAVIC system has provided a minimal MSE of 0.047, whereas the AIUAV, CSO, and GWO processes have obtained increased MSE of 0.060, 0.189, and 0.288, correspondingly. Also, with sample image 3, the MDLS-UAVIC approach provided the least MSE of 0.035, but the AIUAV, CSO, and GWO approach obtained maximum MSE of 0.049, 0.206, and 0.237, correspondingly. In addition, with sample image 5, the MDLS-UAVIC algorithm has provided a lesser MSE of 0.108, but the AIUAV, CSO, and GWO methods have gained higher MSE of 0.135, 0.183, and 0.234, correspondingly.</p>
<table-wrap id="table-2"><label>Table 2</label><caption><title>MSE and PSNR analysis of MDLS-UAVIC technique with various sample images</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead valign="top">
<tr>
<th align="left" rowspan="2">Sample images</th>
<th align="center" colspan="2">MDLS-UAVIC</th>
<th align="center" colspan="2">AIUAV model</th>
<th align="center" colspan="2">CSO algorithm</th>
<th align="center" colspan="2">GWO algorithm</th>
</tr>
<tr>
<th align="left">MSE</th>
<th align="left">PSNR</th>
<th align="left">MSE</th>
<th align="left">PSNR</th>
<th align="left">MSE</th>
<th align="left">PSNR</th>
<th align="left">MSE</th>
<th align="left">PSNR</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Sample 1</td>
<td align="left">0.047</td>
<td align="left">61.410</td>
<td align="left">0.060</td>
<td align="left">60.371</td>
<td align="left">0.189</td>
<td align="left">55.373</td>
<td align="left">0.288</td>
<td align="left">53.538</td>
</tr>
<tr>
<td align="left">Sample 2</td>
<td align="left">0.059</td>
<td align="left">60.422</td>
<td align="left">0.080</td>
<td align="left">59.089</td>
<td align="left">0.176</td>
<td align="left">55.666</td>
<td align="left">0.232</td>
<td align="left">54.472</td>
</tr>
<tr>
<td align="left">Sample 3</td>
<td align="left">0.035</td>
<td align="left">62.690</td>
<td align="left">0.049</td>
<td align="left">61.273</td>
<td align="left">0.206</td>
<td align="left">54.994</td>
<td align="left">0.237</td>
<td align="left">54.391</td>
</tr>
<tr>
<td align="left">Sample 4</td>
<td align="left">0.105</td>
<td align="left">57.919</td>
<td align="left">0.130</td>
<td align="left">56.981</td>
<td align="left">0.215</td>
<td align="left">54.815</td>
<td align="left">0.262</td>
<td align="left">53.956</td>
</tr>
<tr>
<td align="left">Sample 5</td>
<td align="left">0.108</td>
<td align="left">57.797</td>
<td align="left">0.135</td>
<td align="left">56.827</td>
<td align="left">0.183</td>
<td align="left">55.509</td>
<td align="left">0.234</td>
<td align="left">54.439</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-3"><label>Figure 3</label><caption><title>MSE analysis of MDLS-UAVIC approach with distinct sample images</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_38959-fig-3.tif"/></fig>
<p>A detailed PSNR examination of the MDLS-UAVIC model with current models is provided in <xref ref-type="fig" rid="fig-4">Fig. 4</xref>. The experimental values specified that the MDLS-UAVIC system had improved PSNR values under every sample image. For instance, with sample image 1, the MDLS-UAVIC model has an increased PSNR of 61.410&#x2005;dB, whereas the AIUAV, CSO, and GWO algorithms have provided reduced PSNR of 60.371, 55.373, and 53.538&#x2005;dB, respectively. Meanwhile, with sample image 3, the MDLS-UAVIC technique has attained an increased PSNR of 62.690&#x2005;dB. In contrast, the AIUAV, CSO, and GWO algorithms have provided lesser PSNR of 61.273, 54.994, and 54.391&#x2005;dB, correspondingly. Eventually, with sample image 5, the MDLS-UAVIC method has an enhanced PSNR of 57.797&#x2005;dB, whereas the AIUAV, CSO, and GWO approaches have provided reduced PSNR of 56.827, 55.509, and 54.439&#x2005;dB.</p>
<fig id="fig-4"><label>Figure 4</label><caption><title>PSNR analysis of MDLS-UAVIC approach with distinct sample images</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_38959-fig-4.tif"/></fig>
<p>A detailed CC examination of the MDLS-UAVIC approach with current algorithms is provided in <xref ref-type="table" rid="table-3">Table 3</xref> and <xref ref-type="fig" rid="fig-5">Fig. 5</xref>. The experimental values show that the MDLS-UAVIC system has gained enhanced CC values under every sample image. For instance, with sample image 1, the MDLS-UAVIC technique has attained an increased CC of 99.910, whereas the AIUAV, CSO, and GWO techniques have provided lower CC of 99.700, 99.470, and 99.240, respectively. In the meantime, with sample image 3, the MDLS-UAVIC model has exhibited an increased CC of 99.910, whereas the AIUAV, CSO, and GWO algorithms have provided reduced CC of 99.710, 99.460, and 99.250, respectively. At last, with sample image 5, the MDLS-UAVIC model has attained a maximal CC of 99.930, whereas the AIUAV, CSO, and GWO techniques have provided lower CC of 99.650, 99.360, and 99.160, correspondingly.</p>
<table-wrap id="table-3"><label>Table 3</label><caption><title>Correlation coefficient (CC) analysis of MDLS-UAVIC system with various sample images</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Sample images</th>
<th align="left">MDLS-UAVIC</th>
<th align="left">AIUAV model</th>
<th align="left">CSO algorithm</th>
<th align="left">GWO algorithm</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Sample 1</td>
<td align="left">99.910</td>
<td align="left">99.700</td>
<td align="left">99.470</td>
<td align="left">99.240</td>
</tr>
<tr>
<td align="left">Sample 2</td>
<td align="left">99.990</td>
<td align="left">99.740</td>
<td align="left">99.500</td>
<td align="left">99.220</td>
</tr>
<tr>
<td align="left">Sample 3</td>
<td align="left">99.910</td>
<td align="left">99.710</td>
<td align="left">99.460</td>
<td align="left">99.250</td>
</tr>
<tr>
<td align="left">Sample 4</td>
<td align="left">99.920</td>
<td align="left">99.650</td>
<td align="left">99.450</td>
<td align="left">99.210</td>
</tr>
<tr>
<td align="left">Sample 5</td>
<td align="left">99.930</td>
<td align="left">99.650</td>
<td align="left">99.360</td>
<td align="left">99.160</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-5"><label>Figure 5</label><caption><title>CC analysis of MDLS-UAVIC technique with distinct sample images</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_38959-fig-5.tif"/></fig>
<p><xref ref-type="table" rid="table-4">Table 4</xref> and <xref ref-type="fig" rid="fig-6">Fig. 6</xref> define a comparative CT assessment of the MDLS-UAVIC approach with recent models under distinct images. The figure depicted that the MDLS-UAVIC process has gained enhanced performance with lower values of CT. With sample image 1, the MDLS-UAVIC technique has provided minimal CT of 1.154&#x2005;s, whereas the AIUAV, CSO, and GWO algorithms have obtained increased CT of 1.449, 2.056, and 2.379&#x2005;s, correspondingly. Likewise, with sample image 3, the MDLS-UAVIC technique has provided the least CT of 1.411&#x2005;s, whereas the AIUAV, CSO, and GWO algorithms have obtained maximum CT of 1.802, 2.160 and 2.213&#x2005;s, respectively. Additionally, with sample image 5, the MDLS-UAVIC model has provided minimal CT of 1.166&#x2005;s, whereas the AIUAV, CSO, and GWO algorithms have obtained higher CT of 1.682, 1.873, and 2.125&#x2005;s, correspondingly.</p>
<table-wrap id="table-4"><label>Table 4</label><caption><title>Computation time analysis of MDLS-UAVIC technique with various sample images</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Sample images</th>
<th align="left">MDLS-UAVIC</th>
<th align="left">AIUAV model</th>
<th align="left">CSO algorithm</th>
<th align="left">GWO algorithm</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Sample 1</td>
<td align="left">1.154</td>
<td align="left">1.449</td>
<td align="left">2.056</td>
<td align="left">2.379</td>
</tr>
<tr>
<td align="left">Sample 2</td>
<td align="left">1.126</td>
<td align="left">1.578</td>
<td align="left">1.642</td>
<td align="left">2.063</td>
</tr>
<tr>
<td align="left">Sample 3</td>
<td align="left">1.411</td>
<td align="left">1.802</td>
<td align="left">2.160</td>
<td align="left">2.213</td>
</tr>
<tr>
<td align="left">Sample 4</td>
<td align="left">0.995</td>
<td align="left">1.471</td>
<td align="left">2.226</td>
<td align="left">2.387</td>
</tr>
<tr>
<td align="left">Sample 5</td>
<td align="left">1.166</td>
<td align="left">1.682</td>
<td align="left">1.873</td>
<td align="left">2.125</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-6"><label>Figure 6</label><caption><title>CT analysis of MDLS-UAVIC technique with distinct sample images</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_38959-fig-6.tif"/></fig>
<p><xref ref-type="table" rid="table-5">Table 5</xref> provides detailed classification results of the MDLS-UAVIC model on the UCM multi-label dataset [<xref ref-type="bibr" rid="ref-23">23</xref>]. <xref ref-type="fig" rid="fig-7">Fig. 7</xref> provides a brief <inline-formula id="ieqn-57"><mml:math id="mml-ieqn-57"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-58"><mml:math id="mml-ieqn-58"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> examination of the MDLS-UAVIC model with existing models on the test UCM multi-label dataset. The figure indicated that the Conv. NN, CNN-ANN, and CNN-Bil.STM models have provided worse performance with lower values of <inline-formula id="ieqn-59"><mml:math id="mml-ieqn-59"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-60"><mml:math id="mml-ieqn-60"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. Besides, the CNN-RNN, GNN-SGAT, and GNN-MLIGAT models have reached slightly increased values of <inline-formula id="ieqn-61"><mml:math id="mml-ieqn-61"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-62"><mml:math id="mml-ieqn-62"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. 
Though the optimal SqueezeNet model has resulted in reasonable <inline-formula id="ieqn-63"><mml:math id="mml-ieqn-63"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-64"><mml:math id="mml-ieqn-64"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 91.72&#x0025; and 92.92&#x0025;, the MDLS-UAVIC model has accomplished maximum <inline-formula id="ieqn-65"><mml:math id="mml-ieqn-65"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-66"><mml:math id="mml-ieqn-66"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> values of 92.81&#x0025; and 94.28&#x0025;, respectively.</p>
<table-wrap id="table-5"><label>Table 5</label><caption><title>Comparative analysis of MDLS-UAVIC technique with recent algorithms on UCM multi-label dataset</title></caption>
<table frame="hsides" >
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Methods</th>
<th align="left">Precision</th>
<th align="left">Recall</th>
<th align="left">F1-Score</th>
<th align="left">F2-Score</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Conv. NN</td>
<td align="left">79.91</td>
<td align="left">83.20</td>
<td align="left">80.56</td>
<td align="left">79.36</td>
</tr>
<tr>
<td align="left">CNN-ANN</td>
<td align="left">77.90</td>
<td align="left">83.97</td>
<td align="left">80.30</td>
<td align="left">80.69</td>
</tr>
<tr>
<td align="left">CNN-Bil.STM</td>
<td align="left">79.87</td>
<td align="left">84.13</td>
<td align="left">80.52</td>
<td align="left">81.18</td>
</tr>
<tr>
<td align="left">CNN-RNN</td>
<td align="left">87.79</td>
<td align="left">86.32</td>
<td align="left">84.99</td>
<td align="left">86.96</td>
</tr>
<tr>
<td align="left">GNN-SGAT</td>
<td align="left">87.04</td>
<td align="left">87.56</td>
<td align="left">87.44</td>
<td align="left">86.16</td>
</tr>
<tr>
<td align="left">GNN-MLIGAT</td>
<td align="left">87.36</td>
<td align="left">89.75</td>
<td align="left">85.88</td>
<td align="left">88.13</td>
</tr>
<tr>
<td align="left">Optimal SqueezeNet</td>
<td align="left">91.72</td>
<td align="left">92.92</td>
<td align="left">94.60</td>
<td align="left">93.41</td>
</tr>
<tr>
<td align="left">MDLS-UAVIC</td>
<td align="left">92.81</td>
<td align="left">94.28</td>
<td align="left">95.93</td>
<td align="left">94.32</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-7"><label>Figure 7</label><caption><title><inline-formula id="ieqn-67"><mml:math id="mml-ieqn-67"><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-68"><mml:math id="mml-ieqn-68"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> analysis of MDLS-UAVIC technique on UCM multi-label dataset</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_38959-fig-7.tif"/></fig>
<p><xref ref-type="fig" rid="fig-8">Fig. 8</xref> demonstrates a brief <inline-formula id="ieqn-69"><mml:math id="mml-ieqn-69"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-70"><mml:math id="mml-ieqn-70"><mml:mi>F</mml:mi><mml:msub><mml:mn>2</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> analysis of the MDLS-UAVIC approach with existing models on the test UCM multi-label dataset. The figure indicated that the Conv. NN, CNN-ANN, and CNN-Bil.STM models have provided worse performance with lower values of <inline-formula id="ieqn-71"><mml:math id="mml-ieqn-71"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-72"><mml:math id="mml-ieqn-72"><mml:mi>F</mml:mi><mml:msub><mml:mn>2</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula>. In addition, the CNN-RNN, GNN-SGAT, and GNN-MLIGAT models have attained somewhat increased values of <inline-formula id="ieqn-73"><mml:math id="mml-ieqn-73"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-74"><mml:math id="mml-ieqn-74"><mml:mi>F</mml:mi><mml:msub><mml:mn>2</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula>. 
While the optimal SqueezeNet model has resulted in reasonable <inline-formula id="ieqn-75"><mml:math id="mml-ieqn-75"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-76"><mml:math id="mml-ieqn-76"><mml:mi>F</mml:mi><mml:msub><mml:mn>2</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 94.60&#x0025; and 93.41&#x0025;, the MDLS-UAVIC model has accomplished maximal <inline-formula id="ieqn-77"><mml:math id="mml-ieqn-77"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-78"><mml:math id="mml-ieqn-78"><mml:mi>F</mml:mi><mml:msub><mml:mn>2</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> values of 95.93&#x0025; and 94.32&#x0025;, correspondingly.</p>
<fig id="fig-8"><label>Figure 8</label><caption><title><inline-formula id="ieqn-91"><mml:math id="mml-ieqn-91"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-92"><mml:math id="mml-ieqn-92"><mml:mi>F</mml:mi><mml:msub><mml:mn>2</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> analysis of MDLS-UAVIC technique on UCM multi-label dataset</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_38959-fig-8.tif"/></fig>
<p><xref ref-type="table" rid="table-6">Table 6</xref> provides detailed classification results of the MDLS-UAVIC system on the AID multi-label dataset. The results indicated that the Conv. NN, CNN-ANN, and CNN-Bil.STM models have provided worse performance with lower values of <inline-formula id="ieqn-79"><mml:math id="mml-ieqn-79"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-80"><mml:math id="mml-ieqn-80"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. Also, the CNN-RNN, GNN-SGAT, and GNN-MLIGAT models have reached slightly increased values of <inline-formula id="ieqn-81"><mml:math id="mml-ieqn-81"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-82"><mml:math id="mml-ieqn-82"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>. 
While the optimal SqueezeNet model has resulted in reasonable <inline-formula id="ieqn-83"><mml:math id="mml-ieqn-83"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-84"><mml:math id="mml-ieqn-84"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 93.10&#x0025; and 94.63&#x0025;, the MDLS-UAVIC model has accomplished maximum <inline-formula id="ieqn-85"><mml:math id="mml-ieqn-85"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-86"><mml:math id="mml-ieqn-86"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>a</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> values of 94.46&#x0025; and 95.82&#x0025;, respectively. 
Next, the optimal SqueezeNet model has resulted in reasonable <inline-formula id="ieqn-87"><mml:math id="mml-ieqn-87"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-88"><mml:math id="mml-ieqn-88"><mml:mi>F</mml:mi><mml:msub><mml:mn>2</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 92.14&#x0025; and 93.24&#x0025;, and the MDLS-UAVIC model has accomplished maximum <inline-formula id="ieqn-89"><mml:math id="mml-ieqn-89"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-90"><mml:math id="mml-ieqn-90"><mml:mi>F</mml:mi><mml:msub><mml:mn>2</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> values of 93.66&#x0025; and 94.32&#x0025;, respectively.</p>
<table-wrap id="table-6"><label>Table 6</label><caption><title>Comparative analysis of MDLS-UAVIC technique with recent algorithms on AID multi-label dataset</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Methods</th>
<th align="left">Precision</th>
<th align="left">Recall</th>
<th align="left">F1-Score</th>
<th align="left">F2-Score</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Conv. NN</td>
<td align="left">87.20</td>
<td align="left">87.38</td>
<td align="left">86.63</td>
<td align="left">85.30</td>
</tr>
<tr>
<td align="left">CNN-ANN</td>
<td align="left">85.58</td>
<td align="left">88.36</td>
<td align="left">84.75</td>
<td align="left">86.86</td>
</tr>
<tr>
<td align="left">CNN-Bil.STM</td>
<td align="left">87.78</td>
<td align="left">88.10</td>
<td align="left">85.83</td>
<td align="left">87.53</td>
</tr>
<tr>
<td align="left">CNN-RNN</td>
<td align="left">89.73</td>
<td align="left">90.15</td>
<td align="left">88.83</td>
<td align="left">89.60</td>
</tr>
<tr>
<td align="left">GNN-SGAT</td>
<td align="left">89.94</td>
<td align="left">90.64</td>
<td align="left">88.00</td>
<td align="left">89.44</td>
</tr>
<tr>
<td align="left">GNN-MLIGAT</td>
<td align="left">90.97</td>
<td align="left">90.31</td>
<td align="left">87.92</td>
<td align="left">88.83</td>
</tr>
<tr>
<td align="left">Optimal SqueezeNet</td>
<td align="left">93.10</td>
<td align="left">94.63</td>
<td align="left">92.14</td>
<td align="left">93.24</td>
</tr>
<tr>
<td align="left">MDLS-UAVIC</td>
<td align="left">94.46</td>
<td align="left">95.82</td>
<td align="left">93.66</td>
<td align="left">94.32</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>After detecting the results and discussion, it has been concluded that the MDLS-UAVIC approach has accomplished maximum classification performance over the other models.</p>
</sec>
<sec id="s5"><label>5</label><title>Conclusion</title>
<p>In this study, a novel MDLS-UAVIC approach was established to securely encrypt the images and classify them into distinct class labels in the smart city environment. The proposed MDLS-UAVIC model follows a two-stage process: encryption and image classification. For image encryption, the signcryption technique effectively encrypts the UAV images. Next, the image classification process involves an Xception-based deep convolutional neural network for the feature extraction process. Finally, SSO with a recurrent neural network (RNN) model is exploited for UAV image classification. The experimental validation of the MDLS-UAVIC approach was tested utilizing a benchmark dataset, and the outcomes are examined in various measures. The comparative analysis ensured the effective performance of the MDLS-UAVIC approach on recent methodologies. In the future, an ensemble of DL-based classification methods can be designed to accomplish maximum performance.</p>
<p>Some limitations that can be associated with the use of computational intelligence techniques for secure unmanned aerial vehicle (UAV) image classification in a smart city environment are listed below:
<list list-type="simple">
<list-item><label>&#x2010;</label><p>Dependence on training data: Computational intelligence techniques, such as deep learning algorithms, require a large amount of labeled training data to achieve high accuracy in image classification. However, collecting and labeling such data can be time-consuming and expensive, especially for a specific smart city environment.</p></list-item>
<list-item><label>&#x2010;</label><p>Sensitivity to environmental factors: UAV image classification can be affected by various environmental factors, such as lighting conditions, weather, and camera quality. These factors can impact the quality of the captured images, which in turn affects the accuracy of the classification results.</p></list-item>
<list-item><label>&#x2010;</label><p>Security concerns: The use of UAVs in a smart city environment raises security concerns, as these vehicles can be vulnerable to cyberattacks and can potentially be used for malicious purposes. While the paper may address security concerns, it is important to consider the potential limitations of the proposed approach in mitigating such risks.</p></list-item>
<list-item><label>&#x2010;</label><p>Integration with existing systems: In a smart city environment, UAV image classification systems need to be integrated with other existing systems, such as surveillance cameras and emergency response systems. The integration process can be challenging, as different systems may have different data formats and communication protocols.</p></list-item>
<list-item><label>&#x2010;</label><p>Regulatory and ethical considerations: The use of UAVs in a smart city environment may be subject to regulatory and ethical considerations, such as privacy concerns and compliance with local laws and regulations. These considerations need to be taken into account when implementing a UAV image classification system.</p></list-item>
</list></p>
</sec>
</body>
<back>
<ack>
<p>The authors would like to acknowledge the appreciation to the Deputyship for Research &#x0026; Innovation, Ministry of Education in Saudi Arabia for funding this research work.</p>
</ack>
<sec><title>Funding Statement</title>
<p>The authors extend their appreciation to the Deputyship for Research &#x0026; Innovation, Ministry of Education in Saudi Arabia for funding this research work through the Project Number RI-44-0446.</p></sec>
<sec><title>Author Contributions</title>
<p>The authors confirm contribution to the paper as follows: study conception and design: Firas Abedi, Hayder M. A. Ghanimi; data collection: Abeer D. Algarni, Naglaa F. Soliman; analysis and interpretation of results: Walid El-Shafai, Ali Hashim Abbas, Zahraa H. Kareem; draft manuscript preparation: Hussein Muhi Hariz and Ahmed Alkhayyat. All authors reviewed the results and approved the final version of the manuscript.</p></sec>
<sec sec-type="data-availability"><title>Availability of Data and Materials</title>
<p>Not applicable.</p></sec>
<sec sec-type="COI-statement"><title>Conflicts of Interest</title>
<p>The authors declare they have no conflicts of interest to report regarding the present study.</p></sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Caram&#x00E9;s</surname></string-name>, <string-name><given-names>O.</given-names> <surname>Novoa</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Albela</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Lamas</surname></string-name></person-group>, &#x201C;<article-title>A UAV and blockchain-based system for industry 4.0 inventory and traceability applications</article-title>,&#x201D; <source>Sensors and Applications</source>, vol. <volume>4</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>7</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Caram&#x00E9;s</surname></string-name>, <string-name><given-names>O.</given-names> <surname>Novoa</surname></string-name>, <string-name><given-names>I.</given-names> <surname>M&#x00ED;guez</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Lamas</surname></string-name></person-group>, &#x201C;<article-title>Towards an autonomous industry 4.0 warehouse: A UAV and blockchain-based system for inventory and traceability applications in big data-driven supply chain management</article-title>,&#x201D; <source>Sensors</source>, vol. <volume>19</volume>, no. <issue>10</issue>, pp. <fpage>23</fpage>&#x2013;<lpage>94</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>G.</given-names> <surname>Lee</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Saad</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Bennis</surname></string-name></person-group>, &#x201C;<article-title>Online optimization for UAV-assisted distributed fog computing in smart factories of Industry 4.0</article-title>,&#x201D; in <conf-name>Proc. of IEEE Global Communications Conf. (GLOBECOM)</conf-name>, <conf-loc>Abu Dhabi, United Arab Emirates</conf-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>6</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Aggarwal</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Kumar</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Alhussein</surname></string-name> and <string-name><given-names>G.</given-names> <surname>Muhammad</surname></string-name></person-group>, &#x201C;<article-title>Blockchain-based UAV path planning for Healthcare 4.0: Current challenges and the way ahead</article-title>,&#x201D; <source>IEEE Network</source>, vol. <volume>35</volume>, no. <issue>1</issue>, pp. <fpage>20</fpage>&#x2013;<lpage>29</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Barenji</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Nejad</surname></string-name></person-group>, &#x201C;<article-title>Blockchain applications in UAV-towards Aviation 4.0</article-title>,&#x201D; <source>Intelligent and Fuzzy Techniques</source>, vol. <volume>37</volume>, no. <issue>2</issue>, pp. <fpage>411</fpage>&#x2013;<lpage>430</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Petritoli</surname></string-name> and <string-name><given-names>F.</given-names> <surname>Leccese</surname></string-name></person-group>, &#x201C;<article-title>Precise takagi-sugeno fuzzy logic system for UAV longitudinal stability: An Industry 4.0 case study for aerospace</article-title>,&#x201D; <source>ACTA IMEKO</source>, vol. <volume>9</volume>, no. <issue>4</issue>, pp. <fpage>10</fpage>&#x2013;<lpage>46</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>V.</given-names> <surname>Alieksieiev</surname></string-name> and <string-name><given-names>B.</given-names> <surname>Markovych</surname></string-name></person-group>, &#x201C;<article-title>Implementation of UAV for environment monitoring of a smart city with an airspace regulation by AIXM-format data streaming</article-title>,&#x201D; <source>Industry</source>, vol. <volume>5</volume>, no. <issue>2</issue>, pp. <fpage>90</fpage>&#x2013;<lpage>93</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Aggarwal</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Kumar</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Tanwar</surname></string-name></person-group>, &#x201C;<article-title>Blockchain-envisioned UAV communication using 6G networks: Open issues, use cases, and future directions</article-title>,&#x201D; <source>IEEE Internet of Things Journal</source>, vol. <volume>8</volume>, no. <issue>7</issue>, pp. <fpage>5416</fpage>&#x2013;<lpage>5441</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>L.</given-names> <surname>Das</surname></string-name></person-group>, &#x201C;<article-title>Human target search and detection using autonomous UAV and deep learning</article-title>,&#x201D; in <conf-name>Proc. of IEEE Int. Conf. on Industry 4.0, Artificial Intelligence, and Communications Technology (IAICT)</conf-name>, <conf-loc>Bali, Indonesia</conf-loc>, pp. <fpage>55</fpage>&#x2013;<lpage>61</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F.</given-names> <surname>Long</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Sun</surname></string-name> and <string-name><given-names>Z.</given-names> <surname>Yang</surname></string-name></person-group>, &#x201C;<article-title>A novel routing algorithm based on multi-objective optimization for satellite networks</article-title>,&#x201D; <source>Journal of Networks</source>, vol. <volume>6</volume>, no. <issue>2</issue>, pp. <fpage>238</fpage>&#x2013;<lpage>246</lpage>, <year>2011</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J.</given-names> <surname>Raj</surname></string-name></person-group>, &#x201C;<article-title>Security enhanced blockchain based unmanned aerial vehicle health monitoring system</article-title>,&#x201D; <source>Journal of ISMAC</source>, vol. <volume>2</volume>, no. <issue>2</issue>, pp. <fpage>121</fpage>&#x2013;<lpage>131</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Shibli</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Marques</surname></string-name> and <string-name><given-names>E.</given-names> <surname>Spiridon</surname></string-name></person-group>, &#x201C;<article-title>Artificial intelligent drone-based encrypted machine learning of image extraction using pretrained convolutional neural network (CNN)</article-title>,&#x201D; in <conf-name>Proc. of Int. Conf. on Artificial Intelligence and Virtual Reality</conf-name>, <conf-loc>Nagoya, Japan</conf-loc>, pp. <fpage>72</fpage>&#x2013;<lpage>82</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Minu</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Canessane</surname></string-name></person-group>, &#x201C;<article-title>Secure image transmission scheme in unmanned aerial vehicles using multiple share creation with optimal elliptic curve cryptography</article-title>,&#x201D; <source>Indian Journal of Computer Science and Engineering</source>, vol. <volume>12</volume>, no. <issue>1</issue>, pp. <fpage>129</fpage>&#x2013;<lpage>134</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Mardiyanto</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Suryoatmojo</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Setiawan</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Irfansyah</surname></string-name></person-group>, &#x201C;<article-title>Low cost analog video transmission security of unmanned aerial vehicle (UAV) based on linear feedback shift register (LFSR)</article-title>,&#x201D; in <conf-name>Proc. of Int. Seminar on Intelligent Technology and Its Applications (ISITIA)</conf-name>, <conf-loc>Surabaya, Indonesia</conf-loc>, pp. <fpage>414</fpage>&#x2013;<lpage>419</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Abualsauod</surname></string-name></person-group>, &#x201C;<article-title>A hybrid blockchain method in Internet of Things for privacy and security in unmanned aerial vehicles network</article-title>,&#x201D; <source>Computers and Electrical Engineering</source>, vol. <volume>99</volume>, no. <issue>3</issue>, pp. <fpage>107</fpage>&#x2013;<lpage>127</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>I.</given-names> <surname>Punithavathi</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Dhanasekaran</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Duraipandy</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Lydia</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Sivaram</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Optimal dense convolutional network model for image classification in unmanned aerial vehicles based ad hoc networks</article-title>,&#x201D; <source>International Journal of Ad Hoc and Ubiquitous Computing</source>, vol. <volume>39</volume>, no. <issue>1</issue>, pp. <fpage>46</fpage>&#x2013;<lpage>60</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Kumar</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Kumar</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Tripathi</surname></string-name>, <string-name><given-names>G.</given-names> <surname>Gupta</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Gadekallu</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>SP2F: A secured privacy-preserving framework for smart agricultural unmanned aerial vehicles</article-title>,&#x201D; <source>Computer Networks</source>, vol. <volume>18</volume>, no. <issue>7</issue>, pp. <fpage>107</fpage>&#x2013;<lpage>119</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Elhoseny</surname></string-name> and <string-name><given-names>K.</given-names> <surname>Shankar</surname></string-name></person-group>, &#x201C;<article-title>Reliable data transmission model for mobile ad hoc network using signcryption technique</article-title>,&#x201D; <source>IEEE Transactions on Reliability</source>, vol. <volume>69</volume>, no. <issue>3</issue>, pp. <fpage>1077</fpage>&#x2013;<lpage>1086</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>F.</given-names> <surname>Chollet</surname></string-name></person-group>, &#x201C;<article-title>Xception: Deep learning with depthwise separable convolutions</article-title>,&#x201D; in <conf-name>Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition</conf-name>, <conf-loc>Honolulu, HI, USA</conf-loc>, pp. <fpage>1251</fpage>&#x2013;<lpage>1258</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Hang</surname></string-name>, <string-name><given-names>Q.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Hong</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Ghamisi</surname></string-name></person-group>, &#x201C;<article-title>Cascaded recurrent neural networks for hyperspectral image classification</article-title>,&#x201D; <source>IEEE Transactions on Geoscience and Remote Sensing</source>, vol. <volume>57</volume>, no. <issue>8</issue>, pp. <fpage>5384</fpage>&#x2013;<lpage>5394</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Kaveh</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Zaerreza</surname></string-name></person-group>, &#x201C;<article-title>Shuffled shepherd optimization method: A new meta-heuristic algorithm</article-title>,&#x201D; <source>Engineering Computations</source>, vol. <volume>37</volume>, no. <issue>7</issue>, pp. <fpage>2357</fpage>&#x2013;<lpage>2389</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Ambika</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Biradar</surname></string-name> and <string-name><given-names>V.</given-names> <surname>Burkpalli</surname></string-name></person-group>, &#x201C;<article-title>Encryption-based steganography of images by multiobjective whale optimal pixel selection</article-title>,&#x201D; <source>International Journal of Computers and Applications</source>, vol. <volume>2</volume>, no. <issue>6</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>10</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Zhang</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Zhang</surname></string-name> and <string-name><given-names>L.</given-names> <surname>Chen</surname></string-name></person-group>, &#x201C;<article-title>Multi-label remote sensing image scene classification by combining a convolutional neural network and a graph neural network</article-title>,&#x201D; <source>Remote Sensing</source>, vol. <volume>12</volume>, no. <issue>23</issue>, pp. <fpage>40</fpage>&#x2013;<lpage>63</lpage>, <year>2020</year>.</mixed-citation></ref>
</ref-list>
</back></article>