<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CMC</journal-id>
<journal-id journal-id-type="nlm-ta">CMC</journal-id>
<journal-id journal-id-type="publisher-id">CMC</journal-id>
<journal-title-group>
<journal-title>Computers, Materials &#x0026; Continua</journal-title>
</journal-title-group>
<issn pub-type="epub">1546-2226</issn>
<issn pub-type="ppub">1546-2218</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">73818</article-id>
<article-id pub-id-type="doi">10.32604/cmc.2025.073818</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Multi-Objective Enhanced Cheetah Optimizer for Joint Optimization of Computation Offloading and Task Scheduling in Fog Computing</article-title>
<alt-title alt-title-type="left-running-head">Multi-Objective Enhanced Cheetah Optimizer for Joint Optimization of Computation Offloading and Task Scheduling in Fog Computing</alt-title>
<alt-title alt-title-type="right-running-head">Multi-Objective Enhanced Cheetah Optimizer for Joint Optimization of Computation Offloading and Task Scheduling in Fog Computing</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Zia</surname><given-names>Ahmad</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Azim</surname><given-names>Nazia</given-names></name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Akbayan</surname><given-names>Bekarystankyzy</given-names></name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Alzahrani</surname><given-names>Khalid J.</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-5" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Rehman</surname><given-names>Ateeq Ur</given-names></name><xref ref-type="aff" rid="aff-5">5</xref><email>202411144@gachon.ac.kr</email></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Khan</surname><given-names>Faheem Ullah</given-names></name><xref ref-type="aff" rid="aff-6">6</xref></contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western"><surname>Al-Kahtani</surname><given-names>Nouf</given-names></name><xref ref-type="aff" rid="aff-7">7</xref></contrib>
<contrib id="author-8" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Alkahtani</surname><given-names>Hend Khalid</given-names></name><xref ref-type="aff" rid="aff-8">8</xref><email>Hkalqahtani@pnu.edu.sa</email></contrib>
<aff id="aff-1"><label>1</label><institution>Department of Electronic, University of Peshawar</institution>, <addr-line>Peshawar, 25000</addr-line>, <country>Pakistan</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Computer Science, Abdul Wali Khan University Mardan</institution>, <addr-line>Mardan, 23200</addr-line>, <country>Pakistan</country></aff>
<aff id="aff-3"><label>3</label><institution>School of Digital Technologies, Narxoz University</institution>, <addr-line>Almaty, 050035</addr-line>, <country>Kazakhstan</country></aff>
<aff id="aff-4"><label>4</label><institution>Department of Clinical Laboratories Sciences, College of Applied Medical Sciences, Taif University</institution>, <addr-line>P.O. Box 11099</addr-line>, <country>Taif</country>, <addr-line>21944</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-5"><label>5</label><institution>School of Computing, Gachon University</institution>, <addr-line>Seongnam-si, 13120</addr-line>, <country>Republic of Korea</country></aff>
<aff id="aff-6"><label>6</label><institution>Department of Software Engineering, University of Science and Technology</institution>, <addr-line>Bannu, 28100</addr-line>, <country>Pakistan</country></aff>
<aff id="aff-7"><label>7</label><institution>Department of Health Information Management and Technology, College of Public Health, Imam Abdulrahman bin Faisal University</institution>, <addr-line>Dammam, 31441</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-8"><label>8</label><institution>Department of Information Systems, College of Computer and Information Sciences, Princess Nourah Bint Abdulrahman University</institution>, <addr-line>Riyadh, 11671</addr-line>, <country>Saudi Arabia</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Authors: Ateeq Ur Rehman. Email: <email>202411144@gachon.ac.kr</email>; Hend Khalid Alkahtani. Email: <email>Hkalqahtani@pnu.edu.sa</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic">
<year>2026</year>
</pub-date>
<pub-date date-type="pub" publication-format="electronic">
<day>12</day><month>1</month><year>2026</year>
</pub-date>
<volume>86</volume>
<issue>3</issue>
<elocation-id>66</elocation-id>
<history>
<date date-type="received">
<day>26</day>
<month>09</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>31</day>
<month>10</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2025 The Authors.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Published by Tech Science Press.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CMC_73818.pdf"></self-uri>
<abstract>
<p>The cloud-fog computing paradigm has emerged as a novel hybrid computing model that integrates computational resources at both fog nodes and cloud servers to address the challenges posed by dynamic and heterogeneous computing networks. Finding an optimal computational resource for task offloading and then executing efficiently is a critical issue to achieve a trade-off between energy consumption and transmission delay. In this network, the task processed at fog nodes reduces transmission delay. Still, it increases energy consumption, while routing tasks to the cloud server saves energy at the cost of higher communication delay. Moreover, the order in which offloaded tasks are executed affects the system&#x2019;s efficiency. For instance, executing lower-priority tasks before higher-priority jobs can disturb the reliability and stability of the system. Therefore, an efficient strategy of optimal computation offloading and task scheduling is required for operational efficacy. In this paper, we introduced a multi-objective and enhanced version of the Cheetah Optimizer (CO), namely MoECO, to jointly optimize the computation offloading and task scheduling in cloud-fog networks to minimize two competing objectives, i.e., energy consumption and communication delay. MoECO first assigns tasks to the optimal computational nodes and then the allocated tasks are scheduled for processing based on the task priority. The mathematical modelling of CO needs improvement in computation time and convergence speed. Therefore, MoECO is proposed to increase the search capability of agents by controlling the search strategy based on a leader&#x2019;s location. The adaptive step length operator is adjusted to diversify the solution and thus improves the exploration phase, i.e., global search strategy. Consequently, this prevents the algorithm from getting trapped in the local optimal solution. 
Moreover, the interaction factor during the exploitation phase is also adjusted based on the location of the prey instead of the adjacent Cheetah. This increases the exploitation capability of agents, i.e., local search capability. Furthermore, MoECO employs a multi-objective Pareto-optimal front to simultaneously minimize designated objectives. Comprehensive simulations in MATLAB demonstrate that the proposed algorithm obtains multiple solutions via a Pareto-optimal front and achieves an efficient trade-off between optimization objectives compared to baseline methods.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Computation offloading</kwd>
<kwd>task scheduling</kwd>
<kwd>cheetah optimizer</kwd>
<kwd>fog computing</kwd>
<kwd>optimization</kwd>
<kwd>resource allocation</kwd>
<kwd>internet of things</kwd>
</kwd-group>
<funding-group>
<award-group id="awg1">
<funding-source>Princess Nourah bint Abdulrahman University</funding-source>
<award-id>PNURSP2025R384</award-id>
</award-group>
</funding-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>Cloud computing has become an essential component of advanced computing technology. It allows individuals and businesses to access an extensive range of computing resources, like storage, processing power, and computational power [<xref ref-type="bibr" rid="ref-1">1</xref>,<xref ref-type="bibr" rid="ref-2">2</xref>]. Nevertheless, it faces difficulties meeting the demand for delay-sensitive and data-intensive IoT tools [<xref ref-type="bibr" rid="ref-3">3</xref>,<xref ref-type="bibr" rid="ref-4">4</xref>]. To address these challenges, cloud-fog computing has developed as the best tool to handle different types of user requests, which may vary in their needs for low latency, delay, cost-effectiveness, makespan, bandwidth utilization, and energy efficiency [<xref ref-type="bibr" rid="ref-5">5</xref>,<xref ref-type="bibr" rid="ref-6">6</xref>]. This method involved the joint utilization of cloud computing with node computing fixed near data sources, facilitating efficient energy consumption, bandwidth execution, and minimizing delay. However, network latency, data management, limited wireless resources, and complex environments pose numerous challenges to computation offloading and task scheduling in edge computing, demanding the development of efficient techniques [<xref ref-type="bibr" rid="ref-7">7</xref>].</p>
<p>In cloud computing, computation offloading ensures tasks are allocated to suitable computational nodes for efficient and timely processing, while task scheduling determines the optimal execution order of offloaded jobs to enhance performance metrics like resource utilization, energy consumption, and communication delay, etc. The effectiveness of scheduling techniques in fog computing relies on the seamless integration of task allocation and orderly task execution. However, most existing scheduling algorithms focus solely on either task allocation or execution, leading to resource bottlenecks, increased communication delays, and reduced system stability and reliability. A joint optimization of computation offloading and task scheduling is essential to improve bandwidth utilization, maintain energy efficiency, reduce latency, mitigate resource bottlenecks, and enhance overall system stability and reliability [<xref ref-type="bibr" rid="ref-8">8</xref>&#x2013;<xref ref-type="bibr" rid="ref-10">10</xref>].</p>
<p>Moreover, maintaining an efficient trade-off between computation offloading and task scheduling in cloud-fog computing is also necessary because directing jobs to the cloud servers can reduce energy but increases communication cost and latency, while processing tasks at fog nodes can reduce communication latency but increases energy consumption. Moreover, if the order of execution based on priorities is not taken into account in scheduling, then it will negatively affect the reliability and stability. For example, in a smart city traffic management system, if there is a delay in traffic control based on data received from IoT devices, it can increase the risk of congestion and accidents on the roads. Especially when the system is busy with unnecessary jobs and cannot process important traffic tasks on time. Therefore, optimizing both computing offloading and task scheduling is crucial to achieve the best balance between energy savings and low latency, which is essential for the smooth operation of a system.</p>
<p>Computation offloading and task scheduling in fog computing possess a Nondeterministic polynomial-time complete (NP-complete) optimization problem, requiring an efficient solution to allocate resources effectively [<xref ref-type="bibr" rid="ref-11">11</xref>&#x2013;<xref ref-type="bibr" rid="ref-13">13</xref>]. Many state-of-the-art techniques exist to seek an ideal solution. Some basic approaches, like brute force techniques and dynamic programming, often lack effectiveness in terms of latency and bandwidth. The brute force technique is impractical in large scenarios owing to the comprehensive search required to map available task resources, which increases in computational complexity. Recent Deep Reinforcement Learning (DRL) algorithms are powerful methods to solve decision-making problems. However, they require substantial offline training and careful reward function design, making them inefficient for solving multi-objective optimization problems [<xref ref-type="bibr" rid="ref-14">14</xref>]. In contrast, a multi-objective meta-heuristic algorithm efficiently optimizes the conflicting objectives via Pareto-optimal front generation without requiring an extensive training phase. Moreover, they are generally more sample-efficient, easier to reproduce, and simpler to implement in heterogeneous cloud-fog environments.</p>
<p>Recently, several meta-heuristic techniques have been applied to tackle the issues of task scheduling in cloud computing, including particle swarm optimization (PSO) [<xref ref-type="bibr" rid="ref-15">15</xref>], ant colony optimization (ACO) [<xref ref-type="bibr" rid="ref-16">16</xref>], genetic algorithm (GA) [<xref ref-type="bibr" rid="ref-17">17</xref>], and Gray wolf optimization (GWO) [<xref ref-type="bibr" rid="ref-18">18</xref>], among others. The goal of these algorithms is to efficiently tackle the issue of task scheduling to meet the user&#x2019;s requirement in terms of Quality of service. However, challenges, such as low convergence rates, imbalanced exploration and exploitation phases, limited stochastic operators, focus on a single objective, and the existence of multiple search spaces, can potentially lead to local stagnation, consequently diminishing system efficiency [<xref ref-type="bibr" rid="ref-16">16</xref>].</p>
<p>To address the limitation of existing scheduling techniques, we proposed a Multi-objective Enhanced Cheetah Optimizer (MoECO) to jointly optimize computation offloading and task scheduling in a cloud fog computing environment. MoECO obtains a set of solutions via the Pareto-optimal front. Cheetah Optimizer (CO) is a new meta-heuristic algorithm inspired by the hunting behavior of Cheetahs. The special hunting strategy of Cheetahs comprises three phases, i.e., searching, sitting-and-waiting, and attacking [<xref ref-type="bibr" rid="ref-19">19</xref>,<xref ref-type="bibr" rid="ref-20">20</xref>].</p>
<p>The CO is well regarded as a strong method for solving many optimization problems. However, the mathematical modelling of the searching and attacking phases could affect the exploration and exploitation capability of agents. Consequently, the CO algorithm reduces convergence speed and increases computation time. To overcome these shortcomings, we modified the CO algorithm, which can improve the local and global search capability of agents while requiring fewer computational resources. The global search capability is improved based on the leader position instead of the Cheetah&#x2019;s previous position. The step length parameter is modified to contribute to the diversity of solutions and hence increase the exploration capability of agents. Furthermore, the interaction factor during the exploitation phase (i.e., local search) is also adjusted based on the location of a prey instead of the adjacent Cheetah. This adjustment can enable the algorithm to find the near-optimal solution. Thus, local search capability (exploitation) is also enhanced. Attributed to its fast convergence rate, avoiding local optimization, and low computation time, the proposed algorithm efficiently optimizes the task scheduling in fog computing.</p>
<p>To the best of our knowledge, this is the first attempt to use the MoECO for computation offloading and task scheduling. The innovative aspect of our approach lies in leveraging a multi-objective enhanced cheetah algorithm to jointly optimize computation offloading and task scheduling within a cloud-fog computing environment. We improved the search strategy of agents based on the leader position to escalate the convergence stability and improve the exploration-exploitation balance. To simultaneously minimize the energy consumption and communication latency, we generate a multi-objective Pareto-optimal front while maintaining diverse trade-offs between competing objectives. In our algorithm, each cheetah serves as a candidate solution, encoding a unique task offloading and scheduling strategy. MoECO efficiently allocates resource-intensive tasks to cloud servers and delays sensitive jobs to fog nodes, achieving an optimal trade-off between minimum energy consumption and communication delay. Moreover, the MoECO processes higher priority jobs before lower priority tasks, mitigating resource bottlenecks and escalating system reliability and stability. We simulate MoECO in MATLAB and compare it against the state-of-the-art algorithms. Simulation results show that our scheme outperforms other schemes in minimizing communication delay and energy consumption, while improving task completion rate and fairness index. Furthermore, due to its distinct position updating mechanism, high convergence speed, low computation time, and equal distribution of workload, MoECO strengthens network stability and enables efficient handling of diverse requests originating from heterogeneous IoT devices.</p>
<p>The main contributions of our article are as follows:
<list list-type="bullet">
<list-item>
<p>The global and local search capability of CO is improved by adjusting the step length operator and the interaction factor during the attacking phase, respectively.</p></list-item>
<list-item>
<p>A new multi-objective mathematical model for task offloading and scheduling in cloud-fog computing is designed to efficiently offload tasks to the optimal computation node and then schedule the tasks based on the tasks&#x2019; priorities.</p></list-item>
<list-item>
<p>The proposed solution is compared with the baseline methods and benchmarks to ensure its efficiency in saving energy, minimizing transmission delay, and reducing network load. The analysis demonstrates that the developed algorithm consistently delivers better performance compared to existing methods.</p></list-item>
</list></p>
<p>Unlike benchmark algorithms, which overlook resource diversity and workload distribution, the proposed algorithm effectively addresses these challenges. Additionally, existing algorithms focus solely on either energy consumption or delay, disregarding the impact of other performance metrics. In contrast, the proposed solution evaluates system performance by considering energy consumption and delay, task completion rate, and fairness index, providing a more realistic and comprehensive assessment.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Literature Review</title>
<p>In this section, we review the different work done by other authors in the field of cloud-fog Computing. Wang et al. [<xref ref-type="bibr" rid="ref-21">21</xref>] proposed a task scheduling solution known as Deep Reinforcement Learning-based IoT application Scheduling algorithm (DRLIS), which is significant due to the growing need for low-latency applications in the Internet of Things (IoT) environments. These environments are highly dynamic and often change unpredictably. The DRLIS method has been shown to achieve lower optimization costs and shorter scheduling times compared to Q-Learning, Deep Q-Network (DQN), Non-dominated Sorting Genetic Algorithm II (NSGA-II), and NSGA-III. To further enhance the quality of service in fog networks, the authors proposed additional task scheduling methodologies, including Fuzzy Golden Eagle Load Balancing (FGELB) and Golden Eagle Optimization algorithm (GEOA) [<xref ref-type="bibr" rid="ref-22">22</xref>]. These approaches involve three main stages: sorting tasks based on fuzzy logic, ranking and allocating resources using GEOA, and managing power consumption based on the availability and current status of tasks and resources. The FGELB approach was compared against other methods in terms of communication overhead, computation cost, latency, energy consumption, and failure rate, demonstrating its effectiveness.</p>
<p>A time-dependent scheduling mechanism was formulated, exemplifying the use of time-sensitive data modelled by neural networks [<xref ref-type="bibr" rid="ref-23">23</xref>]. A benchmark dataset from authors and consumers was utilized to evaluate energy consumption and response time. The mechanism was implemented using iFogSim, and experiments were conducted against baseline approaches. The results demonstrated that Hierarchical Reinforcement Learning (HIRO) achieved significantly lower energy consumption compared to current conventional methods.</p>
<p>A method known as the Adaptive Multi-objective Optimization Task Scheduling Method for fog computing (FOG-AMOSM) was designed in [<xref ref-type="bibr" rid="ref-24">24</xref>], focusing on resource cost and task execution as key parameters. To clarify, it was proposed as a multi-objective methodology utilizing a heuristic approach. Furthermore, it outperformed the Round Robin (RR) algorithm in effectively addressing the aforementioned parameters. Siyadatzadeh et al. [<xref ref-type="bibr" rid="ref-25">25</xref>] developed a task scheduler aimed at enhancing reliability and improving the ability to schedule tasks on fog computing (FC) nodes. A Reinforcement Learning-Based Real-Time Task Assignment Strategy in Emerging Fault-Tolerant Fog Computing is proposed, named as ReLIEF and compared with state-of-the-art techniques using the iFogSim simulator. The results demonstrated that ReLIEF effectively reduced delays while balancing workloads across the network.</p>
<p>Jain and Kumar in [<xref ref-type="bibr" rid="ref-26">26</xref>] conducted a survey that included a scheduler designed to address Quality of Service (QoS) challenges in the cloud-fog environment. This scheduler was modelled using the Markov Decision Process, focusing on energy consumption in relation to execution time. The approach was implemented in SimPy and tested against DQN and RR algorithms, demonstrating its effectiveness in optimizing performance. Chandrashekar et al. [<xref ref-type="bibr" rid="ref-27">27</xref>] developed a workflow specifically for assigning tasks to individual processors in the cloud computing paradigm. The proposed technique aimed to minimize both cost and time during project implementation. To evaluate the model, several baseline algorithms were used for comparison, including Ant Colony Optimization (ACO), a UAV-based nighttime arrangement optimization model, and First-Come-First-Served (FCFS). The focus on cost-aware job scheduling for cloud instances has been explored in the work of Cheng et al. [<xref ref-type="bibr" rid="ref-28">28</xref>], where the use of advanced reinforcement learning methods is emphasized. This research highlights the critical importance of cost efficiency in cloud computing environments. Deep reinforcement learning is proposed as a solution to enhance job scheduling by improving both execution time and cost efficiency.</p>
<p>The study highlights the growing significance of cost-conscious strategies in cloud computing and demonstrates how deep reinforcement learning can improve task scheduling practices within cloud instances. It emphasizes the ongoing pursuit of innovative approaches to reducing operational costs while maximizing resource utilization. Additionally, it introduces the concept of an effective cloud computing economy, which is increasingly vital in today&#x2019;s context. Topics related to cluster computing have also been discussed, with relevant factors outlined to address key challenges in this domain.</p>
<p>Chen et al. [<xref ref-type="bibr" rid="ref-29">29</xref>] proposed a decentralized architecture for an Intelligent Video Surveillance System (IVSS) capable of performing real-time data analysis using fog computing. In this framework, edge computing collaborates with cloud computing to act as a converged computing system. Artificial intelligence (AI) is employed to collect media data from customer-distributed edge network devices. Subsequently, AI enables the distributed storage of information and manages access to this data through distributed accessory equipment, facilitating automatic video telepresence in real time. Rafique et al. [<xref ref-type="bibr" rid="ref-30">30</xref>] introduced a novel task scheduling algorithm that combines Social Problem-Solving Optimization and Cat Swarm Optimization techniques. The primary focus of this study was to reduce response time by efficiently assigning tasks to appropriate fog nodes. This approach emphasizes achieving an optimal level of resource utilization while ensuring tasks are scheduled effectively within the fog computing environment.</p>
<p>Ghobaei-Arani et al. [<xref ref-type="bibr" rid="ref-31">31</xref>] explored the potential of the Moth-Flame Optimization (MFO) algorithm for resource allocation in fog computing. They proposed a taxonomy based on an optimization-driven approach for task execution, where tasks are optimally mapped to computational resources to meet QoS demands while minimizing computational and transmission time. However, the proposed algorithm does not account for the energy cost associated with using the corresponding fog device or node, which could ultimately degrade the overall performance of the system.</p>
<p>An improved discrete NSGA is proposed in [<xref ref-type="bibr" rid="ref-32">32</xref>] to minimize makespan, computation costs, and communication costs. The objective is to automate the scheduling process, thereby reducing the effort required to allocate tasks among personnel. The algorithm dynamically selects fog nodes or cloud servers for processing to ensure efficient load balancing and optimize overall system performance. Multi-objective Gray Wolf Optimization (MGWO), presented by Saif et al. [<xref ref-type="bibr" rid="ref-33">33</xref>], leverages the predatory chase behavior of grey wolves to enhance the performance of the proposed ACO algorithm for task scheduling in a cloud&#x2013;fog computing network. Energy consumption and transmission delay are considered as optimization objectives. MGWO is implemented on the fog controller, which determines the distribution of the workload to computing assets. The decision is made after accurately assessing and evaluating the nature of the tasks being offered.</p>
<p>Tang and Wong [<xref ref-type="bibr" rid="ref-34">34</xref>] have developed a deep reinforcement learning-based task offloading model in fog computing. The study focuses on addressing issues related to computation offloading and service insertion in fog computing, intending to minimize latency, migration costs, and energy consumption. To achieve this, the optimization problem is framed as a multidimensional Markov decision process.</p>
<p>Razaq et al. [<xref ref-type="bibr" rid="ref-35">35</xref>] propose a fragmentation-based probabilistic Q-learning approach for offloading fragmented tasks to fog computing environments. The concept of software computing is introduced, with the goal of offloading tasks to computational nodes while ensuring load balancing. IoT jobs that arrive are split into segments based on factors such as privacy, completion time, and other real-time constraints. These segments are then delegated to several fog nodes for processing.</p>
<p>The authors in [<xref ref-type="bibr" rid="ref-36">36</xref>] proposed a framework that integrates federated learning with deep reinforcement learning to enable decentralized scheduling, allowing fog nodes to train local DQN models on their own data and aggregate knowledge into a shared global model without exchanging sensitive information. The framework improves task prioritization by classifying workloads based on execution time and deadlines, ensuring that high-priority tasks meet their service level agreements (SLAs). In [<xref ref-type="bibr" rid="ref-37">37</xref>], the authors proposed a hybrid Particle Swarm Optimization (PSO) and Whale Optimization Algorithm (WOA) named PSO &#x002B; WOA, a paradigm to address the challenges of job scheduling in large-scale, dynamic, and heterogeneous cloud-fog environments. The hybrid framework combines the exploration capabilities of PSO with the exploitation strengths of WOA. The proposed framework effectively balances exploration and exploitation, thereby improving convergence toward optimal task scheduling. In [<xref ref-type="bibr" rid="ref-38">38</xref>], the authors proposed a Multi-Objective Workflow Scheduling using a Deep Reinforcement Learning approach that integrates DQN-based scheduling with a priority-driven task mapping mechanism. Job priorities are computed based on task dependencies, while the priorities of computational nodes (e.g., virtual machine) are derived from datacenter electricity cost, enabling the DQN-based task scheduler to adaptively assign tasks that jointly minimize makespan and energy consumption.</p>
<p>In [<xref ref-type="bibr" rid="ref-39">39</xref>], the authors proposed a low delay scheduling algorithm for fog computing workflows with energy constraints. The algorithm focuses on minimizing workflow completion time while adhering to the energy consumption limit. Additionally, the algorithm also improves the system reliability in mobile fog computing under the same energy constraints. A hybrid bio-inspired algorithm for task scheduling in edge computing is proposed in [<xref ref-type="bibr" rid="ref-40">40</xref>]. The authors combine the Slime Mould Algorithm and the optimized Harris Hawks Optimizer to improve the convergence accuracy, communication latency, and energy efficiency. Based on the requirements of each task, K-medoids clustering is employed to divide the tasks into computationally intensive, data-intensive, and integrated groups. This is particularly useful in meeting the requirements of each task.</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Preliminaries</title>
<p>This part presents the architecture of cloud&#x2013;fog computing, along with the workflow of our proposed task scheduling algorithm.</p>
<sec id="s3_1">
<label>3.1</label>
<title>Cloud-Fog Computing Architecture</title>
<p>Cloud-fog computing consists of three layers: (1) the IoT layer, (2) the fog layer, and (3) the cloud layer. These three layers are interconnected through wireless devices such as LoRa, Bluetooth, and Wi-Fi. The first layer contains the IoT devices, enabling them to send data to the other layers for processing. IoT devices can also communicate with each other, generating crucial information needed for quick responses. Once data is generated by the first layer, it is passed to the second layer, which contains two nodes: the task receiver and the task controller, also referred to as the fog node and fog analyzer. Fog nodes have limited computational power and storage capacity, which helps reduce transmission latency and network overhead. However, certain tasks require additional computational power and storage capacity, prompting the fog node to transmit the data to the cloud server for processing. In each scenario, the task is received by the task receiver (fog node) and directly forwarded to the controller. When the fog node receives a task, it further divides it into subsets for estimation analysis and task scheduling, depending on the task requirements. The fog layer (fog controller) incorporates the proposed algorithm to make optimal task scheduling decisions, considering objectives such as energy consumption and delay. The third layer consists of computational servers with significant computational power. This layer is responsible for processing the tasks and sending them back to the fog layer after completion. The working operation of the proposed task scheduling algorithm is shown in <xref ref-type="fig" rid="fig-1">Fig. 1</xref>.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Working operation of the proposed algorithm</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-1.tif"/>
</fig>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Cheetah Optimizer (CO) and Multi-Objective CO</title>
<p>The CO algorithm is a new meta-heuristic optimization algorithm, inspired by the hunting behavior of cheetahs. It is primarily designed for tackling complex real-world optimization problems, not only in the field of computer science, but also in other fields like electronics [<xref ref-type="bibr" rid="ref-41">41</xref>], engineering [<xref ref-type="bibr" rid="ref-42">42</xref>], and medical science [<xref ref-type="bibr" rid="ref-43">43</xref>], etc. The cheetah is one of the fastest land animals and can run at a speed of 120 km/h. A cheetah is capable of detecting prey upon patrolling its surroundings. When prey is detected, the cheetah tries to hide itself behind small bushes or hills. In this case, the cheetah does not directly attack the prey, but instead it sits and waits for the prey to come as close as possible. The cheetah tries to maintain this minimum distance, and once the prey is within range, the cheetah attempts to attack it. Overall, the hunting strategy of CO is divided into three phases, i.e., (i) the search strategy, (ii) the sit-and-wait strategy, and (iii) the attack strategy. <xref ref-type="fig" rid="fig-2">Fig. 2</xref> presents the arrangements of the cheetah population according to these strategies. Each arrangement of cheetahs in the swarm indicates a solution to the problem. For example, in our case, each arrangement represents a unique task scheduling strategy. Each arrangement is further evaluated to compute the fitness value. Among the population, the best location is considered as the prey, i.e., the best solution. Cheetahs arrange their locations according to the location of the prey. The description of each strategy is given below:</p>
<p><bold>Search phase:</bold> In the search phase, Cheetah scans the surrounding area in search of the prey. The searching can be done either sitting or standing, or actively patrolling the environment.</p>
<p><bold>Sit and wait phase:</bold> The movement of the Cheetah can lead to the escape of the prey. Therefore, Cheetah needs to be very careful while approaching the prey. To avoid being caught, Cheetahs hide themselves from the prey and sit in a hidden place and wait for the prey to come nearer.</p>
<p><bold>Attack phase:</bold> In this phase, there are two important steps: (i) rushing and (ii) capturing. In rushing, when cheetahs decide to attack the prey, they quickly run towards it at maximum speed. In capturing, the cheetahs use their flexibility to capture the prey by approaching it at maximum speed. The mathematical modelling of each phase of the CO algorithm is given in (<xref ref-type="disp-formula" rid="eqn-19">19</xref>).</p>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Representation of CO algorithm</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-2.tif"/>
</fig>
<p>The Multi-objective Cheetah Optimizer (MoCO) is an extension of the CO, designed to solve multi-objective optimization problems, which encompass two or more competing objectives that must be optimized simultaneously. Unlike single-objective optimizers, multi-objective optimization problems generate multiple solutions because of multiple conflicting objectives. Mathematically, it is represented as below:
<disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:mi>S</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mtable rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mtable columnalign="center center center center" rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>n</mml:mi><m
ml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>S</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mtable rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:mi>F</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>F</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mtd></mml:mtr></mml:mtable><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mtable columnalign="center center center center" rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:mi>F</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mtd><mml:mtd><mml:mi>F</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo 
stretchy="false">)</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:mi>F</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mtd></mml:mtr></mml:mtable><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:mi>Z</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mtable rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:msub><mml:mi>Z</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>Z</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>Z</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mtable columnalign="center center center center" rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><m
ml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:msub><mml:mi>z</mml:mi><mml:mrow><mml:mi>n</mml:mi><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula>where <italic>S</italic> is the population with <italic>n</italic> solutions, and <italic>d</italic> is the dimension. The objective function <italic>F</italic> is applied to the solutions, i.e., <italic>F</italic>(<italic>S</italic>), and the corresponding <italic>Z</italic> objective values are generated.</p>
<p>The solutions can either dominate or be non-dominant over one another based on specific conditions.
<list list-type="bullet">
<list-item>
<p>Two solutions <italic>s</italic><sub>1</sub> and <italic>s</italic><sub>2</sub> are non-dominating over each other if neither of them dominates the other, i.e., <italic>s</italic><sub>1</sub> is better than <italic>s</italic><sub>2</sub> in some fitness values but worse than <italic>s</italic><sub>2</sub> in others.</p></list-item>
<list-item>
<p>Alternatively, <italic>s</italic><sub>1</sub> dominates <italic>s</italic><sub>2</sub> if <italic>s</italic><sub>1</sub> is at least as good as <italic>s</italic><sub>2</sub> in all fitness values and strictly better than <italic>s</italic><sub>2</sub> in at least one.</p></list-item>
</list></p>
<p>All the non-dominating solutions are stored in an external archive and arranged in a population and ranked based on dominance. The first rank consists of the best non-dominating solutions. The subsequent rank contains solutions dominated by higher-ranked ones. All the solutions in the archive are called Pareto-optimal solutions and form the Pareto-optimal front. When a new solution dominates the existing one in the archive, then the existing solution is replaced by the new solution, ensuring the archive maintains only the most optimal solutions.</p>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Proposed Solution</title>
<p>In this section, we introduce the proposed algorithm for tackling the multi-objective optimization problem using the multi-objective enhanced Cheetah Optimizer. The main aim of the proposed algorithm is to offload and schedule <italic>N</italic> independent tasks with different computing resource needs within cloud computing environments. The algorithm initially offloads each task to an appropriate computational node based on its requirements. For example, energy-intensive tasks are assigned to cloud servers, while delay-sensitive tasks are assigned to fog nodes. The offloaded tasks are then executed considering their deadlines and energy usage. The goal is to minimize total processing time and energy consumption while ensuring all tasks meet their deadlines, thus preventing overloads on any node. Specifically, the algorithm seeks to optimize two key metrics: reducing overall task completion time and decreasing energy consumption.</p>
<p>An overview of the proposed algorithm involves six phases. In the first phase, we create the search space with the necessary dimensions and variables, randomly initializing all agents to find the optimal solution. The second phase involves constructing the solution by first offloading tasks to suitable computational nodes, such as fog nodes or cloud servers, and then scheduling these tasks based on their priority. In the third phase, we mutate the solution to increase diversity. The fourth phase evaluates each solution using the objective function and selects the best one with the lowest energy use and transmission latency. Next, we add the solution to the list of non-dominated solutions. The final phase updates the agents&#x2019; positions to move closer to the most optimal solutions. <xref ref-type="table" rid="table-1">Table 1</xref> presents the list of symbols used in equations and the algorithm.</p>
<table-wrap id="table-1">
<label>Table 1</label>
<caption>
<title>List of symbols</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col align="center"/> </colgroup>
<thead>
<tr>
<th>List of symbols</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>&#x1D702;</td>
<td>Task Arrival rate</td>
</tr>
<tr>
<td>&#x03BC;</td>
<td>Service rate</td>
</tr>
<tr>
<td><inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mi>&#x03C8;</mml:mi></mml:math></inline-formula></td>
<td>Average length of the queue</td>
</tr>
<tr>
<td><inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:msubsup><mml:mi>E</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td>Energy of a terminal device <italic>i</italic></td>
</tr>
<tr>
<td><inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:mi>&#x03B2;</mml:mi></mml:math></inline-formula></td>
<td>Weight factor</td>
</tr>
<tr>
<td><inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:msubsup><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td>Processing time of end user</td>
</tr>
<tr>
<td><inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:msubsup><mml:mi>V</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td>Number of virtual machines on server m</td>
</tr>
<tr>
<td><inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:msub><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>E</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula></td>
<td>Total energy consumption</td>
</tr>
<tr>
<td><inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mrow><mml:mi>D</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula></td>
<td>Total transmission delay</td>
</tr>
<tr>
<td><inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:msubsup><mml:mrow><mml:mtext>&#x019C;</mml:mtext></mml:mrow><mml:mrow><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>m</mml:mi><mml:mi>p</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td>Workload assigned to the Fog node <italic>j</italic></td>
</tr>
<tr>
<td><inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:msubsup><mml:mi>&#x03BE;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td>Interaction factor</td>
</tr>
<tr>
<td><inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:mi>I</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula></td>
<td>Input length of task <italic>t</italic></td>
</tr>
<tr>
<td><inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:msubsup><mml:mi>S</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula></td>
<td>Step length parameter</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s4_1">
<label>4.1</label>
<title>System Model</title>
<p>In the context of fog computing, the system model encompasses fog devices <italic>FD &#x003D; {FD</italic><sub>1</sub>, <italic>FD</italic><sub>2</sub>, <italic>FD</italic><sub>3</sub>, ... <italic>FD</italic><sub><italic>n</italic></sub><italic>}</italic>, a set of heterogeneous cloud servers represented as <italic>C</italic><sub><italic>s</italic></sub> <italic>&#x003D; {C</italic><sub><italic>s</italic>1</sub>, <italic>C</italic><sub><italic>s</italic>2</sub>, <italic>C</italic><sub><italic>s</italic>3</sub>, ... <italic>C</italic><sub><italic>sm</italic></sub><italic>}</italic>, the set of IoT end nodes denoted as <italic>EN &#x003D; {EN</italic><sub>1</sub>, <italic>EN</italic><sub>2</sub>, <italic>EN</italic><sub>3</sub>, ... <italic>EN</italic><sub><italic>l</italic></sub><italic>}</italic> and the set of search agents represented as <italic>S</italic><sub><italic>A</italic></sub> <italic>&#x003D; {S</italic><sub><italic>A</italic>1</sub>, <italic>S</italic><sub><italic>A</italic>2</sub>, <italic>S</italic><sub><italic>A</italic>3</sub>, ... <italic>S</italic><sub><italic>Ah</italic></sub><italic>}</italic>. Here, we make an assumption that the capacity of cloud computational resources is more than the computation capacity of fog nodes and IoT devices. Similarly, the energy consumption of cloud computational resources is more than that of fog nodes and IoT devices. The scheduling of each incoming task is performed by the fog broker. In the system model, the search agents play the role of candidate solutions that contain a set of tasks, offloaded to various computational resources (fog node or cloud server). The evaluation of solutions is performed by the fitness function that determines the best candidate solution. Afterward, the other candidate solutions reposition themselves to get closer to the best solution.</p>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Search Space Creation</title>
<p>In this phase, all the candidate solutions, i.e., Cheetahs, are randomly populated within the search space along with the initialization of fog nodes, cloud servers, and devices. The search space allows the candidate solution to cooperatively search for the potential optimal solution, i.e., optimal task allocation.</p>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Solution Construction</title>
<p>In our solution, each candidate solution can be represented by an <italic>R</italic> &#x00D7; <italic>T</italic> binary matrix, where <italic>R</italic> is the number of available computing nodes and <italic>T</italic> is the number of tasks to be scheduled. In the matrix, if <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:msub><mml:mi>Y</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:math></inline-formula>, it means that the task <italic>k</italic> is allocated to computing node <italic>i</italic>, otherwise if <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:msub><mml:mi>Y</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:math></inline-formula>, it signifies that the task <italic>k</italic> is not allocated to computing node <italic>i</italic>. Search agents intelligently take into account the various important parameters during allocation of each task (e.g., scheduling objectives, network condition, required resources, and available resources). A task is assigned to a computing node only if the available computational resources meet the task requirements. The creation of the candidate solution must be in accordance with the following constraints:
<list list-type="bullet">
<list-item>
<p>Each task is allocated to only one computational node</p></list-item>
<list-item>
<p>The distribution of tasks among the computational nodes must be balanced.</p></list-item>
<list-item>
<p>Tasks must be successfully processed within the deadline</p></list-item>
</list></p>
</sec>
<sec id="s4_4">
<label>4.4</label>
<title>Mutation</title>
<p>Solution mutation plays a critical role in enhancing the diversity of the solution. To this end, we used the Inverse Mutation Technique (IMT) in which the positions of two candidate solutions (i.e., task schedules) are interchanged with each other upon meeting the specified criteria. In mutation, the solutions are selected randomly; for instance, if C1 and C2 are selected, their positions are swapped if C1 is greater than C2. Otherwise, no swapping occurs. This procedure is illustrated in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>.</p>
<fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Inverse mutation technique</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-3.tif"/>
</fig>
</sec>
<sec id="s4_5">
<label>4.5</label>
<title>Fitness Function</title>
<p>The fitness function provides a means of evaluating the search agents&#x2019; solutions, which represent the allocation of tasks to the computing nodes. This fitness function computes fitness values of all search agent solutions and it is based on the delay and energy consumption. The fitness function determines the optimal computing node for a task based on the optimal objective values. The fitness function for evaluating each search agent solution is given below:
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:mi>F</mml:mi><mml:mo>=</mml:mo><mml:mi>&#x03B2;</mml:mi><mml:mspace width="thinmathspace" /><mml:mrow><mml:mo>&#x2217;</mml:mo></mml:mrow><mml:mspace width="thinmathspace" /><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mrow><mml:mi>D</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03B2;</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x2217;</mml:mo></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>E</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula>where <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:msub><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>E</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mrow><mml:mi>D</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, and <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:mi>&#x03B2;</mml:mi></mml:math></inline-formula> are the system total energy consumption, communication delay, and weight factor, respectively. Weight factor determines the priority of each objective during the allocation of tasks to the computing nodes, and its value must be in between 0 and 1, i.e., <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:mi>&#x03B2;</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn><mml:mo>]</mml:mo></mml:mrow></mml:math></inline-formula>. In case of weight factor equals to 0.5, i.e., <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:mi>&#x03B2;</mml:mi><mml:mo>=</mml:mo><mml:mn>0.5</mml:mn><mml:mo>,</mml:mo></mml:math></inline-formula> then equal priority is given to each objective during the optimization process. 
If <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:mi>&#x03B2;</mml:mi><mml:mo>&#x003C;</mml:mo><mml:mn>0.5</mml:mn><mml:mo>,</mml:mo></mml:math></inline-formula> then high priority is given to energy consumption. In this case, the optimization algorithm may delay the task to achieve minimal energy consumption. Conversely, for <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:mi>&#x03B2;</mml:mi><mml:mo>&#x003E;</mml:mo><mml:mn>0.5</mml:mn><mml:mo>,</mml:mo></mml:math></inline-formula> a higher priority is given to delay, allowing the algorithm to take immediate action during the task allocation process. In MEC environments, tasks are distributed among three levels of computing nodes, i.e., terminal devices or end-user devices, middleware devices or fog nodes, and cloud servers. The availability of computational resources and task processing capability in each layer is different. Therefore, the computation of objective parameters is necessary at each level. Terminal devices are usually less energy-efficient because of their small battery capacity. Middleware devices provide moderate transmission latencies with moderate energy consumption, while cloud servers offer a vast computational capability at the cost of network latency. In the subsequent section, we compute the energy and delay at each computing level.</p>
<sec id="s4_5_1">
<label>4.5.1</label>
<title>Energy Description</title>
<p>In this section, the total energy consumption of each computational node at various levels is calculated. The energy consumption of a terminal device is calculated as the product of the computation time and the power consumption of the terminal device. Mathematically, it is represented as below:
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:msubsup><mml:mi>E</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msubsup><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mo>&#x2217;</mml:mo></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:math></disp-formula>where <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:msubsup><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:msub><mml:mi>P</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> are the task&#x2019;s computation time and the power consumption of terminal device <italic>i</italic>, respectively. The computation time of each task is determined by the task&#x2019;s arrival rate and service rate.
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:msubsup><mml:mi>&#x03C1;</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>&#x03BC;</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03B7;</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula></p>
<p>The energy consumption at fog node is determined by the computation amount of the workload. The energy consumption of a fog node is directly proportional to the amount of workload; this means that the energy consumption of a node increases with the increase in the workload. It is stated mathematically as below:
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:msubsup><mml:mi>E</mml:mi><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>a</mml:mi><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>F</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mi>j</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:mi>b</mml:mi><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>F</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:mi>c</mml:mi></mml:math></disp-formula>where <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>F</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> represents the number of tasks processed by the fog node <italic>j</italic>, and <italic>a</italic>, <italic>b</italic>, and <italic>c</italic> are constants. To calculate the energy consumption of a server, it is assumed that all virtual machines (VMs) of a server have equal CPU processing frequencies. The CPU frequency utilization of individual VMs plays a crucial role in estimating the energy consumption of a server. The power consumption of a server is related to the amount of workload processed by the individual server. When no task is assigned to a server, or its workload is almost zero, the server is turned off to conserve overall system energy. Mathematically, the energy consumption of a cloud server is calculated as below:
<disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:msubsup><mml:mi>E</mml:mi><mml:mrow><mml:mi>C</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">o</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">n</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>[</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:msubsup><mml:mi>V</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula>where <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">o</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">n</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mi>V</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> represent the current ON/OFF state of a server, the number of ON VM, and the total number of VM on server <italic>m</italic>, respectively. 
<inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> are positive constants. The total energy consumption of the system is calculated as the sum of the energy consumed at each computing level. Mathematically, it is represented as below:
<disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:msub><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mi>E</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msubsup><mml:mi>E</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msubsup><mml:mi>E</mml:mi><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msubsup><mml:mi>E</mml:mi><mml:mrow><mml:mi>C</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msubsup></mml:math></disp-formula></p>
</sec>
<sec id="s4_5_2">
<label>4.5.2</label>
<title>Latency Description</title>
<p>In this subsection, we explain the calculation of the latency of devices at different levels. Each terminal device generates tasks as per the Poisson process. Moreover, each device has an input task queue of M/M/1 with the defined service rate. Mathematically, the delay at the terminal device is calculated as below:
<disp-formula id="eqn-10"><label>(10)</label><mml:math id="mml-eqn-10" display="block"><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mfrac><mml:mi>&#x03B7;</mml:mi><mml:mrow><mml:mi>&#x03BC;</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03B7;</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:math></disp-formula>where <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:mi>&#x03B7;</mml:mi></mml:math></inline-formula> indicates the task arrival rate and <inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:mi>&#x03BC;</mml:mi></mml:math></inline-formula> represents the system&#x2019;s service rate that follows Poisson and exponential distribution process respectively.</p>
<p>The fog device <italic>j</italic> is assumed to have an M/M/C input task queue. Since the middle-layer nodes are responsible for task computation, the delay in this layer is computed as the sum of the communication latency and the computation latency. Mathematically, it is represented as below:
<disp-formula id="eqn-11"><label>(11)</label><mml:math id="mml-eqn-11" display="block"><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>m</mml:mi><mml:mi>p</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mfrac><mml:mi>&#x03C8;</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x03B7;</mml:mi></mml:mrow></mml:mfrac><mml:mo>&#x00D7;</mml:mo><mml:msubsup><mml:mrow><mml:mtext>&#x019C;</mml:mtext></mml:mrow><mml:mrow><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>m</mml:mi><mml:mi>p</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup></mml:math></disp-formula>where <inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:mi>&#x03C8;</mml:mi></mml:math></inline-formula> is the average length of the queue, <inline-formula id="ieqn-30"><mml:math id="mml-ieqn-30"><mml:msubsup><mml:mrow><mml:mtext>&#x019C;</mml:mtext></mml:mrow><mml:mrow><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>m</mml:mi><mml:mi>p</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> represents the workload assigned to the fog node <italic>j</italic>. Similarly, the communication delay at the fog node is computed as below:
<disp-formula id="eqn-12"><label>(12)</label><mml:math id="mml-eqn-12" display="block"><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>m</mml:mi><mml:mi>m</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mrow><mml:mtext>&#x025C;</mml:mtext></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>I</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>where <inline-formula id="ieqn-31"><mml:math id="mml-ieqn-31"><mml:mi>I</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is the input length of task <italic>t</italic> and <inline-formula id="ieqn-32"><mml:math id="mml-ieqn-32"><mml:mrow><mml:mtext>&#x025C;</mml:mtext></mml:mrow></mml:math></inline-formula> is a scaling constant. So the total system delay at the fog layer is computed as below:
<disp-formula id="eqn-13"><label>(13)</label><mml:math id="mml-eqn-13" display="block"><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>m</mml:mi><mml:mi>p</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>F</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>m</mml:mi><mml:mi>m</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup></mml:math></disp-formula></p>
<p>Cloud servers are assumed to have an M/M/&#x221E; input queue. It is assumed that the cloud server has an infinite computational capacity. Thus, the computation delay at the cloud server is negligible. To compute the latency at the cloud server, only the communication delay is taken into account. Mathematically, it is expressed as below:
<disp-formula id="eqn-14"><label>(14)</label><mml:math id="mml-eqn-14" display="block"><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>m</mml:mi><mml:mi>m</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>&#x03D1;</mml:mi><mml:mo>&#x2217;</mml:mo><mml:mi>I</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mrow><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>m</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>where <inline-formula id="ieqn-33"><mml:math id="mml-ieqn-33"><mml:mi>I</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>L</mml:mi><mml:mrow><mml:mi>c</mml:mi><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>m</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is the input queue length of server m, and <inline-formula id="ieqn-34"><mml:math id="mml-ieqn-34"><mml:mi>&#x03D1;</mml:mi></mml:math></inline-formula> is a scaling constant. The total offloading delay experienced at each tier is computed as below:
<disp-formula id="eqn-15"><label>(15)</label><mml:math id="mml-eqn-15" display="block"><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mrow><mml:mi>D</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>e</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>C</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>m</mml:mi><mml:mi>m</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msubsup></mml:math></disp-formula></p>
<p>For efficient optimization, it is desirable to achieve the minimum value of optimization objectives, i.e., delay and energy consumption. If we have 100 search agents in the search space, then there will be 100 different solutions containing unique task offloading and scheduling strategies. In that case, the solution with the minimum value of objective parameters is considered as the optimal solution.</p>
</sec>
</sec>
<sec id="s4_6">
<label>4.6</label>
<title>Pareto-Optimal Solution</title>
<p>Non-dominated solutions are called Pareto-optimal solutions, which are stored in a repository whose size is equal to the number of candidate solutions. MoECO uses non-dominated sorting to arrange the solutions found in each iteration in the repository. In some cases, the repository may contain same-ranked non-dominated solutions, so it is necessary to replace an existing solution with a new non-dominated solution. This can lead to reducing the exhaustive search and increasing the diversity of the solutions. To replace a solution, we used a predetermined crowding distance value, which is also used to determine the number of neighboring solutions. Mathematically, it is calculated as below:
<disp-formula id="eqn-16"><label>(16)</label><mml:math id="mml-eqn-16" display="block"><mml:mrow><mml:mtext>D</mml:mtext></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>M</mml:mi><mml:mi>a</mml:mi><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>F</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:mi>M</mml:mi><mml:mi>i</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mrow><mml:mi>F</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>z</mml:mi><mml:mi>e</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:math></disp-formula>where <inline-formula id="ieqn-35"><mml:math id="mml-ieqn-35"><mml:mi>M</mml:mi><mml:mi>a</mml:mi><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>F</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-36"><mml:math id="mml-ieqn-36"><mml:mi>M</mml:mi><mml:mi>i</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mrow><mml:mi>F</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicate the maximum and minimum values of all objectives. <inline-formula id="ieqn-37"><mml:math id="mml-ieqn-37"><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>p</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>z</mml:mi><mml:mi>e</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> is the size of the repository. If a solution has a high crowding distance value, then the solution has a high probability of being replaced. High crowding distance solution contributes more to the diversity of solutions and reduces the chance of local optimality. Alternatively, a solution with a low crowding distance value has a low probability of being replaced.</p>
</sec>
<sec id="s4_7">
<label>4.7</label>
<title>Enhanced Cheetah Optimizer Algorithm</title>
<p>The Cheetah Optimizer has shown its effectiveness in solving various complex, large-scale real-world optimization problems. However, CO suffers from premature convergence and high computation time. To address these shortcomings, CO is modified into the Enhanced Cheetah Optimizer (ECO) to improve convergence speed and reduce computation time. The attacking strategy of ECO encompasses three phases, i.e., the search phase, the sit-and-wait phase, and the attack phase.</p>
<sec id="s4_7_1">
<label>4.7.1</label>
<title>Search Phase</title>
<p>In this phase, the Cheetah searches for the optimal solution (prey) based on its surrounding conditions. In the CO, search agents (Cheetahs) update their location by following their previous position. This leads the algorithm toward local stagnation and reduces convergence speed. In ECO, the searching procedure of search agents is modified by updating positions of search agents based on the position of the leader of the group, i.e., the second-best solution. This reduces randomness and accelerates convergence speed. Mathematically, it is expressed as below:
<disp-formula id="eqn-17"><label>(17)</label><mml:math id="mml-eqn-17" display="block"><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>L</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">R</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:msubsup><mml:mi>S</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></disp-formula>where <inline-formula id="ieqn-38"><mml:math id="mml-ieqn-38"><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula>, <inline-formula id="ieqn-39"><mml:math id="mml-ieqn-39"><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>L</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula id="ieqn-40"><mml:math id="mml-ieqn-40"><mml:msubsup><mml:mrow><mml:mi mathvariant="fraktur">R</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> indicate the new location of search agents, current position of second-best agent and randomization parameter, respectively. 
<inline-formula id="ieqn-41"><mml:math id="mml-ieqn-41"><mml:msubsup><mml:mi>S</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> indicate the dynamic step length. The randomization parameter and random step length for the search agents are calculated as below:
<disp-formula id="eqn-18a"><label>(18a)</label><mml:math id="mml-eqn-18a" display="block"><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">R</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mfrac></mml:math></disp-formula>
<disp-formula id="eqn-18b"><label>(18b)</label><mml:math id="mml-eqn-18b" display="block"><mml:msubsup><mml:mi>S</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></disp-formula>where <inline-formula id="ieqn-42"><mml:math id="mml-ieqn-42"><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula id="ieqn-43"><mml:math id="mml-ieqn-43"><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:math></inline-formula> are the random numbers that belong to the normal distribution function. <inline-formula id="ieqn-44"><mml:math id="mml-ieqn-44"><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> and <inline-formula id="ieqn-45"><mml:math id="mml-ieqn-45"><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> indicate the location of the <italic>k</italic><sup>th</sup> and <italic>i</italic><sup>th</sup> search agents in the sorted population, respectively. In <xref ref-type="disp-formula" rid="eqn-17">Eq. 
(17)</xref>, the term (i.e., <inline-formula id="ieqn-46"><mml:math id="mml-ieqn-46"><mml:msub><mml:mrow><mml:mi mathvariant="fraktur">R</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:msubsup><mml:mi>S</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula>) increases diversity in the solutions, which aids the algorithm in the global search phase. This term creates a long and dynamic step length that replaces the existing solution with a new random solution in the population by extending the existing solution from the accepted range of the variables. Thus, it helps the algorithm to avoid local optimal points.</p>
</sec>
<sec id="s4_7_2">
<label>4.7.2</label>
<title>Sit and Wait Phase</title>
<p>Cheetahs are quick chasers that require much energy. In this phase, Cheetahs sit and wait until the prey is close enough to attack. Consequently, this increases the chance of hunting success. Mathematically, it is modelled as below:
<disp-formula id="eqn-19"><label>(19)</label><mml:math id="mml-eqn-19" display="block"><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></disp-formula></p>
</sec>
<sec id="s4_7_3">
<label>4.7.3</label>
<title>Attacking Phase</title>
<p>Speed and flexibility are two important considerations leveraged by the search agents during the attacking phase. In the previous phase, search agents position themselves as close as possible to the prey. When the prey notices the attack, it changes its direction and runs away from the search agent. In response, the search agents adjust their direction and use their high flexibility to catch the prey in unstable conditions and successfully execute the attack. Mathematically, it is modelled as follows:
<disp-formula id="eqn-20"><label>(20)</label><mml:math id="mml-eqn-20" display="block"><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>P</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:msubsup><mml:mi>&#x03BE;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></disp-formula>where <inline-formula id="ieqn-47"><mml:math id="mml-ieqn-47"><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>P</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> indicates the position of the prey, <inline-formula id="ieqn-48"><mml:math id="mml-ieqn-48"><mml:msub><mml:mi>&#x03BB;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the random number between [0, 1] and <inline-formula id="ieqn-49"><mml:math id="mml-ieqn-49"><mml:msubsup><mml:mi>&#x03BE;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula> is the interaction factor. In <xref ref-type="disp-formula" rid="eqn-20">(20)</xref>, the interaction factor is computed using the location of the adjacent search agent. This may decrease the local search capability, i.e., the exploitation phase. 
Consequently, the algorithm may not be able to obtain a global optimal solution. To increase the exploitation search capability, it is necessary to adjust the location of agents based on the location of the target, i.e., prey. This will improve the convergence speed and computation time. Mathematically, it is expressed as below:
<disp-formula id="eqn-21"><label>(21)</label><mml:math id="mml-eqn-21" display="block"><mml:msubsup><mml:mi>&#x03BE;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>P</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:msubsup><mml:mi>c</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msubsup></mml:math></disp-formula></p>
</sec>
<sec id="s4_7_4">
<label>4.7.4</label>
<title>Update Position Criteria</title>
<p>CO uses the hunting factor along with the random parameters to update the location of agents. To update the location using either strategy (search strategy or attacking strategy), a hunting factor (H-factor) is used. The H-factor decreases over the course of iteration, and it is modelled as below:</p>
<p><disp-formula id="eqn-22"><label>(22)</label><mml:math id="mml-eqn-22" display="block"><mml:mi>H</mml:mi><mml:mo>=</mml:mo><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mfrac><mml:mi>t</mml:mi><mml:mi>T</mml:mi></mml:mfrac><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>&#x00D7;</mml:mo><mml:mn>2</mml:mn><mml:mrow><mml:mo>(</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>where <italic>t</italic> and <italic>T</italic> are the current and maximum iteration, respectively, <italic>rand</italic> is a random value between [0, 1].</p>
</sec>
</sec>
<sec id="s4_8">
<label>4.8</label>
<title>Pseudo-Code Description</title>
<p>Algorithm 1 contains the pseudo-code of the proposed algorithm. The process begins by initializing essential system parameters, including the number of fog nodes, end devices, cloud servers, the task count, required and available computational resources, etc. In lines # 1&#x2013;4, the algorithm initializes the search space by randomly populating it with search agents. Each search agent constructs a solution by scheduling <italic>N</italic> heterogeneous tasks to various computational nodes. Subsequently, the algorithm evaluates each solution using a fitness function. Lines # 6&#x2013;7 involve sorting the solutions of the search agents based on their fitness values. The second-best solution is designated as the leader&#x2019;s location, while the optimal solution is defined as the prey&#x2019;s location. Lines # 8&#x2013;17 are a nested loop in which each search agent creates a new solution by allocating each task to the appropriate computational nodes and then scheduling the offloaded task based on task priorities. Each solution is evaluated using a fitness function, and a solution with the minimum fitness value is selected as the best solution. In line 16, the best solution is updated if it outperforms the previous one. Lines # 18&#x2013;19 store the non-dominated solutions in the external repository and arrange them using non-dominated sorting. Lines # 20&#x2013;23 check the size of the repository; if it is equal to the number of search agents, then the solution with the low crowding distance measure is replaced with the high crowding distance measure. Lines # 24&#x2013;27 implement a loop in which the location of each search agent is updated as per the search and attack strategy defined by the proposed ECO algorithm. In Lines # 38&#x2013;42, new solutions are formed after the repositioning of search agents, which are then re-evaluated using the objective function. Line # 40 stores the non-dominated solution in the repository after re-evaluation. 
The solution with the best objective value from the repository is selected as the best and global optimal solution. In Line # 44, the global optimal solution is returned, which contains a binary matrix of the best computation offloading and task scheduling strategy. The flowchart of the proposed scheduling framework is given in <xref ref-type="fig" rid="fig-4">Fig. 4</xref>.</p>
<fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>Flowchart of proposed algorithm</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-4.tif"/>
</fig>
<fig id="fig-11">
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-11.tif"/>
</fig>
</sec>
<sec id="s4_9">
<label>4.9</label>
<title>Implementation of the Proposed Algorithm in a Cloud-Fog Computing Environment</title>
<p>The implementation of the proposed algorithm requires terminal nodes, fog nodes, communication links, and cloud servers. Each terminal node generates either energy-intensive or delay-intensive tasks. The number of tasks generated by a terminal node varies with the computational demands of the device. Each task is analyzed at the fog controller to determine its resource requirements and execution constraints. Based on this analysis, the fog controller constructs a matrix representing joint task offloading and scheduling strategies. Each strategy is treated as a potential candidate solution (search agent) and evaluated using the defined objective function. Afterward, the mathematical model of MoECO is applied to identify the best offloading and scheduling strategy that minimizes both the energy consumption and the transmission latency. The computing network then proceeds according to the selected optimal strategy.</p>
</sec>
<sec id="s4_10">
<label>4.10</label>
<title>Computational Complexity of the Proposed Algorithm</title>
<p>We compute the computational complexity of each phase and then represent the cumulative complexity of the entire algorithm.</p>
<p><bold>Initialization:</bold> The computational complexity of initializing the search space mainly depends on population size. If <italic>m</italic> search agents are initialized within the search space with the problem dimension <italic>D</italic>, then the initialization phase takes <italic>O</italic> (<italic>m</italic> &#x00D7; <italic>D</italic>).</p>
<p><bold>Task assignment phase:</bold> In this phase, the tasks are offloaded to the suitable computing nodes, either fog device (<italic>FD</italic><sub><italic>n</italic></sub>) or cloud server (<italic>C</italic><sub><italic>sm</italic></sub>) by determining task requirements and availability of computational resources. If <italic>T</italic><sub><italic>k</italic></sub> represents the total number of tasks, then task assignment phase takes <italic>O</italic> (<italic>m</italic> &#x00D7; <italic>T</italic><sub><italic>k</italic></sub> &#x00D7; (<italic>FD</italic><sub><italic>n</italic></sub> &#x00D7; <italic>C</italic><sub><italic>sm</italic></sub>)).</p>
<p><bold>Fitness evaluation and sorting phase:</bold> Each iteration performs fitness computation for <inline-formula id="ieqn-60"><mml:math id="mml-ieqn-60"><mml:mi>m</mml:mi></mml:math></inline-formula> solutions and sorts them to identify the optimal solution in the current iteration. The evaluation takes <italic>O</italic> (<italic>m</italic> &#x00D7; <italic>C</italic><sub><italic>f</italic></sub>). The sorting phase takes <italic>O</italic> (<italic>B &#x002B; m</italic> log <italic>m</italic>) because of non-dominated sorting. So, the total complexity of this phase is <italic>O</italic> (<italic>m</italic> &#x00D7; <italic>C</italic><sub><italic>f</italic></sub> (<italic>B &#x002B; m</italic> log <italic>m</italic>)) or <italic>O</italic> (<italic>m</italic><sup>2</sup> log <italic>m</italic>). <italic>C</italic><sub><italic>f</italic></sub> is the cost associated with the fitness evaluation, and <italic>B</italic> is the number of objectives.</p>
<p><bold>Position update phase:</bold> Each search agent updates its position based on adaptive operators (attack, search, sit-and-wait) that involve a constant number of arithmetic operations. The computational complexity of this phase is <italic>O</italic>(<italic>m</italic>).</p>
<p><bold>Iterative process:</bold> The above phases (2&#x2013;4) are repeated for <italic>T</italic> iterations. So, the computational complexity of all the important steps with iterations <italic>T</italic> is <italic>O</italic> (<italic>T</italic> &#x00D7; (<italic>m</italic> &#x00D7; <italic>T</italic><sub><italic>k</italic></sub> &#x00D7; (<italic>FD</italic><sub><italic>n</italic></sub> &#x00D7; <italic>C</italic><sub><italic>sm</italic></sub>) <italic>&#x002B; m</italic> &#x00D7; <italic>C</italic><sub><italic>f</italic></sub> (<italic>B&#x002B; m</italic> log <italic>m</italic>))).</p>
<p><bold>Total computational complexity:</bold> The simplified total computational complexity of the proposed algorithm for <italic>T</italic> iterations and <italic>m</italic> search agents is <italic>O</italic> (<italic>T</italic> &#x00D7; (<italic>m</italic> &#x00D7; <italic>D &#x002B; m</italic> &#x00D7; <italic>T</italic><sub><italic>k</italic></sub> &#x00D7; (<italic>FD</italic><sub><italic>n</italic></sub> &#x00D7; <italic>C</italic><sub><italic>sm</italic></sub>)<italic> &#x002B; m</italic> &#x00D7; <italic>C</italic><sub><italic>f</italic></sub> (<italic>B&#x002B; m</italic> log <italic>m</italic>))), which collapses to <italic>O</italic> (<italic>T</italic> (<italic>m</italic><sup>2</sup> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> &#x00D7; <italic>C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> &#x00D7; <italic>C</italic><sub><italic>sm</italic></sub>))).</p>
<p><xref ref-type="table" rid="table-2">Table 2</xref> unfolds the computational complexities of the proposed and other baseline methods. This analysis demonstrates that the proposed algorithm is capable of solving multi-objective optimization problems effectively, as it provides a better balance between different and mutually exclusive factors such as energy, delay, fairness index, and throughput. Although the computational complexity of this algorithm is slightly higher, this increase is due to the archive non-dominated sorting process, which can affect the performance to some extent in large networks. The computational complexity of ACO and GA algorithms is similar and they are mostly suitable for single-objective optimization. Nevertheless, they require relatively more iterations to reach the optimal solution.</p>
<table-wrap id="table-2">
<label>Table 2</label>
<caption>
<title>Comparative analysis of computational complexity</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col align="center"/>
<col align="center"/> </colgroup>
<thead>
<tr>
<th>Method</th>
<th>Time complexity 1 iteration</th>
<th>Time complexity T iterations</th>
</tr>
</thead>
<tbody>
<tr>
<td>MoECO</td>
<td><italic>O</italic> (<italic>m</italic><sup>2</sup> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>))</td>
<td><italic>O</italic> (<italic>T</italic> (<italic>m</italic><sup>2</sup> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>)))</td>
</tr>
<tr>
<td>FDQN &#x002B; KMeans</td>
<td><italic>O</italic> (<italic>m &#x002B; m</italic><sup>2</sup> <italic>&#x00D7;</italic> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>))</td>
<td><italic>O</italic> (<italic>T</italic> (<italic>m &#x002B; m</italic><sup>2</sup> <italic>&#x00D7;</italic> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>)))</td>
</tr>
<tr>
<td>PSO &#x002B; WOA</td>
<td><italic>O</italic> (<italic>m</italic><sup>2</sup> <italic>&#x00D7; m</italic> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>))</td>
<td><italic>O</italic> (<italic>T</italic> (<italic>m</italic><sup>2</sup> <italic>&#x00D7; m</italic> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>)))</td>
</tr>
<tr>
<td>GWO</td>
<td><italic>O</italic> (<italic>m</italic><sup>2</sup> <italic>&#x002B; m</italic> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>))</td>
<td><italic>O</italic> (<italic>T</italic> (<italic>m</italic><sup>2</sup> <italic>&#x002B; m</italic> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>)))</td>
</tr>
<tr>
<td>CO</td>
<td><italic>O</italic> (<italic>m</italic> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>))</td>
<td><italic>O</italic> (<italic>T</italic> (<italic>m</italic> log <italic>m &#x002B; m</italic> (<italic>DT</italic><sub><italic>k</italic></sub> <italic>&#x00D7; C</italic><sub><italic>f</italic></sub> <italic>FD</italic><sub><italic>n</italic></sub> <italic>&#x00D7; C</italic><sub><italic>sm</italic></sub>)))</td>
</tr>
<tr>
<td>ACO</td>
<td><italic>O</italic> (<italic>m &#x002B; m</italic><sup>2</sup> <italic>&#x00D7; m</italic> log <italic>m</italic>)</td>
<td><italic>O</italic> (<italic>T</italic> (<italic>m &#x002B; m</italic><sup>2</sup> <italic>&#x00D7; m</italic> log <italic>m</italic>))</td>
</tr>
<tr>
<td>GA</td>
<td><italic>O</italic> (<italic>m &#x002B; m</italic><sup>2</sup> <italic>&#x00D7; m</italic> log <italic>m</italic>)</td>
<td><italic>O</italic> (<italic>T</italic> (<italic>m &#x002B; m</italic><sup>2</sup> <italic>&#x00D7; m</italic> log <italic>m</italic>))</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>On the other hand, the time complexity of the GWO algorithm is quadratic, which is effective for single-objective problems such as energy consumption or delay, but it often suffers from the problem of premature convergence.</p>
</sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Experimental Setup and Results Discussion</title>
<p>The proposed MoECO scheduling algorithm was implemented in MATLAB 2023a on a laptop with a 2.8 GHz Intel Core i9 processor and 16 GB RAM. The simulation environment comprised IoT devices, fog nodes, and cloud servers. The proposed algorithm was compared with the baseline methods, namely Gray Wolf Optimization (GWO), Ant Colony Optimization (ACO), Cheetah Optimizer (CO), and Genetic Algorithm (GA). The analysis with the benchmark schemes was performed using the same simulation parameters as given in <xref ref-type="table" rid="table-3">Table 3</xref>. To ensure the reliability and robustness of the experimental results, each algorithm was executed 10 times. The mean and standard deviation values were computed for all the performance metrics. Moreover, a pairwise Student&#x2019;s <italic>t</italic>-test (<italic>p</italic> &#x003C; 0.05) was applied to analyze the statistical significance of the performance differences between the proposed algorithm and the benchmark algorithms. Furthermore, 95% confidence intervals were computed for each metric to demonstrate the consistency and stability of MoECO across multiple executions. The simulation results demonstrate that our proposed solution efficiently minimizes the energy consumption and communication latency while maximizing task completion rate and fairness index. Efficient joint optimization of computation offloading and task scheduling not only improves network lifetime but also escalates its scalability, reliability, and stability. Simulations prove that our solution effectively handles a large, diverse set of tasks coming from different IoT devices. The simulation was performed using four performance metrics by varying different simulation parameters: (i) energy consumption, (ii) transmission delay, (iii) task completion rate, and (iv) fairness index.</p>
<table-wrap id="table-3">
<label>Table 3</label>
<caption>
<title>Simulation parameters</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col align="center"/> </colgroup>
<thead>
<tr>
<th>List of symbols</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>Max Iterations</td>
<td>150</td>
</tr>
<tr>
<td>Swarm size</td>
<td>100</td>
</tr>
<tr>
<td>Fog Nodes</td>
<td>5&#x2013;50</td>
</tr>
<tr>
<td>Cloud servers</td>
<td>3</td>
</tr>
<tr>
<td>Weight factor &#x03B2;</td>
<td>0.5</td>
</tr>
<tr>
<td>Simulation run</td>
<td>10</td>
</tr>
<tr>
<td>Dimension</td>
<td>2</td>
</tr>
<tr>
<td>Task arrival rate &#x1D702;</td>
<td>3.2</td>
</tr>
<tr>
<td>Repository/archive size</td>
<td>100</td>
</tr>
<tr>
<td>Initial energy Ped</td>
<td>25</td>
</tr>
<tr>
<td>Upper bound</td>
<td>5000</td>
</tr>
<tr>
<td>Lower bound</td>
<td>0</td>
</tr>
<tr>
<td>Service rate &#x03BC;</td>
<td>4.6</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s5_1">
<label>5.1</label>
<title>Number of Tasks vs. Delay</title>
<p>In the first setup, we evaluated the proposed algorithm to measure the total delay of each processed task at levels 50&#x2013;250, while keeping the number of fog nodes fixed at 5. The performance of the proposed task scheduling algorithm was compared with the original CO and other baseline methods. The proposed algorithm improves the searching capability of both the exploration and exploitation search phases by adjusting various variables, e.g., step length and interaction factor. This improves the computation time and convergence speed of the proposed algorithm. Consequently, the total transmission delay of the system is minimized through smart resource utilization. The proposed approach dynamically offloads tasks to either fog nodes or cloud servers based on the capacity, bandwidth, and task requirements (e.g., delay sensitive or computation sensitive). Additionally, the proposed algorithm reduces total delay by prioritizing task scheduling based on task completion deadline and energy utilization, implementing load balancing to prevent workload bottlenecks. The results in <xref ref-type="fig" rid="fig-5">Fig. 5a</xref> demonstrate that our proposed solution exhibits linear behavior against the increasing number of IoT tasks. This signifies that our scheme is stable and efficient when the workload on the system is high.</p>
<fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>Transmission Delay (ms) vs. Tasks under different fog node settings: (<bold>a</bold>) 5 nodes, (<bold>b</bold>) 10 nodes</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-5.tif"/>
</fig>
<p><xref ref-type="fig" rid="fig-5">Fig. 5b</xref> illustrates the analysis of the same experimental setup with different simulation parameters, i.e., we increase the number of tasks from 300 to 500 while the computation nodes, i.e., fog nodes, increase to 10. Here, the proposed algorithm also performs efficiently compared to the state-of-the-art baseline methods in terms of achieving minimum transmission delay. From the results (<xref ref-type="fig" rid="fig-5">Fig. 5a</xref>,<xref ref-type="fig" rid="fig-5">b</xref>), we can conclude that our proposed MoECO scheduling algorithm is ideal for a real-time environment where task needs an immediate response, like the Internet of Medical Things (IoMT).</p>

</sec>
<sec id="s5_2">
<label>5.2</label>
<title>Number of Tasks vs. Energy Consumption</title>
<p>In this experimental setup, we evaluate the performance of the proposed algorithm by analyzing its energy consumption under varying workloads (50&#x2013;500). The network bandwidth and CPU frequency of a computing node contribute to the energy consumption of a device. The results depicted in <xref ref-type="fig" rid="fig-6">Fig. 6a</xref>,<xref ref-type="fig" rid="fig-6">b</xref> demonstrate that the proposed task scheduling algorithm consumes less energy than other approaches, while also extending the lifetime and resource utilization of the network. Compared to the state-of-the-art benchmark schemes, our proposed solution reveals better energy utilization in a complex and dynamic fog computing environment. Unlike baseline methods, which involve higher computational time and low convergence speed to achieve an optimal solution, the proposed ECO scheduling algorithm has high convergence speed, low network overhead, and low computation time. This minimizes the number of iterations to reach the optimal solution, reducing energy consumption.</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>Energy consumption (J) vs. Tasks under different fog node settings: (<bold>a</bold>) 5 nodes, (<bold>b</bold>) 10 nodes</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-6.tif"/>
</fig>
</sec>
<sec id="s5_3">
<label>5.3</label>
<title>Number of Fog Nodes vs. Energy Consumption</title>
<p>In this experimental setup, we evaluate the energy consumption at each tier of the architecture across varying numbers of fog nodes (5&#x2013;50) under the workload of 200 and 300 tasks. It is observed that an increase in the number of fog nodes results in higher energy consumption due to the operational, communication, and data transmission demands of computational nodes. However, an effective resource allocation and scheduling technique can significantly optimize the overall network&#x2019;s energy consumption. The outcomes, as illustrated in <xref ref-type="fig" rid="fig-7">Fig. 7a</xref>,<xref ref-type="fig" rid="fig-7">b</xref>, demonstrate that the proposed algorithm achieves superior energy optimization compared to the baseline algorithms. The proposed scheme dynamically and intelligently identifies the computational requirements of tasks, ensuring that only computationally intensive tasks are offloaded to the cloud server, while others are processed locally on fog nodes. This strategic approach reduces unnecessary data transmission and leverages local processing capabilities, leading to significant energy savings.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>Energy consumption (J) vs. Nodes under different task settings: (<bold>a</bold>) 200 tasks, (<bold>b</bold>) 300 tasks</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-7.tif"/>
</fig>
</sec>
<sec id="s5_4">
<label>5.4</label>
<title>Number of Fog Nodes vs. Transmission Delay</title>
<p>The simulation is performed to evaluate the performance of the proposed scheduling algorithm in terms of transmission delay against the number of nodes under the workload fixed at 200 and 300 tasks. Generally, an increase in fog nodes leads to improved processing capabilities but can also introduce challenges related to task allocation and communication overhead. Effective scheduling techniques are essential to minimize delay and ensure the timely processing of tasks. The simulation results, in <xref ref-type="fig" rid="fig-8">Fig. 8a</xref>,<xref ref-type="fig" rid="fig-8">b</xref>, reveal the operational efficacy of our proposed algorithm in reducing transmission delay. By leveraging its efficient convergence speed and computation time, the workloads are effectively distributed among computation nodes, improving response time and thereby minimizing total transmission delay. In contrast, baseline methods exhibit higher delays due to their comparatively slower convergence and less adaptive resource allocation mechanisms. The efficient results of the proposed algorithm demonstrate its effectiveness in achieving low latency, making it an ideal choice for delay-sensitive applications in fog-cloud computing environments.</p>
<fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>Transmission delay (ms) vs. Nodes under different task settings: (<bold>a</bold>) 200 tasks, (<bold>b</bold>) 300 tasks</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-8.tif"/>
</fig>
</sec>
<sec id="s5_5">
<label>5.5</label>
<title>Task Completion Rate (TCR) and Throughput</title>
<p>TCR indicates the percentage of successfully processed tasks within a given deadline relative to the total number of tasks. In this experimental setup, we calculate the TCR against the number of tasks. The number of tasks varies from 50&#x2013;500 while the computational nodes are kept constant. The simulation results in <xref ref-type="fig" rid="fig-9">Fig. 9a</xref>, reflect the efficiency and reliability of the task scheduling algorithm in terms of achieving high TCR compared to the state-of-the-art similar benchmark algorithms. As observed, TCR decreases with the increase in the workload; this trend is expected because, as the workload grows, the scheduling complexity increases. Moreover, resource contention and deadline violation also affect the task completion rate. The efficient performance of the ECO scheduling algorithm is attributed to high convergence speed, low computation time, and efficient investigation of local and global search capabilities.</p>
<fig id="fig-9">
<label>Figure 9</label>
<caption>
<title>Tasks vs. Task completion rate (<bold>a</bold>), Throughput (<bold>b</bold>), and Fairness index (<bold>c</bold>) (Nodes &#x003D; 10)</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-9.tif"/>
</fig>
<p>Throughput measures how many tasks are completed per unit of time. It assesses the speed and efficiency of the system, i.e., how fast the system can process tasks. In this simulation, we compute the throughput of the proposed MoECO against the number of tasks varying from 60&#x2013;500. The simulation results in <xref ref-type="fig" rid="fig-9">Fig. 9b</xref> demonstrate the efficient performance of the proposed algorithm in terms of obtaining high throughput compared to the baseline methods. The results were normalized by a scaling constant to facilitate comparative visualization. A higher throughput value indicates faster task processing capability and improved scalability under increased workloads.</p>
</sec>
<sec id="s5_6">
<label>5.6</label>
<title>Fairness Index</title>
<p>The fairness index assesses how fairly tasks are distributed among computational nodes (i.e., fog nodes and cloud servers). The allocation of resources among tasks must be fair and in accordance with the policy to prevent resource starvation of low-priority tasks. The imbalanced utilization of resources can lead to task drop-offs and reduce system performance. <xref ref-type="disp-formula" rid="eqn-23">Eq. (23)</xref> is used to calculate the fairness index.</p>
<p><disp-formula id="eqn-23"><label>(23)</label><mml:math id="mml-eqn-23" display="block"><mml:mi>F</mml:mi><mml:mi>I</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mrow><mml:mi>T</mml:mi><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:munderover><mml:msubsup><mml:mi>R</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac></mml:math></disp-formula>where <inline-formula id="ieqn-61"><mml:math id="mml-ieqn-61"><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicate the allocation of resource <italic>R</italic> to task <italic>i</italic>, <italic>T</italic> is the total number of tasks. A high value of the fairness index indicates the fair utilization of resources among tasks, preventing overload on specific computational nodes and underutilization of other computational nodes. Whereas a low fairness index value indicates the uneven distribution of computational resources among tasks. <xref ref-type="fig" rid="fig-9">Fig. 9c</xref> demonstrates the analyses of the fairness index of the MoECO algorithm against similar competitors. The simulation results reveal the efficiency of the proposed algorithm in obtaining high FI values against the total number of tasks ranging from 50&#x2013;500. This means that the resources are efficiently distributed among IoT tasks, preventing resource starvation and resource bottlenecks.</p>
<p><xref ref-type="table" rid="table-4">Tables 4</xref> and <xref ref-type="table" rid="table-5">5</xref> unfold the comparative and performance analysis of MoECO and similar benchmark algorithms, respectively. The numerical data reveals that the proposed algorithm performs well, with improvements of up to 20% in terms of transmission latency, energy consumption, task completion rate, and fairness index. In <xref ref-type="fig" rid="fig-10">Fig. 10</xref>, the convergence behavior of MoECO and baseline scheduling algorithms is presented. The results show that our proposed scheduling algorithm exhibits fast, stable, and reliable convergence behavior while reaching the best fitness value (optimal solution). Particularly, the MoECO algorithm obtains the optimal fitness value much faster than the competitors. This implies that our proposed solution has a high convergence speed compared to other algorithms.</p>
<table-wrap id="table-4">
<label>Table 4</label>
<caption>
<title>Numerical Analysis of proposed and baseline methods</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/> </colgroup>
<thead>
<tr>
<th>Schemes</th>
<th>Latency (ms)</th>
<th>Energy (J)</th>
<th>Latency (ms)</th>
<th>Energy (J)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Tasks/Nodes</td>
<td>200/40</td>
<td>200/40</td>
<td>500/10</td>
<td>500/10</td>
</tr>
<tr>
<td>MoECO</td>
<td>85.05</td>
<td>3.69E&#x002B;05</td>
<td>104.77</td>
<td>1.62E&#x002B;05</td>
</tr>
<tr>
<td>FDQN &#x002B; KMeans</td>
<td>107.33</td>
<td>4.62E&#x002B;05</td>
<td>121.51</td>
<td>1.93E&#x002B;05</td>
</tr>
<tr>
<td>PSO &#x002B; WOA</td>
<td>115.61</td>
<td>5.42E&#x002B;05</td>
<td>130.06</td>
<td>2.38E&#x002B;05</td>
</tr>
<tr>
<td>GWO</td>
<td>120.76</td>
<td>6.73E&#x002B;05</td>
<td>142.29</td>
<td>3.87E&#x002B;05</td>
</tr>
<tr>
<td>CO</td>
<td>127.96</td>
<td>7.68E&#x002B;05</td>
<td>155.58</td>
<td>4.93E&#x002B;05</td>
</tr>
<tr>
<td>ACO</td>
<td>138.52</td>
<td>1.00E&#x002B;06</td>
<td>165.02</td>
<td>5.73E&#x002B;05</td>
</tr>
<tr>
<td>GA</td>
<td>173.70</td>
<td>1.22E&#x002B;06</td>
<td>173.85</td>
<td>9.19E&#x002B;05</td>
</tr>
</tbody>
</table>
</table-wrap><table-wrap id="table-5">
<label>Table 5</label>
<caption>
<title>Performance Analysis with respect to performance metrics</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/> </colgroup>
<thead>
<tr>
<th>Schemes performance</th>
<th>Delay performance</th>
<th>Energy utilization</th>
<th>TCR performance</th>
<th>Fairness index</th>
<th>Net performance</th>
</tr>
</thead>
<tbody>
<tr>
<td>MoECO</td>
<td>59.85%</td>
<td>85.50%</td>
<td>88.63%</td>
<td>88.60%</td>
<td>80.64%</td>
</tr>
<tr>
<td>FDQN &#x002B; KMeans</td>
<td>51.61%</td>
<td>78.71%</td>
<td>83.45%</td>
<td>81.23%</td>
<td>73.75%</td>
</tr>
<tr>
<td>PSO &#x002B; WOA</td>
<td>47.52%</td>
<td>73.41%</td>
<td>79.42%</td>
<td>79.08%</td>
<td>69.85%</td>
</tr>
<tr>
<td>GWO</td>
<td>44.08%</td>
<td>72.32%</td>
<td>76.48%</td>
<td>77.43%</td>
<td>67.57%</td>
</tr>
<tr>
<td>CO</td>
<td>39.90%</td>
<td>67.01%</td>
<td>72.66%</td>
<td>73.98%</td>
<td>63.38%</td>
</tr>
<tr>
<td>ACO</td>
<td>35.55%</td>
<td>58.00%</td>
<td>68.09%</td>
<td>71.21%</td>
<td>58.21%</td>
</tr>
<tr>
<td>GA</td>
<td>24.70%</td>
<td>44.61%</td>
<td>57.23%</td>
<td>68.91%</td>
<td>48.86%</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-10">
<label>Figure 10</label>
<caption>
<title>Convergence behavior vs. Iterations under different fog node and task settings: (<bold>a</bold>) 3 nodes &#x0026; 100 tasks, (<bold>b</bold>) 3 nodes and 200 tasks, (<bold>c</bold>) 10 nodes &#x0026; 300 tasks, (<bold>d</bold>) 10 nodes &#x0026; 400 tasks</title>
</caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CMC_73818-fig-10.tif"/>
</fig>
<p>To validate the reliability of the observed performance improvements, <xref ref-type="table" rid="table-6">Table 6</xref> demonstrates the statistical significance test result between the proposed scheme and the other baseline methods across two performance metrics, i.e., energy consumption and transmission latency. We employed a MANOVA test to jointly evaluate the differences in both performance metrics, while Cohen&#x2019;s d was used to measure the magnitude of improvement (effect size). The <italic>p</italic>-values indicate that MoECO demonstrates statistically significant improvements (<italic>p</italic> &#x003C; 0.05) over all compared algorithms. The effect size (Cohen&#x2019;s d) values complement these results by quantifying the magnitude of improvement. Overall, both statistical significance and effect size analyses confirm that MoECO achieves statistically reliable and practically meaningful improvements over benchmark methods.</p>
<table-wrap id="table-6">
<label>Table 6</label>
<caption>
<title>Statistical results analysis (Energy consumption and latency)</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/> </colgroup>
<thead>
<tr>
<th>Algorithm</th>
<th><italic>p</italic>-value</th>
<th>Energy effect size (Cohen&#x2019;s <italic>d</italic>)</th>
<th>Delay effect size (Cohen&#x2019;s <italic>d</italic>)</th>
<th>Significance test (0.05)</th>
</tr>
</thead>
<tbody>
<tr>
<td>FDQN &#x002B; KMeans</td>
<td>0.0427</td>
<td>0.27</td>
<td>0.50</td>
<td>True</td>
</tr>
<tr>
<td>PSO &#x002B; WOA</td>
<td>0.0344</td>
<td>0.57</td>
<td>0.71</td>
<td>True</td>
</tr>
<tr>
<td>GWO</td>
<td>0.0218</td>
<td>0.94</td>
<td>0.99</td>
<td>True</td>
</tr>
<tr>
<td>CO</td>
<td>0.0190</td>
<td>1.23</td>
<td>1.23</td>
<td>True</td>
</tr>
<tr>
<td>ACO</td>
<td>0.0092</td>
<td>1.37</td>
<td>1.41</td>
<td>True</td>
</tr>
<tr>
<td>GA</td>
<td>0.0085</td>
<td>1.49</td>
<td>1.58</td>
<td>True</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="table-7">Table 7</xref> presents the convergence behavior of the proposed and baseline methods. In this table, we record the most important parameters of convergence behavior, i.e., minimum fitness value (columns 4th and 8th), maximum fitness value (columns 3rd and 7th), and standard deviation (columns 5th and 9th). These values are recorded for 5 and 10 computational nodes under a workload of 100 and 300 tasks. The convergence behavior of the algorithm indicates the reliability and stability of its performance, where a decrease in standard deviation is an argument that our proposed solution consistently and quickly converges to the optimal solution across different iterations. These features are crucial for task scheduling in fog computing environments, where consistent and reliable performance is required to optimize resource allocation and workload balancing.</p>
<table-wrap id="table-7">
<label>Table 7</label>
<caption>
<title>Analysis of fitness values and standard deviation (S.D)</title>
</caption>
<table>
<colgroup>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/> </colgroup>
<thead>
<tr>
<th>Schemes</th>
<th>Nodes</th>
<th>Tasks</th>
<th>Max fitness</th>
<th>Best fitness</th>
<th>S.D.</th>
<th>Nodes</th>
<th>Tasks</th>
<th>Max fitness</th>
<th>Best fitness</th>
<th>S.D.</th>
</tr>
</thead>
<tbody>
<tr>
<td>MoECO</td>
<td>5</td>
<td>100</td>
<td>6.13E&#x002B;02</td>
<td>5.96E&#x002B;01</td>
<td>45.01</td>
<td>10</td>
<td>300</td>
<td>3.64E&#x002B;02</td>
<td>1.99E&#x002B;02</td>
<td>29.95</td>
</tr>
<tr>
<td>FDQN &#x002B; KMeans</td>
<td>5</td>
<td>100</td>
<td>6.56E&#x002B;02</td>
<td>1.21E&#x002B;02</td>
<td>99.03</td>
<td>10</td>
<td>300</td>
<td>5.47E&#x002B;02</td>
<td>2.72E&#x002B;02</td>
<td>73.46</td>
</tr>
<tr>
<td>PSO &#x002B; WOA</td>
<td>5</td>
<td>100</td>
<td>7.11E&#x002B;02</td>
<td>1.27E&#x002B;02</td>
<td>119.7</td>
<td>10</td>
<td>300</td>
<td>8.12E&#x002B;02</td>
<td>3.07E&#x002B;02</td>
<td>100.26</td>
</tr>
<tr>
<td>GWO</td>
<td>5</td>
<td>100</td>
<td>7.80E&#x002B;02</td>
<td>1.27E&#x002B;02</td>
<td>145.4</td>
<td>10</td>
<td>300</td>
<td>1.09E&#x002B;03</td>
<td>2.93E&#x002B;02</td>
<td>103.28</td>
</tr>
<tr>
<td>CO</td>
<td>5</td>
<td>100</td>
<td>8.59E&#x002B;02</td>
<td>1.40E&#x002B;02</td>
<td>152.2</td>
<td>10</td>
<td>300</td>
<td>1.17E&#x002B;03</td>
<td>3.14E&#x002B;02</td>
<td>110.56</td>
</tr>
<tr>
<td>ACO</td>
<td>5</td>
<td>100</td>
<td>1.12E&#x002B;03</td>
<td>1.83E&#x002B;02</td>
<td>197.9</td>
<td>10</td>
<td>300</td>
<td>1.42E&#x002B;03</td>
<td>3.81E&#x002B;02</td>
<td>134.31</td>
</tr>
<tr>
<td>GA</td>
<td>5</td>
<td>100</td>
<td>1.41E&#x002B;03</td>
<td>1.34E&#x002B;02</td>
<td>219.8</td>
<td>10</td>
<td>300</td>
<td>1.56E&#x002B;03</td>
<td>3.09E&#x002B;02</td>
<td>174.18</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>These capabilities of the presented algorithm make it an efficient and robust solution for solving complex scheduling in a dynamic computing environment.</p>
<p>To measure the scalability of the proposed scheduling algorithm, both the number of computational nodes and tasks are progressively increased within the cloud-fog hierarchy. Based on the results, as the workloads increased progressively (50&#x2013;500), the proposed algorithm maintained a monotonically increasing behavior with a gradual increase in energy consumption and transmission latency. Upon increasing the workloads, MoECO manages such increases via multi-objective decomposition and a parallel execution mechanism. This enables the algorithm to compute the objective values one by one instead of computing them altogether in one massive problem. The comparative baseline methods demonstrate a sharp rise in energy consumption and delay after 300 tasks, while MoECO scales in a linear fashion, sustains convergence stability, and ensures consistent task distribution across all the computational nodes. This is attributed to the adaptive step-length control and leader&#x2013;follower coordination mechanism, which prevent premature convergence and ensure balanced exploration across distributed search agents. For large-scale IoT networks, this characteristic is particularly valuable because IoT environments often experience bursty workloads; the proposed algorithm can efficiently manage such variation without the loss of convergence stability.</p>
<p>From the results, we conclude that our proposed algorithm efficiently addressed the NP-hard optimization problem, i.e., task scheduling in fog computing. Its success is due to the high convergence speed, low computation time, and diversity in local and global search capabilities. This helps the algorithm to provide a globally optimal solution. Given the efficient results of the proposed algorithm, we can successfully apply it to real-world problems. For instance, we can employ the algorithm (after required modifications) in the IoMT environment, where tasks need an immediate response and must be completed within the required deadline. As our proposed solution provides minimum delay and a higher task completion rate, we can efficiently utilize the algorithm in the medical environment for patient monitoring.</p>
</sec>
</sec>
<sec id="s6">
<label>6</label>
<title>Conclusion</title>
<p>In this study, we present a multi-objective and enhanced version of the Cheetah Optimizer (CO) for energy-efficient task scheduling specifically tailored for cloud&#x2013;fog computing environments. The proposed algorithm effectively addresses the critical challenge of optimizing computation offloading and task scheduling in dynamic and heterogeneous networks. By integrating the proposed scheme into the fog controller, our approach enables accurate task estimation, analysis, allocation to appropriate computing nodes and orderly execution of offloaded tasks, ensuring efficient resource utilization. Extensive evaluations in MATLAB demonstrated the superior performance of our algorithm in terms of designated optimization objectives (i.e., energy consumption and transmission delay) compared to CO and other state-of-the-art methodologies. The ECO efficiently increases the local and global search capabilities by adjusting the step-length and interaction-factor variables of the CO, respectively. This significantly improves the convergence speed and computation time, thereby minimizing transmission delay and energy consumption while maximizing task completion rate and fairness index. Additionally, the algorithm exhibits scalability, maintaining robust performance even under increased workloads.</p>
<p>In the future, we will address the security and privacy of the tasks by employing Federated Learning (FL) and Blockchain technology. FL will enable fog nodes to collaboratively train local offloading models without sharing sensitive information, thus preserving the privacy of the user and reducing the communication cost associated with centralized data collection. Simultaneously, Blockchain can be employed to provide a tamper-proof and transparent ledger for verifying offloading records and resource utilization. This combination will ensure data integrity, trustworthiness and secure model aggregation across decentralized nodes. We can explore other new bio-inspired methods like the Henry Gas Solubility Optimization (HGSO) Algorithm and the Artificial Hummingbird Algorithm (AHA). These advancements will further enhance the reliability and adaptability of cloud&#x2013;fog computing systems in addressing the growing demands of modern IoT applications.</p>
</sec>
</body>
<back>
<ack>
<p>The authors extend their appreciation to the Princess Nourah bint Abdulrahman University Researchers, Riyadh, Saudi Arabia, for supporting this research.</p>
</ack>
<sec>
<title>Funding Statement</title>
<p>The authors extend their appreciation to the Princess Nourah bint Abdulrahman University Researchers Supporting Project number (PNURSP2025R384), Princess Nourah bint Abdulrahman University, Riyadh, Saudi Arabia.</p>
</sec>
<sec>
<title>Author Contributions</title>
<p>Conceptualization, Ahmad Zia and Nazia Azim; methodology, Ahmad Zia and Bekarystankyzy Akbayan; software, Nazia Azim and Nouf Al-Kahtani; validation, Ateeq Ur Rehman, Faheem Ullah Khan and Khalid J. Alzahrani; formal analysis, Nouf Al-Kahtani and Ateeq Ur Rehman; investigation, Khalid J. Alzahrani; data curation, Bekarystankyzy Akbayan, Hend Khalid Alkahtani and Faheem Ullah Khan; writing&#x2014;original draft preparation, Ahmad Zia, Ateeq Ur Rehman and Hend Khalid Alkahtani; writing&#x2014;review and editing, all authors; funding acquisition, Hend Khalid Alkahtani. All authors reviewed the results and approved the final version of the manuscript.</p>
</sec>
<sec sec-type="data-availability">
<title>Availability of Data and Materials</title>
<p>All the data are present within the article.</p>
</sec>
<sec>
<title>Ethics Approval</title>
<p>This research did not involve human participants, animals, or any personally identifiable information.</p>
</sec>
<sec sec-type="COI-statement">
<title>Conflicts of Interest</title>
<p>The authors declare no conflicts of interest to report regarding the present study.</p>
</sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Sun</surname> <given-names>G</given-names></string-name>, <string-name><surname>Liao</surname> <given-names>D</given-names></string-name>, <string-name><surname>Zhao</surname> <given-names>D</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Yu</surname> <given-names>H</given-names></string-name></person-group>. <article-title>Live migration for multiple correlated virtual machines in cloud-based data centers</article-title>. <source>IEEE Trans Serv Comput</source>. <year>2015</year>;<volume>11</volume>(<issue>2</issue>):<fpage>279</fpage>&#x2013;<lpage>91</lpage>. doi:<pub-id pub-id-type="doi">10.1109/tsc.2015.2477825</pub-id>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Ali</surname> <given-names>A</given-names></string-name>, <string-name><surname>Azim</surname> <given-names>N</given-names></string-name>, <string-name><surname>Othman</surname> <given-names>MTB</given-names></string-name>, <string-name><surname>Rehman</surname> <given-names>AU</given-names></string-name>, <string-name><surname>Alajmi</surname> <given-names>M</given-names></string-name>, <string-name><surname>Al-Adhaileh</surname> <given-names>MH</given-names></string-name>, <etal>et al</etal></person-group>. <article-title>Joint optimization of computation offloading and task scheduling using multi-objective arithmetic optimization algorithm in cloud-fog computing</article-title>. <source>IEEE Access</source>. <year>2024</year>;<volume>12</volume>(<issue>7</issue>):<fpage>184158</fpage>&#x2013;<lpage>8</lpage>. doi:<pub-id pub-id-type="doi">10.1109/access.2024.3512191</pub-id>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Kumar</surname> <given-names>M</given-names></string-name>, <string-name><surname>Gupta</surname> <given-names>P</given-names></string-name>, <string-name><surname>Kumar</surname> <given-names>A</given-names></string-name></person-group>. <article-title>Performance analysis of a fog computing-based vehicular communication</article-title>. <source>Int J Veh Inf Commun Syst</source>. <year>2024</year>;<volume>9</volume>(<issue>2</issue>):<fpage>115</fpage>&#x2013;<lpage>34</lpage>. doi:<pub-id pub-id-type="doi">10.1504/ijvics.2024.137761</pub-id>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Huang</surname> <given-names>S</given-names></string-name>, <string-name><surname>Sun</surname> <given-names>C</given-names></string-name>, <string-name><surname>Pompili</surname> <given-names>D</given-names></string-name></person-group>. <article-title>Meta-ETI: meta-reinforcement learning with explicit task inference for UAV-IoT coverage</article-title>. <source>IEEE Internet Things J</source>. <year>2025</year>;<volume>12</volume>(<issue>13</issue>):<fpage>23852</fpage>&#x2013;<lpage>65</lpage>. doi:<pub-id pub-id-type="doi">10.1109/jiot.2025.3553808</pub-id>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Ali</surname> <given-names>A</given-names></string-name>, <string-name><surname>Shah</surname> <given-names>SAA</given-names></string-name>, <string-name><surname>Shloul</surname> <given-names>TA</given-names></string-name>, <string-name><surname>Assam</surname> <given-names>M</given-names></string-name>, <string-name><surname>Ghadi</surname> <given-names>YY</given-names></string-name>, <string-name><surname>Lim</surname> <given-names>S</given-names></string-name>, <etal>et al</etal></person-group>. <article-title>Multi-objective Harris hawks optimization based task scheduling in cloud-fog computing</article-title>. <source>IEEE Internet Things J</source>. <year>2024</year>;<volume>11</volume>(<issue>13</issue>):<fpage>24334</fpage>&#x2013;<lpage>52</lpage>. doi:<pub-id pub-id-type="doi">10.1109/jiot.2024.3391024</pub-id>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Dai</surname> <given-names>X</given-names></string-name>, <string-name><surname>Xiao</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Jiang</surname> <given-names>H</given-names></string-name>, <string-name><surname>Alazab</surname> <given-names>M</given-names></string-name>, <string-name><surname>Lui</surname> <given-names>JC</given-names></string-name>, <string-name><surname>Min</surname> <given-names>G</given-names></string-name>, <etal>et al</etal></person-group>. <article-title>Task offloading for cloud-assisted fog computing with dynamic service caching in enterprise management systems</article-title>. <source>IEEE Trans Ind Inform</source>. <year>2022</year>;<volume>19</volume>(<issue>1</issue>):<fpage>662</fpage>&#x2013;<lpage>72</lpage>. doi:<pub-id pub-id-type="doi">10.1109/tii.2022.3186641</pub-id>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Zhang</surname> <given-names>S</given-names></string-name></person-group>. <article-title>Feature-aware task offloading and scheduling mechanism in vehicle edge computing environment</article-title>. <source>Int J Veh Inf Commun Syst</source>. <year>2024</year>;<volume>9</volume>(<issue>4</issue>):<fpage>415</fpage>&#x2013;<lpage>33</lpage>. doi:<pub-id pub-id-type="doi">10.1504/ijvics.2024.142101</pub-id>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Sudhakar</surname> <given-names>RV</given-names></string-name>, <string-name><surname>Dastagiraiah</surname> <given-names>C</given-names></string-name>, <string-name><surname>Pattem</surname> <given-names>S</given-names></string-name>, <string-name><surname>Bhukya</surname> <given-names>S</given-names></string-name></person-group>. <article-title>Multi-objective reinforcement learning based algorithm for dynamic workflow scheduling in cloud computing</article-title>. <source>Indones J Electr Eng Inform</source>. <year>2024</year>;<volume>12</volume>(<issue>3</issue>):<fpage>640</fpage>&#x2013;<lpage>9</lpage>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Li</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Gu</surname> <given-names>W</given-names></string-name>, <string-name><surname>Shang</surname> <given-names>H</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>G</given-names></string-name>, <string-name><surname>Zhou</surname> <given-names>G</given-names></string-name></person-group>. <article-title>Research on dynamic job shop scheduling problem with AGV based on DQN</article-title>. <source>Clust Comput</source>. <year>2025</year>;<volume>28</volume>(<issue>4</issue>):<fpage>236</fpage>. doi:<pub-id pub-id-type="doi">10.1007/s10586-024-04970-x</pub-id>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Zhang</surname> <given-names>B</given-names></string-name>, <string-name><surname>Sang</surname> <given-names>H</given-names></string-name>, <string-name><surname>Lu</surname> <given-names>C</given-names></string-name>, <string-name><surname>Meng</surname> <given-names>L</given-names></string-name>, <string-name><surname>Song</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Jiang</surname> <given-names>X</given-names></string-name></person-group>. <article-title>Integrated heterogeneous graph and reinforcement learning enabled efficient scheduling for surface mount technology workshop</article-title>. <source>Inf Sci</source>. <year>2025</year>;<volume>708</volume>(<issue>3</issue>):<fpage>122023</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.ins.2025.122023</pub-id>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Wei</surname> <given-names>M</given-names></string-name>, <string-name><surname>Yang</surname> <given-names>S</given-names></string-name>, <string-name><surname>Wu</surname> <given-names>W</given-names></string-name>, <string-name><surname>Sun</surname> <given-names>B</given-names></string-name></person-group>. <article-title>A multi-objective fuzzy optimization model for multi-type aircraft flight scheduling problem</article-title>. <source>Transport</source>. <year>2024</year>;<volume>39</volume>(<issue>4</issue>):<fpage>313</fpage>&#x2013;<lpage>22</lpage>. doi:<pub-id pub-id-type="doi">10.3846/transport.2024.20536</pub-id>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><surname>Huang</surname> <given-names>W</given-names></string-name>, <string-name><surname>Li</surname> <given-names>T</given-names></string-name>, <string-name><surname>Cao</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Lyu</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Liang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Yu</surname> <given-names>L</given-names></string-name>, <etal>et al</etal></person-group>. <article-title>Safe-NORA: safe reinforcement learning-based mobile network resource allocation for diverse user demands</article-title>. In: <conf-name>Proceedings of the 32nd ACM International Conference on Information and Knowledge Management (CIKM 2023)</conf-name>; <year>2023 Oct 21&#x2013;25</year>; <publisher-loc>Birmingham, UK</publisher-loc>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Naik</surname> <given-names>BB</given-names></string-name>, <string-name><surname>Priyanka</surname> <given-names>B</given-names></string-name>, <string-name><surname>Ansari</surname> <given-names>MSA</given-names></string-name></person-group>. <article-title>Energy-efficient task offloading and efficient resource allocation for edge computing: a quantum inspired particle swarm optimization approach</article-title>. <source>Clust Comput</source>. <year>2025</year>;<volume>28</volume>(<issue>3</issue>):<fpage>155</fpage>. doi:<pub-id pub-id-type="doi">10.1007/s10586-024-04833-5</pub-id>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Gawali</surname> <given-names>MB</given-names></string-name>, <string-name><surname>Shinde</surname> <given-names>SK</given-names></string-name></person-group>. <article-title>Task scheduling and resource allocation in cloud computing using a heuristic approach</article-title>. <source>J Cloud Comput</source>. <year>2018</year>;<volume>7</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>16</lpage>. doi:<pub-id pub-id-type="doi">10.1186/s13677-018-0105-8</pub-id>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Nabi</surname> <given-names>S</given-names></string-name>, <string-name><surname>Ahmad</surname> <given-names>M</given-names></string-name>, <string-name><surname>Ibrahim</surname> <given-names>M</given-names></string-name>, <string-name><surname>Hamam</surname> <given-names>H</given-names></string-name></person-group>. <article-title>AdPSO: adaptive PSO-based task scheduling approach for cloud computing</article-title>. <source>Sensors</source>. <year>2022</year>;<volume>22</volume>(<issue>3</issue>):<fpage>920</fpage>. doi:<pub-id pub-id-type="doi">10.3390/s22030920</pub-id>; <pub-id pub-id-type="pmid">35161665</pub-id></mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Sharma</surname> <given-names>N</given-names></string-name>, <string-name><surname>Garg</surname> <given-names>P</given-names></string-name></person-group>. <article-title>Ant colony based optimization model for QoS-Based task scheduling in cloud computing environment</article-title>. <source>Meas Sens</source>. <year>2022</year>;<volume>24</volume>(<issue>1</issue>):<fpage>100531</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.measen.2022.100531</pub-id>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Peng</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Pirozmand</surname> <given-names>P</given-names></string-name>, <string-name><surname>Motevalli</surname> <given-names>M</given-names></string-name>, <string-name><surname>Esmaeili</surname> <given-names>A</given-names></string-name></person-group>. <article-title>Genetic algorithm-based task scheduling in cloud computing using Mapreduce framework</article-title>. <source>Math Probl Eng</source>. <year>2022</year>;<volume>2022</volume>(<issue>1</issue>):<fpage>4290382</fpage>. doi:<pub-id pub-id-type="doi">10.1155/2022/4290382</pub-id>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Mangalampalli</surname> <given-names>S</given-names></string-name>, <string-name><surname>Karri</surname> <given-names>GR</given-names></string-name>, <string-name><surname>Kumar</surname> <given-names>M</given-names></string-name></person-group>. <article-title>Multi objective task scheduling algorithm in cloud computing using grey wolf optimization</article-title>. <source>Clust Comput</source>. <year>2023</year>;<volume>26</volume>(<issue>6</issue>):<fpage>3803</fpage>&#x2013;<lpage>22</lpage>. doi:<pub-id pub-id-type="doi">10.1007/s10586-022-03786-x</pub-id>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Akbari</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Zare</surname> <given-names>M</given-names></string-name>, <string-name><surname>Azizipanah-Abarghooee</surname> <given-names>R</given-names></string-name>, <string-name><surname>Mirjalili</surname> <given-names>S</given-names></string-name>, <string-name><surname>Deriche</surname> <given-names>M</given-names></string-name></person-group>. <article-title>The cheetah optimizer: a nature-inspired metaheuristic algorithm for large-scale optimization problems</article-title>. <source>Sci Rep</source>. <year>2022</year>;<volume>12</volume>(<issue>1</issue>):<fpage>10953</fpage>. doi:<pub-id pub-id-type="doi">10.1038/s41598-022-14338-z</pub-id>; <pub-id pub-id-type="pmid">35768456</pub-id></mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Memon</surname> <given-names>ZA</given-names></string-name>, <string-name><surname>Akbari</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Zare</surname> <given-names>M</given-names></string-name></person-group>. <article-title>An improved cheetah optimizer for accurate and reliable estimation of unknown parameters in photovoltaic cell and module models</article-title>. <source>Appl Sci</source>. <year>2023</year>;<volume>13</volume>(<issue>18</issue>):<fpage>9997</fpage>. doi:<pub-id pub-id-type="doi">10.3390/app13189997</pub-id>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Wang</surname> <given-names>Z</given-names></string-name>, <string-name><surname>Goudarzi</surname> <given-names>M</given-names></string-name>, <string-name><surname>Gong</surname> <given-names>M</given-names></string-name>, <string-name><surname>Buyya</surname> <given-names>R</given-names></string-name></person-group>. <article-title>Deep reinforcement learning-based scheduling for optimizing system load and response time in edge and fog computing environments</article-title>. <source>Future Gener Comput Syst</source>. <year>2024</year>;<volume>152</volume>(<issue>6</issue>):<fpage>55</fpage>&#x2013;<lpage>69</lpage>. doi:<pub-id pub-id-type="doi">10.1016/j.future.2023.10.012</pub-id>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Singh</surname> <given-names>SP</given-names></string-name></person-group>. <article-title>Effective load balancing strategy using fuzzy golden eagle optimization in fog computing environment</article-title>. <source>Sustain Comput Inform Syst</source>. <year>2022</year>;<volume>35</volume>(<issue>2</issue>):<fpage>100766</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.suscom.2022.100766</pub-id>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Archana</surname> <given-names>R</given-names></string-name></person-group>. <article-title>Multilevel scheduling mechanism for a stochastic fog computing environment using the HIRO model and RNN</article-title>. <source>Sustain Comput Inform Syst</source>. <year>2023</year>;<volume>39</volume>(<issue>8</issue>):<fpage>100887</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.suscom.2023.100887</pub-id>.</mixed-citation></ref>
<ref id="ref-24"><label>[24]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Yang</surname> <given-names>M</given-names></string-name>, <string-name><surname>Ma</surname> <given-names>H</given-names></string-name>, <string-name><surname>Wei</surname> <given-names>S</given-names></string-name>, <string-name><surname>Zeng</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Chen</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Hu</surname> <given-names>Y</given-names></string-name></person-group>. <article-title>A multi-objective task scheduling method for fog computing in cyber-physical-social services</article-title>. <source>IEEE Access</source>. <year>2020</year>;<volume>8</volume>:<fpage>65085</fpage>&#x2013;<lpage>95</lpage>. doi:<pub-id pub-id-type="doi">10.1109/access.2020.2983742</pub-id>.</mixed-citation></ref>
<ref id="ref-25"><label>[25]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Siyadatzadeh</surname> <given-names>R</given-names></string-name>, <string-name><surname>Mehrafrooz</surname> <given-names>F</given-names></string-name>, <string-name><surname>Ansari</surname> <given-names>M</given-names></string-name>, <string-name><surname>Safaei</surname> <given-names>B</given-names></string-name>, <string-name><surname>Shafique</surname> <given-names>M</given-names></string-name>, <string-name><surname>Henkel</surname> <given-names>J</given-names></string-name>, <etal>et al</etal></person-group>. <article-title>Relief: a reinforcement-learning-based real-time task assignment strategy in emerging fault-tolerant fog computing</article-title>. <source>IEEE Internet Things J</source>. <year>2023</year>;<volume>10</volume>(<issue>12</issue>):<fpage>10752</fpage>&#x2013;<lpage>63</lpage>. doi:<pub-id pub-id-type="doi">10.1109/jiot.2023.3240007</pub-id>.</mixed-citation></ref>
<ref id="ref-26"><label>[26]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Jain</surname> <given-names>V</given-names></string-name>, <string-name><surname>Kumar</surname> <given-names>B</given-names></string-name></person-group>. <article-title>QoS-aware task offloading in fog environment using multi-agent deep reinforcement learning</article-title>. <source>J Netw Syst Manag</source>. <year>2023</year>;<volume>31</volume>(<issue>1</issue>):<fpage>7</fpage>. doi:<pub-id pub-id-type="doi">10.1007/s10922-022-09696-y</pub-id>.</mixed-citation></ref>
<ref id="ref-27"><label>[27]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Chandrashekar</surname> <given-names>C</given-names></string-name>, <string-name><surname>Krishnadoss</surname> <given-names>P</given-names></string-name>, <string-name><surname>Kedalu Poornachary</surname> <given-names>V</given-names></string-name>, <string-name><surname>Ananthakrishnan</surname> <given-names>B</given-names></string-name>, <string-name><surname>Rangasamy</surname> <given-names>K</given-names></string-name></person-group>. <article-title>HWACOA scheduler: hybrid weighted ant colony optimization algorithm for task scheduling in cloud computing</article-title>. <source>Appl Sci</source>. <year>2023</year>;<volume>13</volume>(<issue>6</issue>):<fpage>3433</fpage>. doi:<pub-id pub-id-type="doi">10.3390/app13063433</pub-id>.</mixed-citation></ref>
<ref id="ref-28"><label>[28]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Cheng</surname> <given-names>F</given-names></string-name>, <string-name><surname>Huang</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Tanpure</surname> <given-names>B</given-names></string-name>, <string-name><surname>Sawalani</surname> <given-names>P</given-names></string-name>, <string-name><surname>Cheng</surname> <given-names>L</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>C</given-names></string-name></person-group>. <article-title>Cost-aware job scheduling for cloud instances using deep reinforcement learning</article-title>. <source>Clust Comput</source>. <year>2022</year>;<volume>25</volume>(<issue>1</issue>):<fpage>619</fpage>&#x2013;<lpage>31</lpage>. doi:<pub-id pub-id-type="doi">10.1007/s10586-021-03436-8</pub-id>.</mixed-citation></ref>
<ref id="ref-29"><label>[29]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Chen</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Lin</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Hu</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Hsia</surname> <given-names>C</given-names></string-name>, <string-name><surname>Lian</surname> <given-names>Y</given-names></string-name>, <string-name><surname>Jhong</surname> <given-names>S</given-names></string-name></person-group>. <article-title>Distributed real-time object detection based on edge-cloud collaboration for smart video surveillance applications</article-title>. <source>IEEE Access</source>. <year>2022</year>;<volume>10</volume>(<issue>2</issue>):<fpage>93745</fpage>&#x2013;<lpage>59</lpage>. doi:<pub-id pub-id-type="doi">10.1109/access.2022.3203053</pub-id>.</mixed-citation></ref>
<ref id="ref-30"><label>[30]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Rafique</surname> <given-names>H</given-names></string-name>, <string-name><surname>Shah</surname> <given-names>MA</given-names></string-name>, <string-name><surname>Islam</surname> <given-names>SU</given-names></string-name>, <string-name><surname>Maqsood</surname> <given-names>T</given-names></string-name>, <string-name><surname>Khan</surname> <given-names>S</given-names></string-name>, <string-name><surname>Maple</surname> <given-names>C</given-names></string-name></person-group>. <article-title>A novel bio-inspired hybrid algorithm (NBIHA) for efficient resource management in fog computing</article-title>. <source>IEEE Access</source>. <year>2019</year>;<volume>7</volume>:<fpage>115760</fpage>&#x2013;<lpage>73</lpage>. doi:<pub-id pub-id-type="doi">10.1109/access.2019.2924958</pub-id>.</mixed-citation></ref>
<ref id="ref-31"><label>[31]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Ghobaei-Arani</surname> <given-names>M</given-names></string-name>, <string-name><surname>Souri</surname> <given-names>A</given-names></string-name>, <string-name><surname>Safara</surname> <given-names>F</given-names></string-name>, <string-name><surname>Norouzi</surname> <given-names>M</given-names></string-name></person-group>. <article-title>An efficient task scheduling approach using moth-flame optimization algorithm for cyber-physical system applications in fog computing</article-title>. <source>Trans Emerg Telecommun Technol</source>. <year>2020</year>;<volume>31</volume>(<issue>2</issue>):<fpage>e3770</fpage>. doi:<pub-id pub-id-type="doi">10.1002/ett.3770</pub-id>.</mixed-citation></ref>
<ref id="ref-32"><label>[32]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Ali</surname> <given-names>IM</given-names></string-name>, <string-name><surname>Sallam</surname> <given-names>KM</given-names></string-name>, <string-name><surname>Moustafa</surname> <given-names>N</given-names></string-name>, <string-name><surname>Chakraborty</surname> <given-names>R</given-names></string-name>, <string-name><surname>Ryan</surname> <given-names>M</given-names></string-name>, <string-name><surname>Choo</surname> <given-names>K-KR</given-names></string-name></person-group>. <article-title>An automated task scheduling model using non-dominated sorting genetic algorithm II for fog-cloud systems</article-title>. <source>IEEE Trans Cloud Comput</source>. <year>2020</year>;<volume>10</volume>(<issue>4</issue>):<fpage>2294</fpage>&#x2013;<lpage>308</lpage>. doi:<pub-id pub-id-type="doi">10.1109/tcc.2020.3032386</pub-id>.</mixed-citation></ref>
<ref id="ref-33"><label>[33]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Saif</surname> <given-names>FA</given-names></string-name>, <string-name><surname>Latip</surname> <given-names>R</given-names></string-name>, <string-name><surname>Hanapi</surname> <given-names>ZM</given-names></string-name>, <string-name><surname>Shafinah</surname> <given-names>K</given-names></string-name></person-group>. <article-title>Multi-objective grey wolf optimizer algorithm for task scheduling in cloud-fog computing</article-title>. <source>IEEE Access</source>. <year>2023</year>;<volume>11</volume>:<fpage>20635</fpage>&#x2013;<lpage>46</lpage>. doi:<pub-id pub-id-type="doi">10.1109/access.2023.3241240</pub-id>.</mixed-citation></ref>
<ref id="ref-34"><label>[34]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Tang</surname> <given-names>M</given-names></string-name>, <string-name><surname>Wong</surname> <given-names>VW</given-names></string-name></person-group>. <article-title>Deep reinforcement learning for task offloading in mobile edge computing systems</article-title>. <source>IEEE Trans Mob Comput</source>. <year>2020</year>;<volume>21</volume>(<issue>6</issue>):<fpage>1985</fpage>&#x2013;<lpage>97</lpage>. doi:<pub-id pub-id-type="doi">10.1109/tmc.2020.3036871</pub-id>.</mixed-citation></ref>
<ref id="ref-35"><label>[35]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Razaq</surname> <given-names>MM</given-names></string-name>, <string-name><surname>Rahim</surname> <given-names>S</given-names></string-name>, <string-name><surname>Tak</surname> <given-names>B</given-names></string-name>, <string-name><surname>Peng</surname> <given-names>L</given-names></string-name></person-group>. <article-title>Fragmented task scheduling for load-balanced fog computing based on Q-learning</article-title>. <source>Wirel Commun Mob Comput</source>. <year>2022</year>;<volume>2022</volume>(<issue>1</issue>):<fpage>4218696</fpage>. doi:<pub-id pub-id-type="doi">10.1155/2022/4218696</pub-id>.</mixed-citation></ref>
<ref id="ref-36"><label>[36]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Choppara</surname> <given-names>P</given-names></string-name>, <string-name><surname>Mangalampalli</surname> <given-names>SS</given-names></string-name></person-group>. <article-title>Adaptive task scheduling in fog computing using federated DQN and K-means clustering</article-title>. <source>IEEE Access</source>. <year>2025</year>;<volume>13</volume>(<issue>1</issue>):<fpage>75466</fpage>&#x2013;<lpage>92</lpage>. doi:<pub-id pub-id-type="doi">10.1109/access.2025.3563487</pub-id>.</mixed-citation></ref>
<ref id="ref-37"><label>[37]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Bansal</surname> <given-names>S</given-names></string-name>, <string-name><surname>Aggarwal</surname> <given-names>H</given-names></string-name></person-group>. <article-title>A multiobjective optimization of task workflow scheduling using hybridization of PSO and WOA algorithms in cloud-fog computing</article-title>. <source>Clust Comput</source>. <year>2024</year>;<volume>27</volume>(<issue>8</issue>):<fpage>10921</fpage>&#x2013;<lpage>52</lpage>. doi:<pub-id pub-id-type="doi">10.1007/s10586-024-04522-3</pub-id>.</mixed-citation></ref>
<ref id="ref-38"><label>[38]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Mangalampalli</surname> <given-names>S</given-names></string-name>, <string-name><surname>Hashmi</surname> <given-names>SS</given-names></string-name>, <string-name><surname>Gupta</surname> <given-names>A</given-names></string-name>, <string-name><surname>Karri</surname> <given-names>GR</given-names></string-name>, <string-name><surname>Rajkumar</surname> <given-names>KV</given-names></string-name>, <string-name><surname>Chakrabarti</surname> <given-names>T</given-names></string-name>, <etal>et al</etal></person-group>. <article-title>Multi objective prioritized workflow scheduling using deep reinforcement based learning in cloud computing</article-title>. <source>IEEE Access</source>. <year>2024</year>;<volume>12</volume>:<fpage>5373</fpage>&#x2013;<lpage>92</lpage>. doi:<pub-id pub-id-type="doi">10.1109/access.2024.3350741</pub-id>.</mixed-citation></ref>
<ref id="ref-39"><label>[39]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Li</surname> <given-names>H</given-names></string-name>, <string-name><surname>Zhang</surname> <given-names>X</given-names></string-name>, <string-name><surname>Li</surname> <given-names>H</given-names></string-name>, <string-name><surname>Duan</surname> <given-names>X</given-names></string-name>, <string-name><surname>Xu</surname> <given-names>C</given-names></string-name></person-group>. <article-title>SLA-based task offloading for energy consumption constrained workflows in fog computing</article-title>. <source>Future Gener Comput Syst</source>. <year>2024</year>;<volume>156</volume>(<issue>5</issue>):<fpage>64</fpage>&#x2013;<lpage>76</lpage>. doi:<pub-id pub-id-type="doi">10.1016/j.future.2024.03.013</pub-id>.</mixed-citation></ref>
<ref id="ref-40"><label>[40]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Li</surname> <given-names>H</given-names></string-name>, <string-name><surname>Liu</surname> <given-names>L</given-names></string-name>, <string-name><surname>Duan</surname> <given-names>X</given-names></string-name>, <string-name><surname>Li</surname> <given-names>H</given-names></string-name>, <string-name><surname>Zheng</surname> <given-names>P</given-names></string-name>, <string-name><surname>Tang</surname> <given-names>L</given-names></string-name></person-group>. <article-title>Energy-efficient offloading based on hybrid bio-inspired algorithm for edge-cloud integrated computation</article-title>. <source>Sustain Comput Inform Syst</source>. <year>2024</year>;<volume>42</volume>(<issue>11</issue>):<fpage>100972</fpage>. doi:<pub-id pub-id-type="doi">10.1016/j.suscom.2024.100972</pub-id>.</mixed-citation></ref>
<ref id="ref-41"><label>[41]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Maurya</surname> <given-names>A</given-names></string-name>, <string-name><surname>Pandey</surname> <given-names>A</given-names></string-name></person-group>. <article-title>Cheetah optimization for optimal power management in standalone solar PV systems with EV integration</article-title>. <source>Eng Res Express</source>. <year>2025</year>;<volume>7</volume>(<issue>2</issue>):<fpage>025319</fpage>. doi:<pub-id pub-id-type="doi">10.1088/2631-8695/adc900</pub-id>.</mixed-citation></ref>
<ref id="ref-42"><label>[42]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Sharma</surname> <given-names>S</given-names></string-name>, <string-name><surname>Kumar</surname> <given-names>V</given-names></string-name></person-group>. <article-title>Grid-based multi-objective cheetah optimization for engineering applications</article-title>. <source>Clust Comput</source>. <year>2025</year>;<volume>28</volume>(<issue>4</issue>):<fpage>266</fpage>. doi:<pub-id pub-id-type="doi">10.1007/s10586-024-04907-4</pub-id>.</mixed-citation></ref>
<ref id="ref-43"><label>[43]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><surname>Sivasakthivel</surname> <given-names>R</given-names></string-name>, <string-name><surname>Rajagopal</surname> <given-names>M</given-names></string-name>, <string-name><surname>Anitha</surname> <given-names>G</given-names></string-name>, <string-name><surname>Loganathan</surname> <given-names>K</given-names></string-name>, <string-name><surname>Abbas</surname> <given-names>M</given-names></string-name>, <string-name><surname>Ksibi</surname> <given-names>A</given-names></string-name>, <etal>et al</etal></person-group>. <article-title>Simulating online and offline tasks using hybrid cheetah optimization algorithm for patients affected by neurodegenerative diseases</article-title>. <source>Sci Rep</source>. <year>2025</year>;<volume>15</volume>(<issue>1</issue>):<fpage>8951</fpage>. doi:<pub-id pub-id-type="doi">10.1038/s41598-025-93047-9</pub-id>; <pub-id pub-id-type="pmid">40089573</pub-id>.</mixed-citation></ref>
</ref-list>
</back></article>