<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">IASC</journal-id>
<journal-id journal-id-type="nlm-ta">IASC</journal-id>
<journal-id journal-id-type="publisher-id">IASC</journal-id>
<journal-title-group>
<journal-title>Intelligent Automation &#x0026; Soft Computing</journal-title>
</journal-title-group>
<issn pub-type="epub">2326-005X</issn>
<issn pub-type="ppub">1079-8587</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">34069</article-id>
<article-id pub-id-type="doi">10.32604/iasc.2023.034069</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Deep Learning Driven Arabic Text to Speech Synthesizer for Visually Challenged People</article-title><alt-title alt-title-type="left-running-head">Deep Learning Driven Arabic Text to Speech Synthesizer for Visually Challenged People</alt-title><alt-title alt-title-type="right-running-head">Deep Learning Driven Arabic Text to Speech Synthesizer for Visually Challenged People</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Alnfiai</surname><given-names>Mrim M.</given-names></name>
<xref ref-type="aff" rid="aff-1">1</xref>
<xref ref-type="aff" rid="aff-2">2</xref>
</contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Almalki</surname><given-names>Nabil</given-names></name>
<xref ref-type="aff" rid="aff-1">1</xref>
<xref ref-type="aff" rid="aff-3">3</xref>
</contrib>
<contrib id="author-3" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Al-Wesabi</surname><given-names>Fahd N.</given-names></name>
<xref ref-type="aff" rid="aff-4">4</xref><email>falwesabi@kku.edu.sa</email>
</contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Alduhayyem</surname><given-names>Mesfer</given-names></name>
<xref ref-type="aff" rid="aff-5">5</xref>
</contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Hilal</surname><given-names>Anwer Mustafa</given-names></name>
<xref ref-type="aff" rid="aff-6">6</xref>
</contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Hamza</surname><given-names>Manar Ahmed</given-names></name>
<xref ref-type="aff" rid="aff-6">6</xref>
</contrib>
<aff id="aff-1"><label>1</label><institution>King Salman Center for Disability Research</institution>, <addr-line>Riyadh, 13369</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Information Technology, College of Computers and Information Technology, Taif University, P.O. Box 11099</institution>, <addr-line>Taif, 21944</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Special Education, College of Education, King Saud University</institution>, <addr-line>Riyadh, 12372</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-4"><label>4</label><institution>Department of Computer Science, College of Science &#x0026; Arts at Muhayel, King Khaled University</institution>, <addr-line>Abha, 62217</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Computer Science, College of Sciences and Humanities-Aflaj, Prince Sattam bin Abdulaziz University</institution>, <addr-line>Al-Aflaj, 16733</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-6"><label>6</label><institution>Department of Computer and Self Development, Preparatory Year Deanship, Prince Sattam bin Abdulaziz University</institution>, <addr-line>AlKharj, 16242</addr-line>, <country>Saudi Arabia</country></aff>
</contrib-group><author-notes><corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Fahd N. Al-Wesabi. Email: <email>falwesabi@kku.edu.sa</email></corresp></author-notes>
<pub-date date-type="collection" publication-format="electronic"><year>2023</year></pub-date>
<pub-date date-type="pub" publication-format="electronic"><day>9</day><month>3</month><year>2023</year></pub-date>
<volume>36</volume>
<issue>3</issue>
<fpage>2639</fpage>
<lpage>2652</lpage>
<history>
<date date-type="received"><day>05</day><month>7</month><year>2022</year></date>
<date date-type="accepted"><day>14</day><month>10</month><year>2022</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Alnfiai et al.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Alnfiai et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_IASC_34069.pdf"></self-uri>
<abstract><p>Text-To-Speech (TTS) is a speech processing tool that is highly helpful for visually-challenged people. The TTS tool is applied to transform the texts into human-like sounds. However, it is highly challenging to accomplish the TTS outcomes for the non-diacritized text of the Arabic language since it has multiple unique features and rules. Some special characters like gemination and diacritic signs that correspondingly indicate consonant doubling and short vowels greatly impact the precise pronunciation of the Arabic language. But, such signs are not frequently used in the texts written in the Arabic language since its speakers and readers can guess them from the context itself. In this background, the current research article introduces an Optimal Deep Learning-driven Arab Text-to-Speech Synthesizer (ODLD-ATSS) model to help the visually-challenged people in the Kingdom of Saudi Arabia. The prime aim of the presented ODLD-ATSS model is to convert the text into speech signals for visually-challenged people. To attain this, the presented ODLD-ATSS model initially designs a Gated Recurrent Unit (GRU)-based prediction model for diacritic and gemination signs. Besides, the Buckwalter code is utilized to capture, store and display the Arabic texts. To improve the TSS performance of the GRU method, the Aquila Optimization Algorithm (AOA) is used, which shows the novelty of the work. To illustrate the enhanced performance of the proposed ODLD-ATSS model, further experimental analyses were conducted. The proposed model achieved a maximum accuracy of 96.35%, and the experimental outcomes infer the improved performance of the proposed ODLD-ATSS model over other DL-based TSS models.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Saudi Arabia</kwd>
<kwd>visually challenged people</kwd>
<kwd>deep learning</kwd>
<kwd>Aquila optimizer</kwd>
<kwd>gated recurrent unit</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>The term &#x2018;Visually Impaired (VI)&#x2019; denotes individuals with non-recoverable low vision or no vision [<xref ref-type="bibr" rid="ref-1">1</xref>]. Per a survey conducted earlier, 89% of visually-impaired persons live in less-developed nations, whereas nearly half are women. Braille is a language that was particularly developed for those who lost their vision. It has numerous compilations of six-dot patterns [<xref ref-type="bibr" rid="ref-2">2</xref>]. In the Braille language, the natural language symbols about the sounds are indicated by the activation or deactivation of the dots. Primarily, the Braille language is written and read on slates or paper with raised dots. Devices such as the Perkins Brailler, a Braille typewriter, were specifically created so that visually-impaired people could produce this writing pattern. With the arrival of technology, multiple mechanisms and touchscreen gadgets have been developed. Their applications have expanded in terms of smart magnifiers, talkback services, navigators, screen readers and obstacle detection &#x0026; avoidance mechanisms for visually-impaired people [<xref ref-type="bibr" rid="ref-3">3</xref>]. The Braille language helps them to interact with their counterparts and others globally. It helps enhance natural language communication and conversational technology for educational objectives [<xref ref-type="bibr" rid="ref-4">4</xref>]. Most vision-impaired people hesitate to leverage their smartphones due to usability and accessibility problems. For instance, they find it challenging to identify the location of a place on smartphones. Whenever vision-impaired persons execute a task, a feedback mechanism should be made available to them to provide the result [<xref ref-type="bibr" rid="ref-5">5</xref>,<xref ref-type="bibr" rid="ref-6">6</xref>].</p>
<p>Conventional Machine Learning (ML) techniques are broadly utilized in several research fields. But, such methodologies necessitate profound knowledge in the specific field and utilize the pre-defined characteristics for assessment [<xref ref-type="bibr" rid="ref-7">7</xref>,<xref ref-type="bibr" rid="ref-8">8</xref>]. So, it becomes essential to manually execute an extensive range of feature extraction works in the existing techniques. Deep Learning (DL), one of the ML techniques, is a sub-set of Artificial Intelligence (AI) techniques. It is characterized by a scenario in which computers start learning to think based on the structures found in the human brain [<xref ref-type="bibr" rid="ref-9">9</xref>]. The DL techniques can examine the unstructured data, videos and images in several ways, whereas ML techniques cannot easily perform these tasks [<xref ref-type="bibr" rid="ref-10">10</xref>]. The ML and the DL techniques are applied in multiple industrial domains, whereas language plays a significant role in the day-to-day life of human beings. Language is of prime importance, whether it may be a passion, speech, a coding system or sign language, i.e., to convey meaning via touch. It expresses one&#x2019;s experiences, thoughts, reactions, emotions and intentions. The Text-To-Speech (TTS) synthesizer converts the language data, stored in the form of text, to a speech format. Recently, it has been primarily utilized in audio-reading gadgets for vision-impaired persons [<xref ref-type="bibr" rid="ref-11">11</xref>].</p>
<p>TTS has become one of the major applications of the Natural Language Processing (NLP) technique. Various researchers have worked on speech synthesis processes in literature since the significance of novel applications has increased, such as the information retrieval services over the telephone like banking services, announcements at public locations such as train stations and reading manuscripts to collect the data [<xref ref-type="bibr" rid="ref-12">12</xref>]. Many research works have been conducted earlier in two languages, English and French, in the domain of TTS. However, other languages like Arabic are yet to be explored in detail. Sufficient space exists for the growth and development of research works in this arena. So, the development of an Arabic Text-To-Speech system is still in the nascent stage. Hence, this project&#x2019;s scope is limited in terms of providing the guidelines for developing an Arabic speech synthesis technique and changing the methodology to overcome the difficulties experienced by the authors in this domain. This is done to help the researchers build highly-promising Voice-to-Text applications for the Arabic language [<xref ref-type="bibr" rid="ref-13">13</xref>].</p>
<p>In the study conducted earlier [<xref ref-type="bibr" rid="ref-14">14</xref>], a new structure was devised for signer-independent sign language detection with the help of multiple DL architectures. This method had hand-shaped feature representations, Deep Recurrent Neural Network (RNN) and semantic segmentation. An advanced semantic segmentation technique, i.e., DeepLabv3&#x002B;, was trained to utilize pixel-labelled hand images to extract hand regions from every frame of the input video. Then, the extracted hand regions were scaled and cropped to a fixed size to alleviate the hand-scale variations. The hand-shaped features were attained with the help of a single-layer Convolutional Self-Organizing Map (CSOM) instead of a pre-trained Deep Convolutional Neural Network (CNN). In literature [<xref ref-type="bibr" rid="ref-15">15</xref>], a prototype was developed for a text-to-speech synthesizer for Tigrigna Language.</p>
<p>In literature [<xref ref-type="bibr" rid="ref-16">16</xref>], the Arabic version of the data was constructed as a part of MS COCO and Flickr caption data sets. In addition, a generative merger method was introduced to caption the images in the Arabic language based on CNN and deep RNN methods. The experimental results inferred that when using a large corpus, the merged methods can attain outstanding outcomes in the case of Arabic image captioning. The researchers [<xref ref-type="bibr" rid="ref-17">17</xref>] investigated and reviewed different DL structures and modelling choices for recognising Arabic handwritten texts. Moreover, the imbalanced dataset issue was overcome by offering the model to the DL mechanism. To face this problem, a new adaptive data-augmentation method was presented to promote class diversity. Every word was allocated weight in the database lexicon. The authors [<xref ref-type="bibr" rid="ref-18">18</xref>] aim to automatically detect the Arabic Sign Language (ArSL) alphabet using an image-related method. To be specific, several visual descriptors were analyzed to construct an accurate ArSL alphabet recognizer. The derived visual descriptors were conveyed to the One-<italic>vs</italic>.-All Support Vector Machine (SVM) method.</p>
<p>The current study introduces an Optimal Deep Learning-driven Arab Text-to-Speech Synthesizer (ODLD-ATSS) model to help the visually-challenged people in the Kingdom of Saudi Arabia. The prime objective of the presented ODLD-ATSS model is to convert the text into speech signals for the visually-challenged people. To attain this, the presented ODLD-ATSS model initially designs a Gated Recurrent Unit (GRU)-based prediction model for diacritic and gemination signs. Besides, the Buckwalter code is also utilized to capture, store and display the Arabic text. In order to enhance the TSS performance of the GRU model, the Aquila Optimization Algorithm (AOA) is used. Numerous experimental analyses were conducted to establish the enhanced performance of the proposed ODLD-ATSS model.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>The Proposed Model</title>
<p>In this study, a novel ODLD-ATSS technique has been proposed for the TSS process so that it can be used by the visually-challenged people in the Kingdom of Saudi Arabia. The presented ODLD-ATSS model aims to convert text into speech signals for visually-impaired people. The overall working process of the proposed model is shown in <xref ref-type="fig" rid="fig-1">Fig. 1</xref>.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Overall process of the ODLD-ATSS model</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="IASC_34069-fig-1.tif"/>
</fig>
<sec id="s2_1">
<label>2.1</label>
<title>GRU-Based Diacritic and Gemination Sign Prediction</title>
<p>The presented ODLD-ATSS model initially designs a GRU-based prediction model for diacritic and gemination signs. The RNN characterizes the Neural Network (NN) through multiple recurrent layers in the form of hidden layers. It diverges from others in the reception of an input dataset. The RNN method is mainly applied in sequence datasets that are closely connected with time series datasets, for instance, stock prices, language, weather and so on. So, the previous dataset creates an impact on the outcomes of the model [<xref ref-type="bibr" rid="ref-19">19</xref>]. The RNN model is a well-developed model that can process the time series datasets easily; it employs the recurrent bias as well as the weights by passing the dataset via a cyclic vector. It attains the <inline-formula id="ieqn-1">
<mml:math id="mml-ieqn-1"><mml:mi>X</mml:mi></mml:math>
</inline-formula> input vector and generates an output vector, y. It has a Fully Connected (FC) structure that is classified based on the unlimited length of the input and output values. The shape and the style are generated for the network by modifying its architecture. However, the existing RNN suffers from long-term dependency issues [<xref ref-type="bibr" rid="ref-19">19</xref>]. The weight gets deviated towards infinity or gets converged towards 0 as the time lag progresses. As a result, the Long Short-Term Memory (LSTM) model is devised to overcome the long-term dependency issue of the existing RNN model. It is identified as a different architecture from RNN and is classified as the occurrence of a cell state. The LSTM model contains the forget gate, output gate and the input gate. The application of the LSTM model brings a remarkable capability to remember the long-term dependencies, as it takes a long period to train the module due to its complex architecture. Consequently, the GRU model is developed to accelerate the trained technique. It is a type of RNN structure that has a gate model based on a simple structure and the LSTM model.</p>
<p>The GRU model is a variant of the LSTM model; the latter extends the RNN with three gate functions, namely the output gate, the input gate and the forget gate, which control the output value, the input and the memory respectively. There are two gates present in the GRU mechanism, namely the upgrade gate and the reset gate. <xref ref-type="fig" rid="fig-2">Fig. 2</xref> shows a specific architecture in which <inline-formula id="ieqn-2">
<mml:math id="mml-ieqn-2"><mml:mi>&#x03C3;</mml:mi></mml:math>
</inline-formula> represents the gating function. This technique effectively reduces the calculation amount and the possibility of vanishing the gradient explosions. A specific function model is shown herewith [<xref ref-type="bibr" rid="ref-20">20</xref>]:</p>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Structure of the GRU model</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="IASC_34069-fig-2.tif"/>
</fig>
<p><disp-formula id="eqn-1"><label>(1)</label>
<mml:math id="mml-eqn-1" display="block"><mml:mrow><mml:msub><mml:mi>Z</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mi>z</mml:mi></mml:msub></mml:mrow><mml:mo>&#x22C5;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:msup><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>b</mml:mi><mml:mi>z</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-2"><label>(2)</label>
<mml:math id="mml-eqn-2" display="block"><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mi>r</mml:mi></mml:msub></mml:mrow><mml:mo>&#x22C5;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:msup><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>b</mml:mi><mml:mi>r</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-3"><label>(3)</label>
<mml:math id="mml-eqn-3" display="block"><mml:msubsup><mml:mi>h</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>t</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>h</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>W</mml:mi><mml:mo>&#x22C5;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mspace width="thinmathspace" /><mml:mrow><mml:mo>&#x2217;</mml:mo></mml:mrow><mml:mspace width="thinmathspace" /><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>b</mml:mi><mml:mi>h</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-4"><label>(4)</label>
<mml:math id="mml-eqn-4" display="block"><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msub><mml:mi>z</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2217;</mml:mo><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>z</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2217;</mml:mo><mml:msubsup><mml:mi>h</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msubsup><mml:mo>,</mml:mo></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-5"><label>(5)</label>
<mml:math id="mml-eqn-5" display="block"><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x03C3;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow><mml:mo>&#x22C5;</mml:mo><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:math>
</disp-formula></p>
<p>In this expression, <inline-formula id="ieqn-3">
<mml:math id="mml-ieqn-3"><mml:mrow><mml:msub><mml:mi>Z</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-4">
<mml:math id="mml-ieqn-4"><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> signify the upgrade gate and the reset gate respectively, <inline-formula id="ieqn-5">
<mml:math id="mml-ieqn-5"><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mi>z</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo></mml:math>
</inline-formula> <inline-formula id="ieqn-6">
<mml:math id="mml-ieqn-6"><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mi>r</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo></mml:math>
</inline-formula> <inline-formula id="ieqn-7">
<mml:math id="mml-ieqn-7"><mml:mi>W</mml:mi></mml:math>
</inline-formula>, and <inline-formula id="ieqn-8">
<mml:math id="mml-ieqn-8"><mml:mi>W</mml:mi></mml:math>
</inline-formula> symbolize the weight variables of the input dataset, <inline-formula id="ieqn-9">
<mml:math id="mml-ieqn-9"><mml:mrow><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> specifies the output of the preceding layer and <inline-formula id="ieqn-10">
<mml:math id="mml-ieqn-10"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> represents the input of the existing layer. <inline-formula id="ieqn-11">
<mml:math id="mml-ieqn-11"><mml:mrow><mml:msub><mml:mi>b</mml:mi><mml:mi>z</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo></mml:math>
</inline-formula> <inline-formula id="ieqn-12">
<mml:math id="mml-ieqn-12"><mml:mrow><mml:msub><mml:mi>b</mml:mi><mml:mi>r</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, and <inline-formula id="ieqn-13">
<mml:math id="mml-ieqn-13"><mml:mrow><mml:msub><mml:mi>b</mml:mi><mml:mi>h</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> correspond to biases, <inline-formula id="ieqn-14">
<mml:math id="mml-ieqn-14"><mml:mi>&#x03C3;</mml:mi></mml:math>
</inline-formula> indicates the sigmoid function and <inline-formula id="ieqn-15">
<mml:math id="mml-ieqn-15"><mml:mi>t</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>h</mml:mi></mml:math>
</inline-formula> is exploited to change the value flow across the network. The output value, next to <inline-formula id="ieqn-16">
<mml:math id="mml-ieqn-16"><mml:mi>&#x03C3;</mml:mi></mml:math>
</inline-formula> and <inline-formula id="ieqn-17">
<mml:math id="mml-ieqn-17"><mml:mi>t</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>h</mml:mi></mml:math>
</inline-formula> functions, can be controlled between <inline-formula id="ieqn-18">
<mml:math id="mml-ieqn-18"><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-19">
<mml:math id="mml-ieqn-19"><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula>. Then, the final output is attained whereas the loss values are evaluated based on the loss function given below.</p>
<p><disp-formula id="eqn-6"><label>(6)</label>
<mml:math id="mml-eqn-6" display="block"><mml:mrow><mml:msub><mml:mi>E</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>d</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mi>y</mml:mi><mml:mi>t</mml:mi><mml:mn>0</mml:mn></mml:msubsup><mml:msup><mml:mo stretchy="false">)</mml:mo><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo></mml:mstyle></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-7"><label>(7)</label>
<mml:math id="mml-eqn-7" display="block"><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>T</mml:mi></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:msup><mml:mi>t</mml:mi><mml:mrow><mml:mi mathvariant="normal">&#x2032;</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:msub></mml:mrow></mml:math>
</disp-formula></p>
<p>Now, <inline-formula id="ieqn-20">
<mml:math id="mml-ieqn-20"><mml:mrow><mml:msub><mml:mi>E</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> embodies the loss of the instance at <inline-formula id="ieqn-21">
<mml:math id="mml-ieqn-21"><mml:mi>t</mml:mi></mml:math>
</inline-formula> time, <inline-formula id="ieqn-22">
<mml:math id="mml-ieqn-22"><mml:mrow><mml:msub><mml:mi>y</mml:mi><mml:mi>d</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> specifies the real label data, <inline-formula id="ieqn-23">
<mml:math id="mml-ieqn-23"><mml:msubsup><mml:mi>y</mml:mi><mml:mi>t</mml:mi><mml:mn>0</mml:mn></mml:msubsup></mml:math>
</inline-formula> denotes the output value of the preliminary iteration and <inline-formula id="ieqn-24">
<mml:math id="mml-ieqn-24"><mml:mi>E</mml:mi></mml:math>
</inline-formula> indicates the total loss accumulated over all <inline-formula id="ieqn-25">
<mml:math id="mml-ieqn-25"><mml:mi>T</mml:mi></mml:math>
</inline-formula> time steps.</p>
<p>The Backpropagation (BP) methodology is exploited to learn the network. Therefore, the partial derivatives of the loss function must be evaluated for the variable. After the evaluation of the partial derivatives, the loss convergence is iteratively defined and the variable is upgraded.</p>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>AOA Based Hyperparameter Optimization</title>
<p>In order to improve the TSS performance of the GRU model, the AOA technique is used as a hyperparameter optimizer [<xref ref-type="bibr" rid="ref-21">21</xref>]. Like other birds, Aquila has a dark brown colour complexion. The back of its head has an additional golden brown colour. These birds have agility and speed. Furthermore, it has sharp claws and strong legs that assist in capturing the target. Aquila is famous for attacking the adult deer. It constructs large nests in mountains or in high altitudes. Aquila is a skilled hunter and its intelligence is equal to that of the human beings&#x2019; intelligence. Similar to the population-based algorithms, the AOA methodology starts with a population of the candidate solutions. The technique stochastically initiates with an upper limit and a lower limit [<xref ref-type="bibr" rid="ref-21">21</xref>]. Every iteration almost defines the optimal solution as given below.</p>
<p><disp-formula id="eqn-8"><label>(8)</label>
<mml:math id="mml-eqn-8" display="block"><mml:mrow><mml:mi>X</mml:mi></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mtable rowspacing="4pt" columnspacing="1em"><mml:mtr><mml:mtd><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x22EE;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>m</mml:mi><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:mo>&#x2026;</mml:mo></mml:mtd><mml:mtd><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>m</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-9"><label>(9)</label>
<mml:math id="mml-eqn-9" display="block"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>U</mml:mi><mml:mrow><mml:msub><mml:mi>B</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:mrow><mml:msub><mml:mi>B</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mi>L</mml:mi><mml:mrow><mml:msub><mml:mi>B</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>m</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>n</mml:mi></mml:math>
</disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-7">Eq. (7)</xref>, <inline-formula id="ieqn-26">
<mml:math id="mml-ieqn-26"><mml:mi>m</mml:mi></mml:math>
</inline-formula> denotes the overall number of the candidate solutions available and <inline-formula id="ieqn-27">
<mml:math id="mml-ieqn-27"><mml:mi>n</mml:mi></mml:math>
</inline-formula> represents the size of the dimension. <inline-formula id="ieqn-28">
<mml:math id="mml-ieqn-28"><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:math>
</inline-formula> corresponds to an arbitrary value and the <inline-formula id="ieqn-29">
<mml:math id="mml-ieqn-29"><mml:mi>j</mml:mi></mml:math>
</inline-formula>-<inline-formula id="ieqn-30">
<mml:math id="mml-ieqn-30"><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math>
</inline-formula> lower limit is denoted by <inline-formula id="ieqn-31">
<mml:math id="mml-ieqn-31"><mml:mi>L</mml:mi><mml:mrow><mml:msub><mml:mi>B</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>. <italic>UB<sub>j</sub></italic> denotes the <inline-formula id="ieqn-32">
<mml:math id="mml-ieqn-32"><mml:mi>j</mml:mi></mml:math>
</inline-formula>-<inline-formula id="ieqn-33">
<mml:math id="mml-ieqn-33"><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math>
</inline-formula> upper limit. In general, the AO process simulates the Aquila behaviour in the hunting procedure as shown below.</p>
<p>- Step 1: Increased exploration <inline-formula id="ieqn-35">
<mml:math id="mml-ieqn-35"><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math>
</inline-formula></p>
<p>In step 1, the Aquila explores from the sky to define a searching region and identifies the location of the prey. Then, it recognizes the areas of the prey and selects the finest area for hunting.</p>
<p><disp-formula id="eqn-10"><label>(10)</label>
<mml:math id="mml-eqn-10" display="block"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mi>t</mml:mi><mml:mi>T</mml:mi></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>M</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-11"><label>(11)</label>
<mml:math id="mml-eqn-11" display="block"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>M</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac></mml:mrow><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>N</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>d</mml:mi><mml:mi>i</mml:mi><mml:mi>m</mml:mi></mml:mstyle></mml:math>
</disp-formula></p>
<p>Here, the solution for iteration <inline-formula id="ieqn-36">
<mml:math id="mml-ieqn-36"><mml:mi>t</mml:mi></mml:math>
</inline-formula> is denoted by <inline-formula id="ieqn-37">
<mml:math id="mml-ieqn-37"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula>. It produces a preliminary searching technique <inline-formula id="ieqn-38">
<mml:math id="mml-ieqn-38"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math>
</inline-formula>, whereas <inline-formula id="ieqn-39">
<mml:math id="mml-ieqn-39"><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> denotes the optimally-attained solution during the <inline-formula id="ieqn-40">
<mml:math id="mml-ieqn-40"><mml:mi>t</mml:mi></mml:math>
</inline-formula><sup>th</sup> iteration. This value defines the assessed point of the target. The variable that manages the augmented exploration through the iteration count is denoted by <inline-formula id="ieqn-41">
<mml:math id="mml-ieqn-41"><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mi>t</mml:mi><mml:mi>T</mml:mi></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula>. The mean point value of the existing solution, connected during <inline-formula id="ieqn-42">
<mml:math id="mml-ieqn-42"><mml:mi>t</mml:mi></mml:math>
</inline-formula> iteration, is denoted by <inline-formula id="ieqn-43">
<mml:math id="mml-ieqn-43"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>M</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, in which <italic>rand</italic> corresponds to an arbitrary number. The population size is <inline-formula id="ieqn-44">
<mml:math id="mml-ieqn-44"><mml:mi>N</mml:mi><mml:mo>.</mml:mo></mml:math>
</inline-formula> The dimension size is <inline-formula id="ieqn-45">
<mml:math id="mml-ieqn-45"><mml:mi>d</mml:mi><mml:mi>i</mml:mi><mml:mi>m</mml:mi></mml:math>
</inline-formula>.</p>
<p>- Step 2: Limited exploration <inline-formula id="ieqn-46">
<mml:math id="mml-ieqn-46"><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math>
</inline-formula></p>
<p>Next, the target is established at a high altitude. Here, the Aquila encircles in the cloud, reaches the location and gets ready to attack the target. This process is arithmetically expressed through the following equations.</p>
<p><disp-formula id="eqn-12"><label>(12)</label>
<mml:math id="mml-eqn-12" display="block"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>L</mml:mi><mml:mi>e</mml:mi><mml:mi>v</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>D</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>R</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>x</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-13"><label>(13)</label>
<mml:math id="mml-eqn-13" display="block"><mml:mrow><mml:mi>L</mml:mi><mml:mi>e</mml:mi><mml:mi>v</mml:mi><mml:mi>y</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>D</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>s</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:mi>u</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>&#x03C3;</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mo>|</mml:mo><mml:mi>&#x03BD;</mml:mi><mml:mo>|</mml:mo></mml:mrow><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mi>&#x03B2;</mml:mi></mml:mfrac></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-14"><label>(14)</label>
<mml:math id="mml-eqn-14" display="block"><mml:mrow><mml:mi>&#x03C3;</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>&#x0393;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mi>&#x03B2;</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>&#x03C0;</mml:mi><mml:mi>&#x03B2;</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>&#x0393;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mi>&#x03B2;</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>&#x03B2;</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:msup><mml:mn>2</mml:mn><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>&#x03B2;</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mn>2</mml:mn></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:math></disp-formula></p>
<p><disp-formula id="eqn-15"><label>(15)</label>
<mml:math id="mml-eqn-15" display="block"><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mi>r</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mi mathvariant="normal">c</mml:mi><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">s</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>&#x03B8;</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-16"><label>(16)</label>
<mml:math id="mml-eqn-16" display="block"><mml:mi>x</mml:mi><mml:mo>=</mml:mo><mml:mi>r</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mi mathvariant="normal">s</mml:mi><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">n</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>&#x03B8;</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-17"><label>(17)</label>
<mml:math id="mml-eqn-17" display="block"><mml:mi>r</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mi>U</mml:mi><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>D</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-18"><label>(18)</label>
<mml:math id="mml-eqn-18" display="block"><mml:mi>&#x03B8;</mml:mi><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03C9;</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:msub><mml:mi>D</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03B8;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-19"><label>(19)</label>
<mml:math id="mml-eqn-19" display="block"><mml:mrow><mml:msub><mml:mi>&#x03B8;</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:mn>3</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mi>&#x03C0;</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:mfrac></mml:mrow></mml:mstyle></mml:math>
</disp-formula></p>
<p>In these expressions, the conclusion of iteration <inline-formula id="ieqn-47">
<mml:math id="mml-ieqn-47"><mml:mi>t</mml:mi></mml:math>
</inline-formula>, created by the next technique phase, is denoted by <inline-formula id="ieqn-48">
<mml:math id="mml-ieqn-48"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula>. The distribution function of the Levy flight is denoted by Levy (D), in which D denotes the dimensional space. <inline-formula id="ieqn-49">
<mml:math id="mml-ieqn-49"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>R</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> represents an arbitrary solution chosen in the range of 1 to N. <inline-formula id="ieqn-50">
<mml:math id="mml-ieqn-50"><mml:mi>s</mml:mi></mml:math>
</inline-formula> represents a constant value fixed to 0.01. <inline-formula id="ieqn-51">
<mml:math id="mml-ieqn-51"><mml:mi>u</mml:mi></mml:math>
</inline-formula> and <inline-formula id="ieqn-52">
<mml:math id="mml-ieqn-52"><mml:mi>v</mml:mi></mml:math>
</inline-formula> indicate the arbitrary values between 0 and 1. <inline-formula id="ieqn-53">
<mml:math id="mml-ieqn-53"><mml:mi>&#x03C3;</mml:mi></mml:math>
</inline-formula> denotes a constant value fixed to 1.5. <inline-formula id="ieqn-54">
<mml:math id="mml-ieqn-54"><mml:mi>&#x03C7;</mml:mi></mml:math>
</inline-formula> and <inline-formula id="ieqn-55">
<mml:math id="mml-ieqn-55"><mml:mi>y</mml:mi></mml:math>
</inline-formula> are applied to define the spiral shapes. <inline-formula id="ieqn-56">
<mml:math id="mml-ieqn-56"><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math>
</inline-formula> is a value that is chosen between 1 and 20 and is applied to fix the search cycle count. <inline-formula id="ieqn-57">
<mml:math id="mml-ieqn-57"><mml:mi>U</mml:mi></mml:math>
</inline-formula> denotes a small parameter value fixed to 0.00565. <inline-formula id="ieqn-58">
<mml:math id="mml-ieqn-58"><mml:mrow><mml:msub><mml:mi>D</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math>
</inline-formula> indicates an integer from 1 to the maximum searching space parameter (<inline-formula id="ieqn-59">
<mml:math id="mml-ieqn-59"><mml:mrow><mml:mi mathvariant="normal">d</mml:mi><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">m</mml:mi></mml:mrow></mml:math>
</inline-formula>) value. <inline-formula id="ieqn-60">
<mml:math id="mml-ieqn-60"><mml:mi>&#x03C9;</mml:mi></mml:math>
</inline-formula> denotes a small constant parameter value fixed to 0.005.</p>
<p>- Step 3: Increased exploitation <inline-formula id="ieqn-61">
<mml:math id="mml-ieqn-61"><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>3</mml:mn></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math>
</inline-formula></p>
<p>In this step, the Aquila is present at the location of exploitation, viz., gets closer to the prey and makes a pre-emptive attack as shown below.</p>
<p><disp-formula id="eqn-20"><label>(20)</label>
<mml:math id="mml-eqn-20" display="block"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>3</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>R</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>&#x03B1;</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo>+</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>U</mml:mi><mml:mi>B</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>L</mml:mi><mml:mi>B</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo>+</mml:mo><mml:mi>L</mml:mi><mml:mi>B</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>&#x03B4;</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math>
</disp-formula></p>
<p>In this exploitation tuning parameter set, the small values <inline-formula id="ieqn-62">
<mml:math id="mml-ieqn-62"><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> are denoted by <inline-formula id="ieqn-63">
<mml:math id="mml-ieqn-63"><mml:mi>&#x03B1;</mml:mi></mml:math>
</inline-formula> and <inline-formula id="ieqn-64">
<mml:math id="mml-ieqn-64"><mml:mi>&#x03B4;</mml:mi><mml:mo>.</mml:mo></mml:math>
</inline-formula> <inline-formula id="ieqn-65">
<mml:math id="mml-ieqn-65"><mml:mi>U</mml:mi><mml:mi>B</mml:mi></mml:math>
</inline-formula> and <inline-formula id="ieqn-66">
<mml:math id="mml-ieqn-66"><mml:mi>L</mml:mi><mml:mi>B</mml:mi></mml:math>
</inline-formula> specify the upper and lower limits, respectively.</p>
<p>- Step 4: Limited exploitation <inline-formula id="ieqn-67">
<mml:math id="mml-ieqn-67"><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>4</mml:mn></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:math>
</inline-formula></p>
<p>Now, the Aquila approaches the nearby prey. Then, the Aquila attacks the target on the ground i.e., its final position, by walking on the ground to catch the prey. The behaviour of the Aquila is modelled as given herewith.</p>
<p><disp-formula id="eqn-21"><label>(21)</label>
<mml:math id="mml-eqn-21" display="block"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>4</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>Q</mml:mi><mml:mi>f</mml:mi></mml:msub></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>X</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mo>&#x00D7;</mml:mo><mml:mi>L</mml:mi><mml:mi>e</mml:mi><mml:mi>v</mml:mi><mml:mi>y</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>D</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-22"><label>(22)</label>
<mml:math id="mml-eqn-22" display="block"><mml:mrow><mml:msub><mml:mi>Q</mml:mi><mml:mi>f</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mi>r</mml:mi><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mi>r</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>T</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-23"><label>(23)</label>
<mml:math id="mml-eqn-23" display="block"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mn>2</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-24"><label>(24)</label>
<mml:math id="mml-eqn-24" display="block"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mn>2</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mi>t</mml:mi><mml:mi>T</mml:mi></mml:mfrac></mml:mrow></mml:mstyle></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-21">Eq. (21)</xref>, the iteration solution that is produced by the last searching technique <inline-formula id="ieqn-68">
<mml:math id="mml-ieqn-68"><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>4</mml:mn></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> is <inline-formula id="ieqn-69">
<mml:math id="mml-ieqn-69"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>4</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula>. <italic>Q<sub>f</sub></italic> denotes the quality function that is utilized to balance the searching technique. Each type of movement applied for tracking the prey is denoted by <inline-formula id="ieqn-71">
<mml:math id="mml-ieqn-71"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>.</mml:mo></mml:math>
</inline-formula> <inline-formula id="ieqn-72">
<mml:math id="mml-ieqn-72"><mml:mrow><mml:msub><mml:mi>G</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math>
</inline-formula>, which decreases from 2 to 0. It shows the flight inclination of the Aquila employed to follow the target from its primary to the final spot. The present solution at the <inline-formula id="ieqn-74">
<mml:math id="mml-ieqn-74"><mml:mi>t</mml:mi></mml:math>
</inline-formula><sup>th</sup> iteration is denoted by <inline-formula id="ieqn-75">
<mml:math id="mml-ieqn-75"><mml:mrow><mml:mo>(</mml:mo><mml:mi>T</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula>. An arbitrary point in the range of [<inline-formula id="ieqn-76">
<mml:math id="mml-ieqn-76"><mml:mn>0</mml:mn></mml:math>
</inline-formula>, 1] is denoted by <italic>rand</italic>. The present iteration is denoted by <inline-formula id="ieqn-77">
<mml:math id="mml-ieqn-77"><mml:mi>t</mml:mi></mml:math>
</inline-formula>. The maximal iteration count is denoted by <inline-formula id="ieqn-78">
<mml:math id="mml-ieqn-78"><mml:mi>T</mml:mi></mml:math>
</inline-formula>.</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Buckwalter Code Based Transcription Model</title>
<p>At last, the Buckwalter code is utilized to capture, store and display the Arabic text, and it is predominantly applied in the Arabic transcription process. Being a strict transliteration scheme, it adheres to the spelling conventions of the language. Further, it uses a one-to-one character mapping and is completely reversible, so it retains all the data required for correct pronunciation.</p>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Experimental Validation</title>
<p>The current section presents the overall analytical results of the proposed ODLD-ATSS model under distinct aspects. The parameter settings are given herewith; learning rate: 0.01, dropout: 0.5, batch size: 5, epoch count: 50 and activation: ReLU. The proposed model was simulated in Python.</p>
<p><xref ref-type="table" rid="table-1">Table 1</xref> and <xref ref-type="fig" rid="fig-3">Fig. 3</xref> provide the detailed <inline-formula id="ieqn-79">
<mml:math id="mml-ieqn-79"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> examination results achieved by the proposed ODLD-ATSS model under validation and testing datasets of gemination. The experimental values imply that the proposed ODLD-ATSS model achieved enhanced <inline-formula id="ieqn-80">
<mml:math id="mml-ieqn-80"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values. For instance, on the validation set, the proposed ODLD-ATSS model obtained a maximum <inline-formula id="ieqn-81">
<mml:math id="mml-ieqn-81"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.61%, whereas the All-Dense, All-LSTM, All-bidirectional LSTM (BLSTM) and the hybrid models attained the least <inline-formula id="ieqn-82">
<mml:math id="mml-ieqn-82"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 98.60%, 98.70%, 99.34% and 99.04% respectively. On the contrary, on the test dataset, the proposed ODLD-ATSS approach gained a maximum <inline-formula id="ieqn-83">
<mml:math id="mml-ieqn-83"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.76%, whereas the All-Dense, All-LSTM, All-BLSTM and the hybrid approaches acquired the least <inline-formula id="ieqn-84">
<mml:math id="mml-ieqn-84"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 98.61%, 98.71%, 98.81% and 99.00% correspondingly.</p>
<table-wrap id="table-1"><label>Table 1</label>
<caption>
<title><inline-formula id="ieqn-85">
<mml:math id="mml-ieqn-85"><mml:mrow><mml:mi mathvariant="bold-italic">A</mml:mi><mml:mi mathvariant="bold-italic">c</mml:mi><mml:mi mathvariant="bold-italic">c</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mrow><mml:mi mathvariant="bold-italic">y</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> analytical results of the ODLD-ATSS model on the selected models of gemination</title></caption>
<table><colgroup>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th align="center" colspan="3">Accuracy (%)</th>
</tr>
<tr>
<th>Models</th>
<th>Validation set</th>
<th>Test set</th>
</tr>
</thead>
<tbody>
<tr>
<td>ODLD-ATSS</td>
<td>99.61</td>
<td>99.76</td>
</tr>
<tr>
<td>All-Dense</td>
<td>98.60</td>
<td>98.61</td>
</tr>
<tr>
<td>All-LSTM</td>
<td>98.70</td>
<td>98.71</td>
</tr>
<tr>
<td>All-BLSTM</td>
<td>99.34</td>
<td>98.81</td>
</tr>
<tr>
<td>Hybrid model</td>
<td>99.04</td>
<td>99.00</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Comparative <inline-formula id="ieqn-86">
<mml:math id="mml-ieqn-86"><mml:mi>A</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> analytical results of the ODLD-ATSS model on the selected models of gemination</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="IASC_34069-fig-3.tif"/>
</fig>
<p><xref ref-type="table" rid="table-2">Table 2</xref> provides the overall results achieved by the proposed ODLD-ATSS model on gemination signs with the DNN model. The results imply that the proposed ODLD-ATSS model produced enhanced outcomes for both non-geminated and geminated classes. For instance, the presented ODLD-ATSS model categorized the non-geminated classes with a <inline-formula id="ieqn-87">
<mml:math id="mml-ieqn-87"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.71%, <inline-formula id="ieqn-88">
<mml:math id="mml-ieqn-88"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.71%, and an <inline-formula id="ieqn-89">
<mml:math id="mml-ieqn-89"><mml:mi>F</mml:mi><mml:mrow><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.76%. At the same time, the proposed ODLD-ATSS model categorized the geminated classes with a <inline-formula id="ieqn-90">
<mml:math id="mml-ieqn-90"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.70%, <inline-formula id="ieqn-91">
<mml:math id="mml-ieqn-91"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.67% and an <inline-formula id="ieqn-92">
<mml:math id="mml-ieqn-92"><mml:mi>F</mml:mi><mml:mrow><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.74%.</p>
<table-wrap id="table-2"><label>Table 2</label>
<caption>
<title>Overall gemination sign classification results of the proposed ODLD-ATSS model</title></caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Gemination classes</th>
<th>Precision</th>
<th>Recall</th>
<th>F1 score</th>
</tr>
</thead>
<tbody>
<tr>
<td colspan="4"><bold>DNN model</bold></td>
</tr>
<tr>
<td>Non geminated</td>
<td>98.39</td>
<td>99.22</td>
<td>98.48</td>
</tr>
<tr>
<td>Geminated</td>
<td>97.99</td>
<td>89.33</td>
<td>94.01</td>
</tr>
<tr>
<td colspan="4"><bold>ODLD-ATSS</bold></td>
</tr>
<tr>
<td>Non geminated</td>
<td>99.71</td>
<td>99.71</td>
<td>99.76</td>
</tr>
<tr>
<td>Geminated</td>
<td>99.70</td>
<td>99.67</td>
<td>99.74</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="table-3">Table 3</xref> reports the overall gemination prediction outcomes accomplished by the proposed ODLD-ATSS model. The results imply that the proposed ODLD-ATSS model attained the effectual prediction outcomes for gemination on the validation dataset and the testing dataset. For instance, on the validation dataset, the proposed ODLD-ATSS model offered an increased <inline-formula id="ieqn-93">
<mml:math id="mml-ieqn-93"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 98.58%, whereas the All-Dense, All-LSTM, All-BLSTM and the hybrid models produced the least <inline-formula id="ieqn-94">
<mml:math id="mml-ieqn-94"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 81.14%, 86.14%, 91.08% and 91% correspondingly. Moreover, on test set, the proposed ODLD-ATSS algorithm accomplished an increased <inline-formula id="ieqn-95">
<mml:math id="mml-ieqn-95"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 98.54%, whereas the All-Dense, All-LSTM, All-BLSTM and the hybrid methods achieved the least <inline-formula id="ieqn-96">
<mml:math id="mml-ieqn-96"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 76.00%, 80.54%, 83.98% and 84.02% respectively.</p>
<table-wrap id="table-3"><label>Table 3</label>
<caption>
<title>Predicted gemination results of the proposed ODLD-ATSS model</title></caption>
<table><colgroup>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th align="center" colspan="3">Accuracy (%)</th>
</tr>
<tr>
<th>Models</th>
<th>Actual gemination</th>
<th>Predicted gemination</th>
</tr>
</thead>
<tbody>
<tr>
<td colspan="2"><bold>Validation Set</bold></td>
<td></td>
</tr>
<tr>
<td>ODLD-ATSS</td>
<td>98.79</td>
<td>98.58</td>
</tr>
<tr>
<td>All-Dense</td>
<td>83.12</td>
<td>81.14</td>
</tr>
<tr>
<td>All-LSTM</td>
<td>87.72</td>
<td>86.14</td>
</tr>
<tr>
<td>All-BLSTM</td>
<td>91.32</td>
<td>91.08</td>
</tr>
<tr>
<td>Hybrid Model</td>
<td>92.10</td>
<td>91.00</td>
</tr>
<tr>
<td colspan="2"><bold>Test Set</bold></td>
<td></td>
</tr>
<tr>
<td>ODLD-ATSS</td>
<td>98.66</td>
<td>98.54</td>
</tr>
<tr>
<td>All-Dense</td>
<td>77.73</td>
<td>76.00</td>
</tr>
<tr>
<td>All-LSTM</td>
<td>81.95</td>
<td>80.54</td>
</tr>
<tr>
<td>All-BLSTM</td>
<td>84.75</td>
<td>83.98</td>
</tr>
<tr>
<td>Hybrid Model</td>
<td>86.44</td>
<td>84.02</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="table-4">Table 4</xref> and <xref ref-type="fig" rid="fig-4">Fig. 4</xref> provide the detailed final diacritization <inline-formula id="ieqn-97">
<mml:math id="mml-ieqn-97"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> analysis results achieved by the proposed ODLD-ATSS model and other existing models under different kinds of phonemes. The results infer that the proposed ODLD-ATSS model achieved enhanced <inline-formula id="ieqn-98">
<mml:math id="mml-ieqn-98"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values under each phoneme. For instance, in terms of plosives, the proposed ODLD-ATSS model obtained a maximum <inline-formula id="ieqn-99">
<mml:math id="mml-ieqn-99"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.42%, whereas the All-Dense, All-LSTM, All-BLSTM, hybrid 1, and hybrid 2 models produced the least <inline-formula id="ieqn-100">
<mml:math id="mml-ieqn-100"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 97.97%, 99%, 98.94%, 98.71% and 98.72% respectively. In terms of Fricatives, the proposed ODLD-ATSS method attained a maximum <inline-formula id="ieqn-101">
<mml:math id="mml-ieqn-101"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.61%, whereas the All-Dense, All-LSTM, All-BLSTM, hybrid 1 and hybrid 2 models accomplished the least <inline-formula id="ieqn-102">
<mml:math id="mml-ieqn-102"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values, such as 99.06%, 98.73%, 99.01%, 98.61% and 98.75% correspondingly. In addition, in terms of Affricates, the presented ODLD-ATSS model reached an optimal <inline-formula id="ieqn-103">
<mml:math id="mml-ieqn-103"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 100%, whereas the All-Dense, All-LSTM, All-BLSTM, hybrid 1, and hybrid 2 models reported the least <inline-formula id="ieqn-104">
<mml:math id="mml-ieqn-104"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 99.83%, 99.70%, 99.79%, 99.74% and 99.69% correspondingly.</p>
<table-wrap id="table-4"><label>Table 4</label>
<caption>
<title>Overall diacritization accuracy results of the proposed ODLD-ATSS model under diverse phonemes</title></caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Methods</th>
<th>Plosives</th>
<th>Fricatives</th>
<th>Affricates</th>
<th>Nasals</th>
<th>Trills</th>
<th>Laterals</th>
<th>Semi-vowels</th>
<th>Average</th>
</tr>
</thead>
<tbody>
<tr>
<td>ODLD-ATSS</td>
<td>99.42</td>
<td>99.61</td>
<td>100</td>
<td>98.91</td>
<td>100</td>
<td>99.6</td>
<td>99.15</td>
<td><bold>99.53</bold></td>
</tr>
<tr>
<td>All-Dense</td>
<td>97.97</td>
<td>99.06</td>
<td>99.83</td>
<td>97.89</td>
<td>97.81</td>
<td>99.05</td>
<td>94.67</td>
<td><bold>98.04</bold></td>
</tr>
<tr>
<td>All-LSTM</td>
<td>99.00</td>
<td>98.73</td>
<td>99.70</td>
<td>97.90</td>
<td>100.00</td>
<td>98.71</td>
<td>95.60</td>
<td><bold>98.52</bold></td>
</tr>
<tr>
<td>All-BLSTM</td>
<td>98.94</td>
<td>99.01</td>
<td>99.79</td>
<td>96.79</td>
<td>100.00</td>
<td>98.94</td>
<td>96.63</td>
<td><bold>98.59</bold></td>
</tr>
<tr>
<td>Hybrid 1</td>
<td>98.71</td>
<td>98.61</td>
<td>99.74</td>
<td>97.70</td>
<td>99.70</td>
<td>98.85</td>
<td>95.80</td>
<td><bold>98.44</bold></td>
</tr>
<tr>
<td>Hybrid 2</td>
<td>98.72</td>
<td>98.75</td>
<td>99.69</td>
<td>98.91</td>
<td>98.85</td>
<td>98.67</td>
<td>96.86</td>
<td><bold>98.64</bold></td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>Comparative diacritization accuracy results of the proposed ODLD-ATSS model under diverse phonemes</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="IASC_34069-fig-4.tif"/>
</fig>
<p><xref ref-type="table" rid="table-5">Table 5</xref> provides the overall classification results accomplished by the proposed ODLD-ATSS model under distinct diacritic classes. <xref ref-type="fig" rid="fig-5">Fig. 5</xref> shows the brief results of the proposed ODLD-ATSS model under distinct diacritic sign classes in terms of <inline-formula id="ieqn-105">
<mml:math id="mml-ieqn-105"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>. The figure infers that the proposed ODLD-ATSS model achieved an enhanced performance under each class. The proposed ODLD-ATSS model obtained the following <inline-formula id="ieqn-106">
<mml:math id="mml-ieqn-106"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow><mml:mtext>&#x00A0;</mml:mtext></mml:math>
</inline-formula>values such as 97.56% under fatha class, 96.39% under dhamma class, 97.09% under Kasra class, 96.41% under Fathaten class, 96.58% under Dhammaten class, 97.77% under Kasraten class, 97.57% under sukun class and 97.13% under others&#x2019; class respectively.</p>
<table-wrap id="table-5"><label>Table 5</label>
<caption>
<title>Comparative diacritic sign class outcomes of the proposed ODLD-ATSS model</title></caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Diacritic sign class</th>
<th>Precision</th>
<th>Recall</th>
<th>F1 score</th>
</tr>
</thead>
<tbody>
<tr>
<td>Fatha</td>
<td>97.56</td>
<td>96.22</td>
<td>96.37</td>
</tr>
<tr>
<td>Dhamma</td>
<td>96.39</td>
<td>97.32</td>
<td>96.85</td>
</tr>
<tr>
<td>Kasra</td>
<td>97.09</td>
<td>97.03</td>
<td>96.55</td>
</tr>
<tr>
<td>Fathaten</td>
<td>96.41</td>
<td>96.78</td>
<td>96.17</td>
</tr>
<tr>
<td>Dhammaten</td>
<td>96.58</td>
<td>96.2</td>
<td>96.28</td>
</tr>
<tr>
<td>Kasraten</td>
<td>97.77</td>
<td>97.05</td>
<td>97.65</td>
</tr>
<tr>
<td>Sukun</td>
<td>97.57</td>
<td>97.35</td>
<td>97.44</td>
</tr>
<tr>
<td>Others</td>
<td>97.13</td>
<td>96.41</td>
<td>96.71</td>
</tr>
<tr>
<td><bold>Average</bold></td>
<td><bold>97.06</bold></td>
<td><bold>96.80</bold></td>
<td><bold>96.75</bold></td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>Comparative <inline-formula id="ieqn-109">
<mml:math id="mml-ieqn-109"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> analysis results under different diacritic sign classes</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="IASC_34069-fig-5.tif"/>
</fig>
<p><xref ref-type="fig" rid="fig-6">Fig. 6</xref> portrays the comprehensive analytical results attained by the proposed ODLD-ATSS algorithm under different diacritic sign classes in terms of <inline-formula id="ieqn-107">
<mml:math id="mml-ieqn-107"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>. The figure denotes that the proposed ODLD-ATSS model accomplished an improved performance under all the classes. The proposed ODLD-ATSS model gained the following <inline-formula id="ieqn-108">
<mml:math id="mml-ieqn-108"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow><mml:mtext>&#x00A0;</mml:mtext></mml:math>
</inline-formula>values such as 96.22% under fatha class, 97.32% under dhamma class, 97.03% under Kasra class, 96.78% under Fathaten class, 96.2% under Dhammaten class, 97.05% under Kasraten class, 97.35% under sukun class and 96.41% under others&#x2019; class correspondingly.</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>Comparative <inline-formula id="ieqn-110">
<mml:math id="mml-ieqn-110"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> analysis results under different diacritic sign classes</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="IASC_34069-fig-6.tif"/>
</fig>
<p><xref ref-type="fig" rid="fig-7">Fig. 7</xref> provides the detailed illustration of the results achieved by the proposed ODLD-ATSS model under different diacritic sign classes in terms of <inline-formula id="ieqn-111">
<mml:math id="mml-ieqn-111"><mml:mi>F</mml:mi><mml:mrow><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula>. The figure infers that the proposed ODLD-ATSS model achieved an enhanced performance under each class. The proposed ODLD-ATSS model acquired the following <inline-formula id="ieqn-112">
<mml:math id="mml-ieqn-112"><mml:mi>F</mml:mi><mml:mrow><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mtext>&#x00A0;</mml:mtext></mml:math>
</inline-formula>values such as 96.37% under the fatha class, 96.85% under the dhamma class, 96.55% under the Kasra class, 96.17% under Fathaten class, 96.28% under Dhammaten class, 97.65% under Kasraten class, 97.44% under sukun class and 96.71% under others&#x2019; class respectively.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>Comparative <inline-formula id="ieqn-113">
<mml:math id="mml-ieqn-113"><mml:mi>F</mml:mi><mml:mrow><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> analysis results under different diacritic sign classes</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="IASC_34069-fig-7.tif"/>
</fig>
<p><xref ref-type="table" rid="table-6">Table 6</xref> and <xref ref-type="fig" rid="fig-8">Fig. 8</xref> exhibit the comparative <inline-formula id="ieqn-115">
<mml:math id="mml-ieqn-115"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> analysis results achieved by the proposed ODLD-ATSS model and other diacritization models. The experimental values imply that the rule-based model and the DNN model produced the least <inline-formula id="ieqn-116">
<mml:math id="mml-ieqn-116"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values, such as 77.36% and 79.36%, respectively. At the same time, the SVM and the LSTM models reported slightly enhanced <inline-formula id="ieqn-117">
<mml:math id="mml-ieqn-117"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values, such as 81.53% and 82.16% respectively. Likewise, the BLSTM model accomplished a reasonable <inline-formula id="ieqn-118">
<mml:math id="mml-ieqn-118"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 85.35%. However, the proposed ODLD-ATSS model achieved a maximum <inline-formula id="ieqn-119">
<mml:math id="mml-ieqn-119"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 96.35%. Therefore, the proposed ODLD-ATSS model can be considered as an effectual tool for Arabic text-to-speech synthesis.</p>
<table-wrap id="table-6"><label>Table 6</label>
<caption>
<title>Overall accuracy values of the proposed ODLD-ATSS model and other existing models</title></caption>
<table><colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Methods</th>
<th>Accuracy score (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td>ODLD-ATSS</td>
<td>96.35</td>
</tr>
<tr>
<td>SVM</td>
<td>81.53</td>
</tr>
<tr>
<td>Rule-Based</td>
<td>77.36</td>
</tr>
<tr>
<td>DNN</td>
<td>79.36</td>
</tr>
<tr>
<td>LSTM</td>
<td>82.16</td>
</tr>
<tr>
<td>BLSTM</td>
<td>85.35</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>Comparative <inline-formula id="ieqn-114">
<mml:math id="mml-ieqn-114"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> analysis results of the proposed ODLD-ATSS model</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="IASC_34069-fig-8.tif"/>
</fig>
</sec>
<sec id="s4">
<label>4</label>
<title>Conclusion</title>
<p>In the current study, the proposed ODLD-ATSS technique has been developed for the TSS process to be used by the visually-challenged people in the Kingdom of Saudi Arabia. The prime objective of the presented ODLD-ATSS model is to convert the text into speech signals for the visually-challenged people. To attain this, the presented ODLD-ATSS model initially designs a GRU-based prediction model for diacritic and gemination signs. Besides, the Buckwalter code is also utilized to capture, store and display the Arabic text. In order to enhance the TSS performance of the GRU method, the AOA technique is used. To establish the enhanced performance of the proposed ODLD-ATSS algorithm, various experimental analyses were conducted. The experimental outcomes confirmed the improved performance of the ODLD-ATSS model over other DL-based TSS models. The proposed ODLD-ATSS model achieved a maximum accuracy of 96.35%. In the future, the performance of the proposed ODLD-ATSS model can be enhanced using hybrid metaheuristic algorithms. Besides, the presented model can also be extended for the object detection process in real-time navigation techniques.</p>
</sec>
</body>
<back>
<sec><title>Funding Statement</title>
<p>The authors extend their appreciation to the <funding-source>King Salman center for Disability Research</funding-source> for funding this work through Research Group no <award-id>KSRG-2022-030</award-id>.</p>
</sec>
<sec sec-type="COI-statement">
<title>Conflicts of Interest</title>
<p>The authors declare that they have no conflicts of interest to report regarding the present study.</p>
</sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Olwan</surname></string-name></person-group>, &#x201C;<article-title>The ratification and implementation of the Marrakesh Treaty for visually impaired persons in the Arab Gulf States</article-title>,&#x201D; <source>The Journal of World Intellectual Property</source>, vol. <volume>20</volume>, no. <issue>5&#x2013;6</issue>, pp. <fpage>178</fpage>&#x2013;<lpage>205</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Sarkar</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Das</surname></string-name></person-group>, &#x201C;<article-title>Analysis of different braille devices for implementing a cost-effective and portable braille system for the visually impaired people</article-title>,&#x201D; <source>International Journal of Computer Applications</source>, vol. <volume>60</volume>, no. <issue>9</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>5</lpage>, <year>2012</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Awad</surname></string-name>, <string-name><given-names>J. El</given-names> <surname>Haddad</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Khneisser</surname></string-name>, <string-name><given-names>T.</given-names> <surname>Mahmoud</surname></string-name>, <string-name><given-names>E.</given-names> <surname>Yaacoub</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Intelligent eye: A mobile application for assisting blind people</article-title>,&#x201D; in <conf-name>IEEE Middle East and North Africa Communications Conf. (MENACOMM)</conf-name>, <publisher-loc>Jounieh</publisher-loc>, pp. <fpage>1</fpage>&#x2013;<lpage>6</lpage>, <year>2018</year>. </mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name></person-group>, &#x201C;<article-title>A hybrid intelligent approach for content authentication and tampering detection of Arabic text transmitted via internet</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>66</volume>, no. <issue>1</issue>, pp. <fpage>195</fpage>&#x2013;<lpage>211</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N. B.</given-names> <surname>Ibrahim</surname></string-name>, <string-name><given-names>M. M.</given-names> <surname>Selim</surname></string-name> and <string-name><given-names>H. H.</given-names> <surname>Zayed</surname></string-name></person-group>, &#x201C;<article-title>An automatic arabic sign language recognition system (ArSLRS)</article-title>,&#x201D; <source>Journal of King Saud University-Computer and Information Sciences</source>, vol. <volume>30</volume>, no. <issue>4</issue>, pp. <fpage>470</fpage>&#x2013;<lpage>477</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. Al</given-names> <surname>Duhayyim</surname></string-name>, <string-name><given-names>H. M.</given-names> <surname>Alshahrani</surname></string-name>, <string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Al-Hagery</surname></string-name>, <string-name><given-names>A. M.</given-names> <surname>Hilal</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Intelligent machine learning based EEG signal classification model</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>71</volume>, no. <issue>1</issue>, pp. <fpage>1821</fpage>&#x2013;<lpage>1835</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name></person-group>, &#x201C;<article-title>Proposing high-smart approach for content authentication and tampering detection of Arabic text transmitted via internet</article-title>,&#x201D; <source>IEICE Transactions on Information and Systems</source>, vol. <volume>E103.D</volume>, no. <issue>10</issue>, pp. <fpage>2104</fpage>&#x2013;<lpage>2112</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. L.</given-names> <surname>Valvo</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Croce</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Garlisi</surname></string-name>, <string-name><given-names>F.</given-names> <surname>Giuliano</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Giarr&#x00E9;</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>A navigation and augmented reality system for visually impaired people</article-title>,&#x201D; <source>Sensors</source>, vol. <volume>21</volume>, no. <issue>9</issue>, pp. <fpage>3061</fpage>, <year>2021</year>; <pub-id pub-id-type="pmid">33924773</pub-id></mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Luqman</surname></string-name> and <string-name><given-names>S. A.</given-names> <surname>Mahmoud</surname></string-name></person-group>, &#x201C;<article-title>Automatic translation of Arabic text-to-Arabic sign language</article-title>,&#x201D; <source>Universal Access in the Information Society</source>, vol. <volume>18</volume>, no. <issue>4</issue>, pp. <fpage>939</fpage>&#x2013;<lpage>951</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Jafri</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Khan</surname></string-name></person-group>, &#x201C;<article-title>User-centered design of a depth data based obstacle detection and avoidance system for the visually impaired</article-title>,&#x201D; <source>Human-Centric Computing and Information Sciences</source>, vol. <volume>8</volume>, no. <issue>1</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>30</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y. S.</given-names> <surname>Su</surname></string-name>, <string-name><given-names>C. H.</given-names> <surname>Chou</surname></string-name>, <string-name><given-names>Y. L.</given-names> <surname>Chu</surname></string-name> and <string-name><given-names>Z. Y.</given-names> <surname>Yang</surname></string-name></person-group>, &#x201C;<article-title>A finger-worn device for exploring chinese printed text with using cnn algorithm on a micro IoT processor</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>7</volume>, pp. <fpage>116529</fpage>&#x2013;<lpage>116541</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Brour</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Benabbou</surname></string-name></person-group>, &#x201C;<article-title>ATLASLang MTS 1: Arabic text language into arabic sign language machine translation system</article-title>,&#x201D; <source>Procedia Computer Science</source>, vol. <volume>148</volume>, no. <issue>6</issue>, pp. <fpage>236</fpage>&#x2013;<lpage>245</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>T.</given-names> <surname>Kanan</surname></string-name>, <string-name><given-names>O.</given-names> <surname>Sadaqa</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Aldajeh</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Alshwabka</surname></string-name>, <string-name><given-names>S.</given-names> <surname>AlZu&#x2019;bi</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>A review of natural language processing and machine learning tools used to analyze arabic social media</article-title>,&#x201D; in <conf-name>IEEE Jordan Int. Joint Conf. on Electrical Engineering and Information Technology (JEEIT)</conf-name>, <publisher-loc>Amman, Jordan</publisher-loc>, pp. <fpage>622</fpage>&#x2013;<lpage>628</lpage>, <year>2019</year>. </mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Aly</surname></string-name> and <string-name><given-names>W.</given-names> <surname>Aly</surname></string-name></person-group>, &#x201C;<article-title>DeepArSLR: A novel signer-independent deep learning framework for isolated Arabic sign language gestures recognition</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, pp. <fpage>83199</fpage>&#x2013;<lpage>83212</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Araya</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Alehegn</surname></string-name></person-group>, &#x201C;<article-title>Text to speech synthesizer for tigrigna linguistic using concatenative based approach with LSTM model</article-title>,&#x201D; <source>Indian Journal of Science and Technology</source>, vol. <volume>15</volume>, no. <issue>1</issue>, pp. <fpage>19</fpage>&#x2013;<lpage>27</lpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>O. A.</given-names> <surname>Berkhemer</surname></string-name>, <string-name><given-names>P. S.</given-names> <surname>Fransen</surname></string-name>, <string-name><given-names>D.</given-names> <surname>Beumer</surname></string-name>, <string-name><given-names>L. A. V. D.</given-names> <surname>Berg</surname></string-name>, <string-name><given-names>H. F.</given-names> <surname>Lingsma</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>A randomized trial of intraarterial treatment for acute ischemic stroke</article-title>,&#x201D; <source>The New England Journal of Medicine</source>, vol. <volume>372</volume>, no. <issue>1</issue>, pp. <fpage>11</fpage>&#x2013;<lpage>20</lpage>, <year>2015</year>; <pub-id pub-id-type="pmid">25517348</pub-id></mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Eltay</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Zidouri</surname></string-name> and <string-name><given-names>I.</given-names> <surname>Ahmad</surname></string-name></person-group>, &#x201C;<article-title>Exploring deep learning approaches to recognize handwritten Arabic texts</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>8</volume>, pp. <fpage>89882</fpage>&#x2013;<lpage>89889</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>R.</given-names> <surname>Alzohairi</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Alghonaim</surname></string-name>, <string-name><given-names>W.</given-names> <surname>Alshehri</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Aloqeely</surname></string-name></person-group>, &#x201C;<article-title>Image based Arabic sign language recognition system</article-title>,&#x201D; <source>International Journal of Advanced Computer Science and Applications</source>, vol. <volume>9</volume>, no. <issue>3</issue>, pp. <fpage>185</fpage>&#x2013;<lpage>194</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>J. X.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>D. M.</given-names> <surname>Jiang</surname></string-name> and <string-name><given-names>Y. N.</given-names> <surname>Zhang</surname></string-name></person-group>, &#x201C;<article-title>A hierarchical bidirectional GRU model with attention for EEG-based emotion classification</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>7</volume>, pp. <fpage>118530</fpage>&#x2013;<lpage>118540</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. M.</given-names> <surname>Lynn</surname></string-name>, <string-name><given-names>S. B.</given-names> <surname>Pan</surname></string-name> and <string-name><given-names>P.</given-names> <surname>Kim</surname></string-name></person-group>, &#x201C;<article-title>A deep bidirectional GRU network model for biometric electrocardiogram classification based on recurrent neural networks</article-title>,&#x201D; <source>IEEE Access</source>, vol. <volume>7</volume>, pp. <fpage>145395</fpage>&#x2013;<lpage>145405</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>I. H.</given-names> <surname>Ali</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Mnasri</surname></string-name> and <string-name><given-names>Z.</given-names> <surname>Lachiri</surname></string-name></person-group>, &#x201C;<article-title>DNN-based grapheme-to-phoneme conversion for Arabic text-to-speech synthesis</article-title>,&#x201D; <source>International Journal of Speech Technology</source>, vol. <volume>23</volume>, no. <issue>3</issue>, pp. <fpage>569</fpage>&#x2013;<lpage>584</lpage>, <year>2020</year>.</mixed-citation></ref>
</ref-list>
</back>
</article>













