<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CSSE</journal-id>
<journal-id journal-id-type="nlm-ta">CSSE</journal-id>
<journal-id journal-id-type="publisher-id">CSSE</journal-id>
<journal-title-group>
<journal-title>Computer Systems Science &#x0026; Engineering</journal-title>
</journal-title-group>
<issn pub-type="ppub">0267-6192</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">33834</article-id>
<article-id pub-id-type="doi">10.32604/csse.2023.033834</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Parameter Tuned Machine Learning Based Emotion Recognition on Arabic Twitter Data</article-title>
<alt-title alt-title-type="left-running-head">Parameter Tuned Machine Learning Based Emotion Recognition on Arabic Twitter Data</alt-title>
<alt-title alt-title-type="right-running-head">Parameter Tuned Machine Learning Based Emotion Recognition on Arabic Twitter Data</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author">
<name name-style="western"><surname>Alwayle</surname><given-names>Ibrahim M.</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Al-onazi</surname><given-names>Badriyya B.</given-names></name><xref ref-type="aff" rid="aff-2">2</xref></contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Alzahrani</surname><given-names>Jaber S.</given-names></name><xref ref-type="aff" rid="aff-3">3</xref></contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Alalayah</surname><given-names>Khaled M.</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Alaidarous</surname><given-names>Khadija M.</given-names></name><xref ref-type="aff" rid="aff-1">1</xref></contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Ahmed</surname><given-names>Ibrahim Abdulrab</given-names></name><xref ref-type="aff" rid="aff-4">4</xref></contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western"><surname>Othman</surname><given-names>Mahmoud</given-names></name><xref ref-type="aff" rid="aff-5">5</xref></contrib>
<contrib id="author-8" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Motwakel</surname><given-names>Abdelwahed</given-names></name><xref ref-type="aff" rid="aff-6">6</xref><email>a.ismaeil@psau.edu.sa</email></contrib>
<aff id="aff-1"><label>1</label><institution>Department of Computer Science, College of Science and Arts, Sharurah, Najran University</institution>, <addr-line>Najran, 55461</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Language Preparation, Arabic Language Teaching Institute, Princess Nourah bint Abdulrahman University</institution>, <addr-line>P.O. Box 84428, Riyadh, 11671</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Industrial Engineering, College of Engineering at Alqunfudah, Umm Al-Qura University</institution>, <addr-line>Najran, 24211</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-4"><label>4</label><institution>Computer Department, Applied College, Najran University</institution>, <addr-line>Najran, 66462</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Computer Science, Faculty of Computers and Information Technology, Future University in Egypt</institution>, <addr-line>New Cairo, 11835</addr-line>, <country>Egypt</country></aff>
<aff id="aff-6"><label>6</label><institution>Department of Computer and Self Development, Preparatory Year Deanship, Prince Sattam bin Abdulaziz University</institution>, <addr-line>AlKharj</addr-line>, <country>Saudi Arabia</country></aff>
</contrib-group>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Abdelwahed Motwakel. Email: <email>a.ismaeil@psau.edu.sa</email></corresp>
</author-notes>
<pub-date date-type="collection" publication-format="electronic">
<year>2023</year></pub-date>
<pub-date date-type="pub" publication-format="electronic">
<day>31</day>
<month>3</month>
<year>2023</year>
</pub-date>
<volume>46</volume>
<issue>3</issue>
<fpage>3423</fpage>
<lpage>3438</lpage>
<history>
<date date-type="received"><day>29</day><month>6</month><year>2022</year></date>
<date date-type="accepted"><day>03</day><month>11</month><year>2022</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Alwayle et al.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Alwayle et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CSSE_33834.pdf"></self-uri>
<abstract>
<p>Arabic is one of the most spoken languages across the globe. However, there are fewer studies concerning Sentiment Analysis (SA) in Arabic. In recent years, the detected sentiments and emotions expressed in tweets have received significant interest. The substantial role played by the Arab region in international politics and the global economy has urged the need to examine the sentiments and emotions in the Arabic language. Two common models are available: Machine Learning and lexicon-based approaches to address emotion classification problems. With this motivation, the current research article develops a Teaching and Learning Optimization with Machine Learning Based Emotion Recognition and Classification (TLBOML-ERC) model for Sentiment Analysis on tweets made in the Arabic language. The presented TLBOML-ERC model focuses on recognising emotions and sentiments expressed in Arabic tweets. To attain this, the proposed TLBOML-ERC model initially carries out data pre-processing and a Continuous Bag Of Words (CBOW)-based word embedding process. In addition, Denoising Autoencoder (DAE) model is also exploited to categorise different emotions expressed in Arabic tweets. To improve the efficacy of the DAE model, the Teaching and Learning-based Optimization (TLBO) algorithm is utilized to optimize the parameters. The proposed TLBOML-ERC method was experimentally validated with the help of an Arabic tweets dataset. The obtained results show the promising performance of the proposed TLBOML-ERC model on Arabic emotion classification.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Arabic language</kwd>
<kwd>Twitter data</kwd>
<kwd>machine learning</kwd>
<kwd>teaching and learning-based optimization</kwd>
<kwd>sentiment analysis</kwd>
<kwd>emotion classification</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1"><label>1</label><title>Introduction</title>
<p>Arabic is one of the six official languages of the United Nations. As an official language in 27 nations, Arabic is spoken by nearly 422 million people worldwide [<xref ref-type="bibr" rid="ref-1">1</xref>]. Arabic has rich morphology and is highly complex since every word carries significant meaning. Since space is one of the delimited tokens, the terms in the Arabic language expose numerous morphological prospects such as agglutination, derivation, and inflection [<xref ref-type="bibr" rid="ref-2">2</xref>]. Unlike Latin languages, Arabic is written from right to left and can be distinguished by the absence of lower or upper case. Its alphabet has 28 letters, of which 25 are consonants and 3 are vowels [<xref ref-type="bibr" rid="ref-3">3</xref>,<xref ref-type="bibr" rid="ref-4">4</xref>]. However, the Arabic script employs diacritical marks as short vowels and vocal parts. This can be positioned either below or above the letters to ensure the right pronunciation and convey clear meaning for the words [<xref ref-type="bibr" rid="ref-5">5</xref>]. Most Arabic texts are written without shorter vowels. So, proficient speakers do not require diacritical marks to understand the presented text [<xref ref-type="bibr" rid="ref-6">6</xref>]. But, it is frequently utilized in books written for Arabic learners and children. The lack of diacritical marks in most textbooks brings lexical ambiguity issues that challenge the computational mechanisms [<xref ref-type="bibr" rid="ref-7">7</xref>,<xref ref-type="bibr" rid="ref-8">8</xref>].</p>
<p>There is a shortage of studies or linguistics research in Arabic, especially social emotion analysis. Further, no structured methods exist for extracting and classifying emotions in Arabic tweets. If available, such mechanisms can be applied to improve customer service management, E-learning applications, product quality, detection techniques for psychologists to identify terrorist conduct, and so on [<xref ref-type="bibr" rid="ref-9">9</xref>,<xref ref-type="bibr" rid="ref-10">10</xref>]. The emotion analysis process allows the analysis and classification of more complex emotions. Emotion is a part of the nervous system function and is associated with different mental states, such as sadness, joy, or annoyance. Emotion analysis can identify whether the content under study has emotions and can categorize the emotions under appropriate emotion categories [<xref ref-type="bibr" rid="ref-11">11</xref>]. The data for sentiment analysis is available on Twitter in the form of tweets. This Twitter data has text in several languages, whereas users post around 4.00 billion tweets daily. The tweets posted on Twitter exhibit the feelings and emotions of the users in distinct languages. Several challenges are associated with the emotional analysis of Twitter data since the tweets contain several social shortcuts, grammatical mistakes, multimedia content, misspellings, and slang [<xref ref-type="bibr" rid="ref-12">12</xref>]. Various authors have investigated emotions in English-language tweets. However, no single author has categorized the emotions exhibited in Arabic language tweets since the language has intricate difficulties. Many sentiment analysis studies on Arabic tweets merely categorized a sentiment as either positive or negative [<xref ref-type="bibr" rid="ref-13">13</xref>]. As mentioned, there is a lack of resources and studies in Arabic language social emotion analysis. 
On the contrary, such studies and mechanisms are the need of the hour and must be applied in different fields [<xref ref-type="bibr" rid="ref-14">14</xref>], for instance, to support psychiatrists in understanding terrorist conduct, improve E-learning applications, and enhance customer service and product quality.</p>
<p>The current study develops a Teaching and Learning Optimization with Machine Learning Based Emotion Recognition and Classification (TLBOML-ERC) model on Arabic Twitter data. The presented TLBOML-ERC model focuses on recognising emotions and sentiments expressed in Arabic tweets. To attain this, the proposed TLBOML-ERC model initially carries out data pre-processing and Continuous Bag Of Words (CBOW)-based word embedding. The Denoising Autoencoder (DAE) model is exploited for emotion recognition, which categorizes the emotions found in Arabic tweets. To improve the efficacy of the DAE model, the Teaching and Learning Based Optimization (TLBO) algorithm is utilized for parameter optimization. The experimental analysis of the proposed TLBOML-ERC model was conducted using the Arabic tweets dataset.</p>
</sec>
<sec id="s2"><label>2</label><title>Related Works</title>
<p>Baali et al. [<xref ref-type="bibr" rid="ref-15">15</xref>] proposed a classification method for emotions found in Arabic tweets. In this technique, Deep Convolution Neural Network (DCNN) was trained on top of a training word vector for sentence classification, especially upon the dataset. The outcomes of the proposed method were compared with three other Machine Learning (ML) techniques, such as Multilayer Perceptron (MLP), Support Vector Machine (SVM), and Na&#x00EF;ve Bayes (NB). The structure of the deep learning algorithm was an end-to-end network with sentence, word, and document vectorization steps. Khalil et al. [<xref ref-type="bibr" rid="ref-16">16</xref>] developed a new multi-layer Bidirectional Long Short Term Memory (BiLSTM) that was trained on top of pre-trained word-embedded vectors. This method obtained excellent performance improvement and was also related to other techniques for similar tasks, such as SVM, Random Forest (RF), and Fully Convolution Neural Network (FCNN). In the study conducted earlier [<xref ref-type="bibr" rid="ref-17">17</xref>], empirical research was executed on the progression of language methods from conventional Term Frequency&#x2013;Inverse Document Frequency (TF&#x2013;IDF) to highly-sophisticated word embedding word2vec and finally to the existing pre-trained language method, i.e., Bidirectional Encoder Representations from Transformers (BERT). It observed and examined how the performance can be increased to bring a change in language methods. Additionally, various BERT techniques were inspected for the Arabic language earlier.</p>
<p>Poorna et al. [<xref ref-type="bibr" rid="ref-18">18</xref>] developed a speech emotion recognition mechanism for the Arabic population. A speech database eliciting emotions such as surprise, anger, disgust, happiness, neutrality, and sadness was developed from 14 non-native yet efficient speakers of the language. Spectral, cepstral, and prosodic features were derived after preprocessing the data. Then, the features were exposed to single-stage classification using supervised learning techniques, namely SVM and Extreme Learning Machine (ELM). Al-Hagery et al. [<xref ref-type="bibr" rid="ref-19">19</xref>] intended to achieve optimal performance in emotion classification upon the tweets made in Arabic. In this background, it is evident that various research investigations have been conducted earlier to investigate the impact of feature extraction methods and the N-gram method on the performances of three supervised ML techniques, such as SVM, NB, and Logistic Regression (LR).</p>
</sec>
<sec id="s3"><label>3</label><title>The Proposed Model</title>
<p>In the current study, a new TLBOML-ERC model is proposed for the recognition of emotions and sentiments found in Arabic tweets. The TLBOML-ERC model initially carries out data pre-processing and the CBOW-based word embedding process to attain this. For emotion recognition, TLBO is exploited along with the DAE model that identifies and categorizes the emotions found in Arabic tweets. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> showcases the block diagram of the TLBOML-ERC approach.</p>
<fig id="fig-1"><label>Figure 1</label><caption><title>Block diagram of TLBOML-ERC approach</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-1.tif"/></fig>
<sec id="s3_1"><label>3.1</label><title>Data Pre-processing</title>
<p>The original Arabic tweets from the training and testing datasets are tokenized. After removing the white spaces, the punctuation marks (&#x201D;.,?!:;()[]&#x0023;@&#x2019;) are preserved as individual words. It is worth noting that pre-processing approaches not adopted in the current study model normalize the Arabic characters and eliminate diacritics, punctuation marks, and repetitive characters.</p>
</sec>
<sec id="s3_2"><label>3.2</label><title>Continuous BoW Model</title>
<p>The CBOW approach employs Bag-of-Word models in which every word shares a prediction layer. Furthermore, the nonlinear hidden state is detached to reduce the computational time. CBOW employs words from history and the future, whereas a <inline-formula id="ieqn-1"><mml:math id="mml-ieqn-1"><mml:mrow><mml:mtext>log</mml:mtext></mml:mrow></mml:math></inline-formula>-linear classifier is applied, which is generally used in classifying middle (current) words [<xref ref-type="bibr" rid="ref-20">20</xref>]. Moreover, CBOW utilizes a continuous distributed representation of the context. Consider <inline-formula id="ieqn-2"><mml:math id="mml-ieqn-2"><mml:mrow><mml:mo>{</mml:mo><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x22EF;</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>w</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mo>}</mml:mo></mml:mrow></mml:math></inline-formula>, the CBOW method works to maximise <xref ref-type="disp-formula" rid="eqn-1">Eq. (1)</xref>.
<disp-formula id="eqn-1"><label>(1)</label><mml:math id="mml-eqn-1" display="block"><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mo>|</mml:mo><mml:mi>V</mml:mi><mml:mo>|</mml:mo></mml:mrow></mml:mfrac><mml:msubsup><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mi>v</mml:mi><mml:mo>|</mml:mo></mml:mrow></mml:mrow></mml:msubsup><mml:mrow><mml:mtext>log</mml:mtext></mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>w</mml:mi><mml:mi>t</mml:mi><mml:msubsup><mml:mrow><mml:mo fence="false" stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:mi>w</mml:mi><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mi>w</mml:mi><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:mi>w</mml:mi><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mi>w</mml:mi><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>c</mml:mi><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mi>w</mml:mi><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mi>w</mml:mi><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo></mml:mrow></mml:msubsup><mml:mo>)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-1">Eq. (1)</xref>, <inline-formula id="ieqn-3"><mml:math id="mml-ieqn-3"><mml:mrow><mml:mo>|</mml:mo><mml:mi>v</mml:mi><mml:mo>|</mml:mo></mml:mrow></mml:math></inline-formula> indicates the corpus&#x2019;s number of words or vocabularies, while <italic>c</italic> denotes the context size. The context size is decided based on the sliding window size. When the sliding window size is 9, it corresponds to the presence of 9 words, implying that the value of <italic>c</italic> is 4. So, 4 words beforehand and afterwards should be considered to forecast certain words. After forecasting the word, the window must slide to predict the following word.</p>
</sec>
<sec id="s3_3"><label>3.3</label><title>DAE-Based Emotion Classification</title>
<p>For emotion recognition, the DAE model is exploited, which identifies and categorizes the emotions found in Arabic tweets. Autoencoder (AE) is a Fully Connected (FC) layer of the unsupervised ML process, implying the Backpropagation (BP) model [<xref ref-type="bibr" rid="ref-21">21</xref>]. AE comprises an input layer, several hidden states, and a single output layer. The resultant of the neural network is denoted in <xref ref-type="disp-formula" rid="eqn-2">Eq. (2)</xref> by permitting bold letters to indicate the vector.
<disp-formula id="eqn-2"><label>(2)</label><mml:math id="mml-eqn-2" display="block"><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mspace width="negativethinmathspace" /><mml:mo>,</mml:mo></mml:math></disp-formula>whereas <inline-formula id="ieqn-4"><mml:math id="mml-ieqn-4"><mml:mi>x</mml:mi><mml:mo>=</mml:mo><mml:mo stretchy="false">[</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>&#x22EF;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mo stretchy="false">]</mml:mo><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> is the input vector, <inline-formula id="ieqn-5"><mml:math id="mml-ieqn-5"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the full forward propagation function, and <inline-formula id="ieqn-6"><mml:math id="mml-ieqn-6"><mml:mi>&#x03B8;</mml:mi></mml:math></inline-formula> denotes the set of biases and weights that should be studied through the network during the training stage. The learning procedure can be performed by setting the target value at the output, which is similar to the input value <inline-formula id="ieqn-7"><mml:math id="mml-ieqn-7"><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mo>(</mml:mo><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>:
<disp-formula id="eqn-3"><label>(3)</label><mml:math id="mml-eqn-3" display="block"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>x</mml:mi><mml:mo>.</mml:mo></mml:math></disp-formula></p>
<p>As stated earlier, AE operates as an unsupervised learning method, i.e., backpropagation. Unsupervised learning refers to the training stage in which the basic features are merely needed without labelling. At the same time, backpropagation is the fault in the foretold resultant. In contrast, the target resultant propagates back from the resultant to every neuron in the network to update the weights based on a certain learning rate <inline-formula id="ieqn-8"><mml:math id="mml-ieqn-8"><mml:mo stretchy="false">(</mml:mo><mml:mi>&#x03B1;</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math></inline-formula>:
<disp-formula id="eqn-4"><label>(4)</label><mml:math id="mml-eqn-4" display="block"><mml:msub><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mo>&#x25B3;</mml:mo><mml:msub><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:math></disp-formula>whereas <italic>i</italic> refers to the index of trained epochs and <inline-formula id="ieqn-9"><mml:math id="mml-ieqn-9"><mml:mo>&#x25B3;</mml:mo><mml:msub><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> denotes the upgraded weight that can be computed as follows.
<disp-formula id="eqn-5"><label>(5)</label><mml:math id="mml-eqn-5" display="block"><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:msub><mml:mi>&#x03B8;</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>&#x03B1;</mml:mi><mml:mrow><mml:mo fence="true" stretchy="true" symmetric="true"></mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mfrac><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:mi>E</mml:mi></mml:mrow><mml:mrow><mml:mi mathvariant="normal">&#x2202;</mml:mi><mml:msub><mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac><mml:mo fence="true" stretchy="true" symmetric="true"></mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Here, <italic>E</italic> denotes the cost function. In such cases, Mean Square Error (MSE) is employed as an error measure, calculated as given below.
<disp-formula id="eqn-6"><label>(6)</label><mml:math id="mml-eqn-6" display="block"><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>N</mml:mi></mml:mfrac><mml:msub><mml:mo movablelimits="false">&#x2211;</mml:mo><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>,</mml:mo></mml:math></disp-formula></p>
<p>Here, <italic>N</italic> denotes the training set size, <inline-formula id="ieqn-10"><mml:math id="mml-ieqn-10"><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> corresponds to <inline-formula id="ieqn-11"><mml:math id="mml-ieqn-11"><mml:mi>t</mml:mi><mml:mi>h</mml:mi><mml:mi>e</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>m</mml:mi><mml:mrow><mml:mtext>-</mml:mtext></mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math></inline-formula> input vector, and <inline-formula id="ieqn-12"><mml:math id="mml-ieqn-12"><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> refers to the <inline-formula id="ieqn-13"><mml:math id="mml-ieqn-13"><mml:mi>m</mml:mi><mml:mrow><mml:mtext>-</mml:mtext></mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math></inline-formula> output vector. AE has one more type, i.e., Denoising AE <inline-formula id="ieqn-14"><mml:math id="mml-ieqn-14"><mml:mrow><mml:mo>(</mml:mo><mml:mi>D</mml:mi><mml:mi>A</mml:mi><mml:mi>E</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>, which has a similar structure to <inline-formula id="ieqn-15"><mml:math id="mml-ieqn-15"><mml:mi>A</mml:mi><mml:mi>E</mml:mi><mml:mi>s</mml:mi><mml:mo>.</mml:mo></mml:math></inline-formula> But, the principle behind DAEs can rebuild data from the input of the corrupted dataset. It trains DAE by corrupting the datasets and giving them to Neural Network (NN). During the training stage, the target value is fixed for the output in the case of the actual dataset, whereas the input remains in the corrupted form of the dataset.
<disp-formula id="eqn-7"><label>(7)</label><mml:math id="mml-eqn-7" display="block"><mml:mrow><mml:mtext mathvariant="italic">target</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext mathvariant="italic">output</mml:mtext></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mi>o</mml:mi><mml:mi>f</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo>&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo></mml:math></disp-formula></p>
<p>Here, <inline-formula id="ieqn-16"><mml:math id="mml-ieqn-16"><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo>&#x007E;</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> denotes the corrupted pack of input neurons and <inline-formula id="ieqn-17"><mml:math id="mml-ieqn-17"><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo>&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> indicates the output of DAE. DAE diminishes the cost function <inline-formula id="ieqn-18"><mml:math id="mml-ieqn-18"><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msub><mml:mi>h</mml:mi><mml:mrow><mml:mi>&#x03B8;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mover><mml:mi>x</mml:mi><mml:mo>&#x007E;</mml:mo></mml:mover></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>, whereas <italic>E</italic> denotes a certain error measure. DAE should undo the corruption instead of replicating the input at the output and capturing the essential feature of the trained dataset. This training allows the DAE to recover the correlation amongst input neurons via the original dataset. It is to be noted that the neuron count necessitated in AE hidden state is lower compared to the count of input or output neurons. Though this is not a common constraint in <inline-formula id="ieqn-19"><mml:math id="mml-ieqn-19"><mml:mi>A</mml:mi><mml:mi>E</mml:mi></mml:math></inline-formula>, other AE models have the count of neurons in their hidden stage, which should be higher than the input or output neurons, such as Sparse AE.</p>
</sec>
<sec id="s3_4"><label>3.4</label><title>Parameter Tuning Using TLBO Algorithm</title>
<p>To improve the efficacy of the DAE model, the TLBO algorithm is utilized for parameter optimization. TLBO is a population-based metaheuristic approach in which the optimum solution is characterized in terms of the population [<xref ref-type="bibr" rid="ref-22">22</xref>]. The TLBO approach works based on classroom learning mechanisms. Here, the teacher is presented to teach the learner with a goal, i.e., to increase the learning ability of the learner. However, in the classroom learning mechanism, learners can improve their skills by obtaining knowledge from others. The TLBO approach comprises two stages: the learner stage and the teacher stage. A comprehensive discussion of both stages is summarized herewith. The teacher phase aims to improve the student&#x2019;s learning skills so that the entire class&#x2019;s outcomes are considerably enhanced. This results in an increased mean outcome of the class. Generally, a teacher improves the outcomes of the class learning process to a certain extent. Various factors are accountable for the outcomes: the learner&#x2019;s grasping ability, teaching technique, teacher&#x2019;s capability, knowledge of the learners, and interaction of the learners with others. 
In the teacher stage, <inline-formula id="ieqn-20"><mml:math id="mml-ieqn-20"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>L</mml:mi><mml:mi>m</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow></mml:mrow><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:math></inline-formula> indicates the learner&#x2019;s knowledge, and <inline-formula id="ieqn-21"><mml:math id="mml-ieqn-21"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>T</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>h</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow></mml:mrow><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:math></inline-formula> denotes the teacher in iteration. The major concern of the teachers is to improve the learners&#x2019; knowledge. To accomplish this task, the existing mean knowledge of the learners, i.e., <inline-formula id="ieqn-22"><mml:math id="mml-ieqn-22"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext mathvariant="italic">Lmean</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:math></inline-formula> is moved to the teacher&#x2019;s knowledge, i.e., <inline-formula id="ieqn-23"><mml:math id="mml-ieqn-23"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>T</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>h</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow></mml:mrow><mml:mo>&#x2032;</mml:mo></mml:msup></mml:mrow></mml:math></inline-formula> and is defined by the following expression.
<disp-formula id="eqn-8"><label>(8)</label><mml:math id="mml-eqn-8" display="block"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>o</mml:mi><mml:mi>l</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">Teacher</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo>&#x00D7;</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mrow><mml:mtext mathvariant="italic">Lmean</mml:mtext></mml:mrow></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Now, <inline-formula id="ieqn-24"><mml:math id="mml-ieqn-24"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">Teacher</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> represents the teacher&#x2019;s mean knowledge. At the same time, <inline-formula id="ieqn-25"><mml:math id="mml-ieqn-25"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:mtext mathvariant="italic">Lmean</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> denotes the mean knowledge of the <inline-formula id="ieqn-26"><mml:math id="mml-ieqn-26"><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner, <inline-formula id="ieqn-27"><mml:math id="mml-ieqn-27"><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> symbolizes the teaching aspect, and <italic>r</italic> indicates an arbitrary value in the range of [0, 1]. <inline-formula id="ieqn-28"><mml:math id="mml-ieqn-28"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> defines the upgraded knowledge of the <inline-formula id="ieqn-29"><mml:math id="mml-ieqn-29"><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner and <inline-formula id="ieqn-30"><mml:math id="mml-ieqn-30"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>o</mml:mi><mml:mi>l</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> indicates the preceding knowledge of the <inline-formula id="ieqn-31"><mml:math id="mml-ieqn-31"><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner.
<disp-formula id="eqn-9"><label>(9)</label><mml:math id="mml-eqn-9" display="block"><mml:msub><mml:mi>T</mml:mi><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mrow><mml:mtext mathvariant="italic">round</mml:mtext></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>The learner phase aims at boosting the learner&#x2019;s knowledge from other learners. Therefore, to increase one&#x2019;s learning capability, a learner should interact randomly with others. In the learner stage of the TLBO approach, the learner also gains knowledge from other learners. The learning ability of a learner is formulated herewith. Once an <inline-formula id="ieqn-32"><mml:math id="mml-ieqn-32"><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner wants to communicate with <inline-formula id="ieqn-33"><mml:math id="mml-ieqn-33"><mml:mi>t</mml:mi><mml:mi>h</mml:mi><mml:mi>e</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:msup><mml:mi>k</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner and the fitness of the <inline-formula id="ieqn-34"><mml:math id="mml-ieqn-34"><mml:msup><mml:mi>k</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner is greater than that of the <inline-formula id="ieqn-35"><mml:math id="mml-ieqn-35"><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner, then the location of the <inline-formula id="ieqn-36"><mml:math id="mml-ieqn-36"><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner is upgraded based on the <inline-formula id="ieqn-37"><mml:math id="mml-ieqn-37"><mml:msup><mml:mi>k</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner.
<disp-formula id="eqn-10"><label>(10)</label><mml:math id="mml-eqn-10" display="block"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>o</mml:mi><mml:mi>l</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula>
<disp-formula id="eqn-11"><label>(11)</label><mml:math id="mml-eqn-11" display="block"><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>o</mml:mi><mml:mi>l</mml:mi><mml:mi>d</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mi>r</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></disp-formula></p>
<p>Once the fitness of the novel location of the <inline-formula id="ieqn-38"><mml:math id="mml-ieqn-38"><mml:msup><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> learner is superior to the fitness value of the older location, then the novel location takes over the older one or else it does not. <xref ref-type="fig" rid="fig-2">Fig. 2</xref> depicts the steps involved in TLBO.
</p>
<fig id="fig-2"><label>Figure 2</label><caption><title>Steps involved in TLBO</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-2.tif"/></fig>
<fig id="fig-12">
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-12.tif"/>
</fig>
</sec>
</sec>
<sec id="s4"><label>4</label><title>Results and Discussion</title>
<p>The proposed TLBOML-ERC model was experimentally validated using a dataset that contains 5,600 Arabic tweets under four class labels, as depicted in <xref ref-type="table" rid="table-1">Table 1</xref>. Each class holds a set of 1,400 samples.</p>
<table-wrap id="table-1"><label>Table 1</label><caption><title>Dataset details</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Class</th>
<th align="left">No. of samples</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Anger</td>
<td align="left">1400</td>
</tr>
<tr>
<td align="left">Joy</td>
<td align="left">1400</td>
</tr>
<tr>
<td align="left">Sadness</td>
<td align="left">1400</td>
</tr>
<tr>
<td align="left">Fear</td>
<td align="left">1400</td>
</tr>
<tr>
<td align="left">Total number of samples</td>
<td align="left">5600</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="fig" rid="fig-3">Fig. 3</xref> illustrates the confusion matrices generated by the TLBOML-ERC model on Arabic tweets using Training Set (TRS) and Testing Set (TSS). On 90&#x0025; of TRS, the proposed TLBOML-ERC model categorized 1,218 samples under anger, 1,217 samples under joy, 1,179 samples under sadness, and 1,212 samples under fear classes, respectively. Also, on 10&#x0025; of TSS, the presented TLBOML-ERC approach classified 138 samples under anger, 120 samples under joy, 139 samples under sadness, and 139 samples under fear classes correspondingly. Additionally, on 70&#x0025; of TRS, the proposed TLBOML-ERC technique recognized 970 samples as anger, 926 samples as joy, 918 samples as sadness, and 968 samples as fear classes, respectively. Followed by 30&#x0025; of TSS, the TLBOML-ERC algorithm classified 383 samples under anger, 405 samples under joy, 436 samples under sadness and 397 samples under fear categories correspondingly.</p>
<fig id="fig-3"><label>Figure 3</label><caption><title>Confusion matrices of TLBOML-ERC approach for (a) 90&#x0025; of TRS, (b) 10&#x0025; of TSS, (c) 80&#x0025; of TRS, (d) 20&#x0025; of TSS, (e) 70&#x0025; of TRS, and (f) 30&#x0025; of TSS</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-3.tif"/></fig>
<p><xref ref-type="table" rid="table-2">Table 2</xref> and <xref ref-type="fig" rid="fig-4">Fig. 4</xref> portray the results attained by the proposed TLBOML-ERC model on 90&#x0025; of TRS and 10&#x0025; of TSS. With 90&#x0025; of TRS, the proposed TLBOML-ERC model achieved an average <inline-formula id="ieqn-52"><mml:math id="mml-ieqn-52"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 97.88&#x0025;, <inline-formula id="ieqn-53"><mml:math id="mml-ieqn-53"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 95.75&#x0025;, <inline-formula id="ieqn-54"><mml:math id="mml-ieqn-54"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.59&#x0025;, <inline-formula id="ieqn-55"><mml:math id="mml-ieqn-55"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 95.75&#x0025;, and an MCC (Matthews Correlation Coefficient) of 94.34&#x0025;. 
Also, with 10&#x0025; of TSS, the proposed TLBOML-ERC methodology offered an average <inline-formula id="ieqn-56"><mml:math id="mml-ieqn-56"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 97.86&#x0025;, <inline-formula id="ieqn-57"><mml:math id="mml-ieqn-57"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 95.76&#x0025;, <inline-formula id="ieqn-58"><mml:math id="mml-ieqn-58"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.57&#x0025;, <inline-formula id="ieqn-59"><mml:math id="mml-ieqn-59"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 95.71&#x0025;, and an MCC of 94.29&#x0025;.</p>
<table-wrap id="table-2"><label>Table 2</label><caption><title>Results of the analysis of TLBOML-ERC approach upon 90:10 of TRS/TSS datasets under different measures</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="center" colspan="6">Training/Testing (90:10)</th>
</tr>
<tr>
<th align="left">Labels</th>
<th align="left">Accuracy</th>
<th align="left">Sensitivity</th>
<th align="left">Specificity</th>
<th align="left">F1-score</th>
<th align="left">MCC</th>
</tr>
</thead>
<tbody>
<tr>
<td align="center" colspan="6">Training phase</td>
</tr>
<tr>
<td align="left">Anger</td>
<td align="left">98.33</td>
<td align="left">96.82</td>
<td align="left">98.84</td>
<td align="left">96.67</td>
<td align="left">95.56</td>
</tr>
<tr>
<td align="left">Joy</td>
<td align="left">97.96</td>
<td align="left">95.45</td>
<td align="left">98.80</td>
<td align="left">95.94</td>
<td align="left">94.58</td>
</tr>
<tr>
<td align="left">Sadness</td>
<td align="left">97.40</td>
<td align="left">94.32</td>
<td align="left">98.42</td>
<td align="left">94.74</td>
<td align="left">93.01</td>
</tr>
<tr>
<td align="left">Fear</td>
<td align="left">97.82</td>
<td align="left">96.42</td>
<td align="left">98.28</td>
<td align="left">95.66</td>
<td align="left">94.21</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">97.88</td>
<td align="left">95.75</td>
<td align="left">98.59</td>
<td align="left">95.75</td>
<td align="left">94.34</td>
</tr>
<tr>
<td align="center" colspan="6">Testing phase</td>
</tr>
<tr>
<td align="left">Anger</td>
<td align="left">98.57</td>
<td align="left">97.18</td>
<td align="left">99.04</td>
<td align="left">97.18</td>
<td align="left">96.23</td>
</tr>
<tr>
<td align="left">Joy</td>
<td align="left">97.86</td>
<td align="left">96.00</td>
<td align="left">98.39</td>
<td align="left">95.24</td>
<td align="left">93.86</td>
</tr>
<tr>
<td align="left">Sadness</td>
<td align="left">97.14</td>
<td align="left">92.67</td>
<td align="left">98.78</td>
<td align="left">94.56</td>
<td align="left">92.66</td>
</tr>
<tr>
<td align="left">Fear</td>
<td align="left">97.86</td>
<td align="left">97.20</td>
<td align="left">98.08</td>
<td align="left">95.86</td>
<td align="left">94.43</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">97.86</td>
<td align="left">95.76</td>
<td align="left">98.57</td>
<td align="left">95.71</td>
<td align="left">94.29</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-4"><label>Figure 4</label><caption><title>Results of the analysis of TLBOML-ERC approach upon 90:10 of TRS/TSS datasets</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-4.tif"/></fig>
<p><xref ref-type="table" rid="table-3">Table 3</xref> and <xref ref-type="fig" rid="fig-5">Fig. 5</xref> depict the results accomplished by the proposed TLBOML-ERC model on 80&#x0025; of TRS and 20&#x0025; of TSS datasets. With 80&#x0025; of TRS, the TLBOML-ERC method achieved an average <inline-formula id="ieqn-60"><mml:math id="mml-ieqn-60"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.38&#x0025;, <inline-formula id="ieqn-61"><mml:math id="mml-ieqn-61"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 96.76&#x0025;, <inline-formula id="ieqn-62"><mml:math id="mml-ieqn-62"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.92&#x0025;, <inline-formula id="ieqn-63"><mml:math id="mml-ieqn-63"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 96.76&#x0025;, and an MCC of 95.68&#x0025;. 
Moreover, with 20&#x0025; of TSS, the TLBOML-ERC model presented an average <inline-formula id="ieqn-64"><mml:math id="mml-ieqn-64"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.84&#x0025;, <inline-formula id="ieqn-65"><mml:math id="mml-ieqn-65"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 97.66&#x0025;, <inline-formula id="ieqn-66"><mml:math id="mml-ieqn-66"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 99.22&#x0025;, <inline-formula id="ieqn-67"><mml:math id="mml-ieqn-67"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 97.67&#x0025;, and an MCC of 96.90&#x0025;.</p>
<table-wrap id="table-3"><label>Table 3</label><caption><title>Results of the analysis of TLBOML-ERC approach upon 80:20 of TRS/TSS datasets under different measures</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="center" colspan="6">Training/Testing (80:20)</th>
</tr>
<tr>
<th align="left">Labels</th>
<th align="left">Accuracy</th>
<th align="left">Sensitivity</th>
<th align="left">Specificity</th>
<th align="left">F1-score</th>
<th align="left">MCC</th>
</tr>
</thead>
<tbody>
<tr>
<td align="center" colspan="6">Training phase</td>
</tr>
<tr>
<td align="left">Anger</td>
<td align="left">98.30</td>
<td align="left">96.58</td>
<td align="left">98.87</td>
<td align="left">96.58</td>
<td align="left">95.45</td>
</tr>
<tr>
<td align="left">Joy</td>
<td align="left">98.06</td>
<td align="left">96.30</td>
<td align="left">98.65</td>
<td align="left">96.18</td>
<td align="left">94.87</td>
</tr>
<tr>
<td align="left">Sadness</td>
<td align="left">99.11</td>
<td align="left">98.30</td>
<td align="left">99.38</td>
<td align="left">98.22</td>
<td align="left">97.62</td>
</tr>
<tr>
<td align="left">Fear</td>
<td align="left">98.06</td>
<td align="left">95.87</td>
<td align="left">98.78</td>
<td align="left">96.09</td>
<td align="left">94.80</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">98.38</td>
<td align="left">96.76</td>
<td align="left">98.92</td>
<td align="left">96.76</td>
<td align="left">95.68</td>
</tr>
<tr>
<td align="center" colspan="6">Testing phase</td>
</tr>
<tr>
<td align="left">Anger</td>
<td align="left">98.57</td>
<td align="left">97.59</td>
<td align="left">98.92</td>
<td align="left">97.25</td>
<td align="left">96.29</td>
</tr>
<tr>
<td align="left">Joy</td>
<td align="left">98.57</td>
<td align="left">96.21</td>
<td align="left">99.30</td>
<td align="left">96.95</td>
<td align="left">96.02</td>
</tr>
<tr>
<td align="left">Sadness</td>
<td align="left">99.46</td>
<td align="left">99.29</td>
<td align="left">99.52</td>
<td align="left">98.93</td>
<td align="left">98.58</td>
</tr>
<tr>
<td align="left">Fear</td>
<td align="left">98.75</td>
<td align="left">97.55</td>
<td align="left">99.16</td>
<td align="left">97.55</td>
<td align="left">96.71</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">98.84</td>
<td align="left">97.66</td>
<td align="left">99.22</td>
<td align="left">97.67</td>
<td align="left">96.90</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-5"><label>Figure 5</label><caption><title>Results of the analysis of TLBOML-ERC approach upon 80:20 of TRS/TSS datasets</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-5.tif"/></fig>
<p><xref ref-type="table" rid="table-4">Table 4</xref> and <xref ref-type="fig" rid="fig-6">Fig. 6</xref> illustrate the results of the proposed TLBOML-ERC model on 70&#x0025; of TRS and 30&#x0025; of TSS datasets. With 70&#x0025; of TRS, the TLBOML-ERC approach yielded an average <inline-formula id="ieqn-68"><mml:math id="mml-ieqn-68"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.24&#x0025;, <inline-formula id="ieqn-69"><mml:math id="mml-ieqn-69"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 96.47&#x0025;, <inline-formula id="ieqn-70"><mml:math id="mml-ieqn-70"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.82&#x0025;, <inline-formula id="ieqn-71"><mml:math id="mml-ieqn-71"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 96.49&#x0025;, and an MCC of 95.32&#x0025;. 
In addition to these, with 30&#x0025; of TSS, the proposed TLBOML-ERC model accomplished an average <inline-formula id="ieqn-72"><mml:math id="mml-ieqn-72"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.24&#x0025;, <inline-formula id="ieqn-73"><mml:math id="mml-ieqn-73"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 96.50&#x0025;, <inline-formula id="ieqn-74"><mml:math id="mml-ieqn-74"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.84&#x0025;, <inline-formula id="ieqn-75"><mml:math id="mml-ieqn-75"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 96.46&#x0025; and an MCC of 95.32&#x0025;.</p>
<table-wrap id="table-4"><label>Table 4</label><caption><title>Results of the analysis of the TLBOML-ERC approach upon 70:30 of TRS/TSS datasets under different measures</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="center" colspan="6">Training/Testing (70:30)</th>
</tr>
<tr>
<th align="left">Labels</th>
<th align="left">Accuracy</th>
<th align="left">Sensitivity</th>
<th align="left">Specificity</th>
<th align="left">F1-score</th>
<th align="left">MCC</th>
</tr>
</thead>
<tbody>
<tr>
<td align="center" colspan="6">Training phase</td>
</tr>
<tr>
<td align="left">Anger</td>
<td align="left">97.55</td>
<td align="left">96.33</td>
<td align="left">97.97</td>
<td align="left">95.28</td>
<td align="left">93.64</td>
</tr>
<tr>
<td align="left">Joy</td>
<td align="left">98.11</td>
<td align="left">95.37</td>
<td align="left">99.02</td>
<td align="left">96.16</td>
<td align="left">94.91</td>
</tr>
<tr>
<td align="left">Sadness</td>
<td align="left">98.65</td>
<td align="left">96.33</td>
<td align="left">99.39</td>
<td align="left">97.19</td>
<td align="left">96.31</td>
</tr>
<tr>
<td align="left">Fear</td>
<td align="left">98.65</td>
<td align="left">97.88</td>
<td align="left">98.91</td>
<td align="left">97.34</td>
<td align="left">96.43</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">98.24</td>
<td align="left">96.47</td>
<td align="left">98.82</td>
<td align="left">96.49</td>
<td align="left">95.32</td>
</tr>
<tr>
<td align="center" colspan="6">Testing phase</td>
</tr>
<tr>
<td align="left">Anger</td>
<td align="left">97.56</td>
<td align="left">97.46</td>
<td align="left">97.59</td>
<td align="left">94.92</td>
<td align="left">93.37</td>
</tr>
<tr>
<td align="left">Joy</td>
<td align="left">98.10</td>
<td align="left">94.41</td>
<td align="left">99.36</td>
<td align="left">96.20</td>
<td align="left">94.96</td>
</tr>
<tr>
<td align="left">Sadness</td>
<td align="left">98.69</td>
<td align="left">97.54</td>
<td align="left">99.11</td>
<td align="left">97.54</td>
<td align="left">96.65</td>
</tr>
<tr>
<td align="left">Fear</td>
<td align="left">98.63</td>
<td align="left">96.59</td>
<td align="left">99.29</td>
<td align="left">97.18</td>
<td align="left">96.28</td>
</tr>
<tr>
<td align="left">Average</td>
<td align="left">98.24</td>
<td align="left">96.50</td>
<td align="left">98.84</td>
<td align="left">96.46</td>
<td align="left">95.32</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-6"><label>Figure 6</label><caption><title>Results of the analysis of the TLBOML-ERC approach upon 70:30 of TRS/TSS datasets</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-6.tif"/></fig>
<p>Training Accuracy (TA) and Validation Accuracy (VA) values acquired by the proposed TLBOML-ERC method on the test dataset are shown in <xref ref-type="fig" rid="fig-7">Fig. 7</xref>. The experimental outcomes imply that the proposed TLBOML-ERC method achieved the highest TA and VA values, while VA values were higher than TA.</p>
<fig id="fig-7"><label>Figure 7</label><caption><title>TA and VA analyses results of TLBOML-ERC approach</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-7.tif"/></fig>
<p>Both Training Loss (TL) and Validation Loss (VL) values, achieved by the proposed TLBOML-ERC approach on the test dataset, are displayed in <xref ref-type="fig" rid="fig-8">Fig. 8</xref>. The experimental outcomes infer that the proposed TLBOML-ERC algorithm exhibited the least TL and VL values, while VL values were lower compared to TL.</p>
<fig id="fig-8"><label>Figure 8</label><caption><title>TL and VL analyses results of TLBOML-ERC approach</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-8.tif"/></fig>
<p>A clear precision-recall analysis was conducted on the TLBOML-ERC method using the test dataset, and the results are shown in <xref ref-type="fig" rid="fig-9">Fig. 9</xref>. The figure indicates that the proposed TLBOML-ERC method produced enhanced precision-recall values under all the classes.</p>
<fig id="fig-9"><label>Figure 9</label><caption><title>Precision-recall curve analysis of TLBOML-ERC approach</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-9.tif"/></fig>
<p>A brief Receiver Operating Characteristic (ROC) analysis was conducted on the TLBOML-ERC method using the test dataset, and the results are displayed in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>. The results infer that the proposed TLBOML-ERC approach excelled in categorizing distinct classes on the test dataset.</p>
<fig id="fig-10"><label>Figure 10</label><caption><title>ROC curve analysis of TLBOML-ERC approach</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-10.tif"/></fig>
<p><xref ref-type="table" rid="table-5">Table 5</xref> and <xref ref-type="fig" rid="fig-11">Fig. 11</xref> show the comprehensive comparison study outcomes accomplished by the proposed TLBOML-ERC model and other existing models [<xref ref-type="bibr" rid="ref-23">23</xref>]. The results infer that the TLBOML-ERC model outperformed other models. With respect to <inline-formula id="ieqn-76"><mml:math id="mml-ieqn-76"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, the proposed TLBOML-ERC model achieved the highest <inline-formula id="ieqn-77"><mml:math id="mml-ieqn-77"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 98.84&#x0025;, whereas BiLSTM, Gated Recurrent Unit (GRU), Bidirectional GRU (BiGRU), Artificial Neural Network (ANN), Convolutional Neural Network (CNN), and GoogLeNet models produced the least <inline-formula id="ieqn-78"><mml:math id="mml-ieqn-78"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> values such as 93.85&#x0025;, 96.19&#x0025;, 92.96&#x0025;, 92.79&#x0025;, 94.86&#x0025;, and 95.59&#x0025; respectively. 
Also, in terms of <inline-formula id="ieqn-79"><mml:math id="mml-ieqn-79"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, the TLBOML-ERC model reached the highest <inline-formula id="ieqn-80"><mml:math id="mml-ieqn-80"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 97.66&#x0025;, whereas BiLSTM, GRU, BiGRU, ANN, CNN, and GoogLeNet models produced the least <inline-formula id="ieqn-81"><mml:math id="mml-ieqn-81"><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> values such as 94.58&#x0025;, 94.01&#x0025;, 92.33&#x0025;, 93.84&#x0025;, 95.28&#x0025;, and 94.34&#x0025; correspondingly. With respect to <inline-formula id="ieqn-82"><mml:math id="mml-ieqn-82"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula>, the proposed TLBOML-ERC model attained the highest <inline-formula id="ieqn-83"><mml:math id="mml-ieqn-83"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> of 99.22&#x0025;, whereas BiLSTM, GRU, BiGRU, ANN, CNN, and GoogLeNet models yielded low <inline-formula id="ieqn-84"><mml:math id="mml-ieqn-84"><mml:mi>s</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:msub><mml:mi>c</mml:mi><mml:mrow><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> values such as 92.55&#x0025;, 96.59&#x0025;, 95.45&#x0025;, 92.08&#x0025;, 95.44&#x0025;, and 96.19&#x0025; correspondingly.</p>
<table-wrap id="table-5"><label>Table 5</label><caption><title>Comparative analysis results of TLBOML-ERC approach and other existing algorithms</title></caption>
<table frame="hsides">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th align="left">Methods</th>
<th align="left">Accuracy</th>
<th align="left">Sensitivity</th>
<th align="left">Specificity</th>
<th align="left">F1-score</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">TLBOML-ERC</td>
<td align="left">98.84</td>
<td align="left">97.66</td>
<td align="left">99.22</td>
<td align="left">97.67</td>
</tr>
<tr>
<td align="left">Bi-LSTM</td>
<td align="left">93.85</td>
<td align="left">94.58</td>
<td align="left">92.55</td>
<td align="left">94.24</td>
</tr>
<tr>
<td align="left">GRU</td>
<td align="left">96.19</td>
<td align="left">94.01</td>
<td align="left">96.59</td>
<td align="left">93.07</td>
</tr>
<tr>
<td align="left">Bi-GRU</td>
<td align="left">92.96</td>
<td align="left">92.33</td>
<td align="left">95.45</td>
<td align="left">93.73</td>
</tr>
<tr>
<td align="left">ANN model</td>
<td align="left">92.79</td>
<td align="left">93.84</td>
<td align="left">92.08</td>
<td align="left">94.31</td>
</tr>
<tr>
<td align="left">CNN model</td>
<td align="left">94.86</td>
<td align="left">95.28</td>
<td align="left">95.44</td>
<td align="left">92.27</td>
</tr>
<tr>
<td align="left">GoogLeNet</td>
<td align="left">95.59</td>
<td align="left">94.34</td>
<td align="left">96.19</td>
<td align="left">93.87</td>
</tr>
</tbody>
</table>
</table-wrap><fig id="fig-11"><label>Figure 11</label><caption><title>Comparative analysis of TLBOML-ERC approach and other existing algorithms</title></caption><graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_33834-fig-11.tif"/></fig>
<p>Finally, with respect to <inline-formula id="ieqn-85"><mml:math id="mml-ieqn-85"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula>, the proposed TLBOML-ERC model gained a high <inline-formula id="ieqn-86"><mml:math id="mml-ieqn-86"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> of 97.67&#x0025;, whereas BiLSTM, GRU, BiGRU, ANN, CNN, and GoogLeNet models produced low <inline-formula id="ieqn-87"><mml:math id="mml-ieqn-87"><mml:mi>F</mml:mi><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mrow><mml:mtext mathvariant="italic">score</mml:mtext></mml:mrow></mml:mrow></mml:msub></mml:math></inline-formula> values such as 94.24&#x0025;, 93.07&#x0025;, 93.73&#x0025;, 94.31&#x0025;, 92.27&#x0025;, and 93.87&#x0025; respectively. From the detailed results and discussion, it can be inferred that the proposed TLBOML-ERC model produced a superior performance compared to other models.</p>
</sec>
<sec id="s5"><label>5</label><title>Conclusion</title>
<p>In the current study, a new TLBOML-ERC method has been devised for the recognition of emotions and sentiments found in Arabic tweets. To attain this, the TLBOML-ERC model initially carries out data pre-processing and a CBOW-based word embedding process. For emotion recognition, the DAE model is exploited, which identifies the categories of emotions found in Arabic tweets. In order to improve the efficacy of the DAE model, the TLBO algorithm is utilized for parameter optimization. The experimental analysis was conducted upon the proposed TLBOML-ERC approach using an Arabic tweets&#x2019; dataset. The obtained results show the promising performance of the TLBOML-ERC model on Arabic emotion classification. In the future, the TLBOML-ERC model can be modified to utilize feature selection approaches to boost the classification results.</p>
</sec>
</body>
<back>
<sec><title>Funding Statement</title>
<p>Princess Nourah bint Abdulrahman University Researchers Supporting Project Number (<award-id>PNURSP2022R263</award-id>), <funding-source>Princess Nourah bint Abdulrahman University</funding-source>, Riyadh, Saudi Arabia. The authors would like to thank the <funding-source>Deanship of Scientific Research at Umm Al-Qura University</funding-source> for supporting this work by Grant Code: <award-id>22UQU4340237DSR36</award-id>. The authors are thankful to the <funding-source>Deanship of Scientific Research</funding-source> at Najran University for funding this work under the Research Groups Funding program grant code (<award-id>NU/RG/SERC/11/7</award-id>).</p></sec>
<sec sec-type="COI-statement"><title>Conflicts of Interest</title>
<p>The authors declare that they have no conflicts of interest to report regarding the present study.</p></sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Elnagar</surname></string-name>, <string-name><given-names>R. A.</given-names> <surname>Debsi</surname></string-name> and <string-name><given-names>O.</given-names> <surname>Einea</surname></string-name></person-group>, &#x201C;<article-title>Arabic text classification using deep learning models</article-title>,&#x201D; <source>Information Processing &#x0026; Management</source>, vol. <volume>57</volume>, no. <issue>1</issue>, pp. <fpage>102121</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Klaylat</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Osman</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Hamandi</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Zantout</surname></string-name></person-group>, &#x201C;<article-title>Emotion recognition in Arabic speech</article-title>,&#x201D; <source>Analog Integrated Circuits and Signal Processing</source>, vol. <volume>96</volume>, no. <issue>2</issue>, pp. <fpage>337</fpage>&#x2013;<lpage>351</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>P.</given-names> <surname>Nandwani</surname></string-name> and <string-name><given-names>R.</given-names> <surname>Verma</surname></string-name></person-group>, &#x201C;<article-title>A review on sentiment analysis and emotion detection from text</article-title>,&#x201D; <source>Social Network Analysis and Mining</source>, vol. <volume>11</volume>, no. <issue>1</issue>, pp. <fpage>81</fpage>, <year>2021</year>; <pub-id pub-id-type="pmid">34484462</pub-id></mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name></person-group>, &#x201C;<article-title>Proposing high-smart approach for content authentication and tampering detection of arabic text transmitted via internet</article-title>,&#x201D; <source>IEICE Transactions on Information and Systems</source>, vol. <volume>E103.D</volume>, no. <issue>10</issue>, pp. <fpage>2104</fpage>&#x2013;<lpage>2112</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Alharbi</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Taileb</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Kalkatawi</surname></string-name></person-group>, &#x201C;<article-title>Deep learning in Arabic sentiment analysis: An overview</article-title>,&#x201D; <source>Journal of Information Science</source>, vol. <volume>47</volume>, no. <issue>1</issue>, pp. <fpage>129</fpage>&#x2013;<lpage>140</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name></person-group>, &#x201C;<article-title>A hybrid intelligent approach for content authentication and tampering detection of Arabic text transmitted via internet</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>66</volume>, no. <issue>1</issue>, pp. <fpage>195</fpage>&#x2013;<lpage>211</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>T. S.</given-names> <surname>Kumar</surname></string-name></person-group>, &#x201C;<article-title>Construction of hybrid deep learning model for predicting children behavior based on their emotional reaction</article-title>,&#x201D; <source>Journal of Information Technology and Digital World</source>, vol. <volume>3</volume>, no. <issue>1</issue>, pp. <fpage>29</fpage>&#x2013;<lpage>43</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Abdelmaboud</surname></string-name>, <string-name><given-names>A. A.</given-names> <surname>Zain</surname></string-name>, <string-name><given-names>M. M.</given-names> <surname>Almazah</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Zahary</surname></string-name></person-group>, &#x201C;<article-title>Tampering detection approach of Arabic-text based on contents interrelationship</article-title>,&#x201D; <source>Intelligent Automation &#x0026; Soft Computing</source>, vol. <volume>27</volume>, no. <issue>2</issue>, pp. <fpage>483</fpage>&#x2013;<lpage>498</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. A.</given-names> <surname>Ahmed</surname></string-name>, <string-name><given-names>R. A.</given-names> <surname>Hasan</surname></string-name>, <string-name><given-names>A. H.</given-names> <surname>Ali</surname></string-name> and <string-name><given-names>M. A.</given-names> <surname>Mohammed</surname></string-name></person-group>, &#x201C;<article-title>The classification of the modern Arabic poetry using machine learning</article-title>,&#x201D; <source>TELKOMNIKA</source>, vol. <volume>17</volume>, no. <issue>5</issue>, pp. <fpage>2667</fpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name></person-group>, &#x201C;<article-title>Entropy-based watermarking approach for sensitive tamper detection of Arabic text</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>67</volume>, no. <issue>3</issue>, pp. <fpage>3635</fpage>&#x2013;<lpage>3648</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. A.</given-names> <surname>Acheampong</surname></string-name>, <string-name><given-names>C.</given-names> <surname>Wenyu</surname></string-name> and <string-name><given-names>H. N.</given-names> <surname>Mensah</surname></string-name></person-group>, &#x201C;<article-title>Text-based emotion detection: Advances, challenges, and opportunities</article-title>,&#x201D; <source>Engineering Reports</source>, vol. <volume>2</volume>, no. <issue>7</issue>, pp. <fpage>1</fpage>&#x2013;<lpage>24</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Abdullah</surname></string-name>, <string-name><given-names>M.</given-names> <surname>AlMasawa</surname></string-name>, <string-name><given-names>I.</given-names> <surname>Makki</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Alsolmi</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Mahrous</surname></string-name></person-group>, &#x201C;<article-title>Emotions extraction from Arabic tweets</article-title>,&#x201D; <source>International Journal of Computers and Applications</source>, vol. <volume>42</volume>, no. <issue>7</issue>, pp. <fpage>661</fpage>&#x2013;<lpage>675</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E.</given-names> <surname>Lieskovsk&#x00E1;</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Jakubec</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Jarina</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Chmul&#x00ED;k</surname></string-name></person-group>, &#x201C;<article-title>A review on speech emotion recognition using deep learning and attention mechanism</article-title>,&#x201D; <source>Electronics</source>, vol. <volume>10</volume>, no. <issue>10</issue>, pp. <fpage>1163</fpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>I.</given-names> <surname>Aljarah</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Habib</surname></string-name>, <string-name><given-names>N.</given-names> <surname>Hijazi</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Faris</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Qaddoura</surname></string-name> <etal>et al.,</etal></person-group> &#x201C;<article-title>Intelligent detection of hate speech in Arabic social network: A machine learning approach</article-title>,&#x201D; <source>Journal of Information Science</source>, vol. <volume>47</volume>, no. <issue>4</issue>, pp. <fpage>483</fpage>&#x2013;<lpage>501</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Baali</surname></string-name> and <string-name><given-names>N.</given-names> <surname>Ghneim</surname></string-name></person-group>, &#x201C;<article-title>Emotion analysis of Arabic tweets using deep learning approach</article-title>,&#x201D; <source>Journal of Big Data</source>, vol. <volume>6</volume>, no. <issue>1</issue>, pp. <fpage>89</fpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>E. A. H.</given-names> <surname>Khalil</surname></string-name>, <string-name><given-names>E. M. F. E.</given-names> <surname>Houby</surname></string-name> and <string-name><given-names>H. K.</given-names> <surname>Mohamed</surname></string-name></person-group>, &#x201C;<article-title>Deep learning for emotion analysis in Arabic tweets</article-title>,&#x201D; <source>Journal of Big Data</source>, vol. <volume>8</volume>, no. <issue>1</issue>, pp. <fpage>136</fpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>Al-Twairesh</surname></string-name></person-group>, &#x201C;<article-title>The evolution of language models applied to emotion analysis of Arabic tweets</article-title>,&#x201D; <source>Information</source>, vol. <volume>12</volume>, no. <issue>2</issue>, pp. <fpage>84</fpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. S.</given-names> <surname>Poorna</surname></string-name> and <string-name><given-names>G. J.</given-names> <surname>Nair</surname></string-name></person-group>, &#x201C;<article-title>Multistage classification scheme to enhance speech emotion recognition</article-title>,&#x201D; <source>International Journal of Speech Technology</source>, vol. <volume>22</volume>, no. <issue>2</issue>, pp. <fpage>327</fpage>&#x2013;<lpage>340</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M. A.</given-names> <surname>Al-Hagery</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Al-assaf</surname></string-name> and <string-name><given-names>F. M.</given-names> <surname>Al-kharboush</surname></string-name></person-group>, &#x201C;<article-title>Exploration of the best performance method of emotions classification for Arabic tweets</article-title>,&#x201D; <source>Indonesian Journal of Electrical Engineering and Computer Science</source>, vol. <volume>19</volume>, no. <issue>2</issue>, pp. <fpage>1010</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>D.</given-names> <surname>Qiu</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Jiang</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Chen</surname></string-name></person-group>, &#x201C;<article-title>Fuzzy information retrieval based on continuous bag-of-words model</article-title>,&#x201D; <source>Symmetry</source>, vol. <volume>12</volume>, no. <issue>2</issue>, pp. <fpage>225</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Z.</given-names> <surname>Shang</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Sun</surname></string-name>, <string-name><given-names>Y.</given-names> <surname>Xia</surname></string-name> and <string-name><given-names>W.</given-names> <surname>Zhang</surname></string-name></person-group>, &#x201C;<article-title>Vibration-based damage detection for bridges by deep convolutional denoising autoencoder</article-title>,&#x201D; <source>Structural Health Monitoring</source>, vol. <volume>20</volume>, no. <issue>4</issue>, pp. <fpage>1880</fpage>&#x2013;<lpage>1903</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>W.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Chen</surname></string-name>, <string-name><given-names>J.</given-names> <surname>Peng</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Panahi</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Lee</surname></string-name></person-group>, &#x201C;<article-title>Landslide susceptibility modeling based on ANFIS with teaching-learning-based optimization and satin bowerbird optimizer</article-title>,&#x201D; <source>Geoscience Frontiers</source>, vol. <volume>12</volume>, no. <issue>1</issue>, pp. <fpage>93</fpage>&#x2013;<lpage>107</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="conf-proc"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Abdullah</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Hadzikadicy</surname></string-name> and <string-name><given-names>S.</given-names> <surname>Shaikhz</surname></string-name></person-group>, &#x201C;<article-title>SEDAT: Sentiment and emotion detection in Arabic text using cnn-lstm deep learning</article-title>,&#x201D; in <conf-name>2018 17th IEEE Int. Conf. on Machine Learning and Applications (ICMLA)</conf-name>, <conf-loc>Orlando, FL, USA</conf-loc>, pp. <fpage>835</fpage>&#x2013;<lpage>840</lpage>, <year>2018</year>.</mixed-citation></ref>
</ref-list>
</back>
</article>