<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1 20151215//EN" "http://jats.nlm.nih.gov/publishing/1.1/JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xml:lang="en" article-type="research-article" dtd-version="1.1">
<front>
<journal-meta>
<journal-id journal-id-type="pmc">CSSE</journal-id>
<journal-id journal-id-type="nlm-ta">CSSE</journal-id>
<journal-id journal-id-type="publisher-id">CSSE</journal-id>
<journal-title-group>
<journal-title>Computer Systems Science &#x0026; Engineering</journal-title>
</journal-title-group>
<issn pub-type="ppub">0267-6192</issn>
<publisher>
<publisher-name>Tech Science Press</publisher-name>
<publisher-loc>USA</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">34609</article-id>
<article-id pub-id-type="doi">10.32604/csse.2023.034609</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Article</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Optimal Deep Hybrid Boltzmann Machine Based Arabic Corpus Classification Model</article-title><alt-title alt-title-type="left-running-head">Optimal Deep Hybrid Boltzmann Machine Based Arabic Corpus Classification Model</alt-title><alt-title alt-title-type="right-running-head">Optimal Deep Hybrid Boltzmann Machine Based Arabic Corpus Classification Model</alt-title>
</title-group>
<contrib-group>
<contrib id="author-1" contrib-type="author" corresp="yes">
<name name-style="western"><surname>Duhayyim</surname><given-names>Mesfer Al</given-names></name>
<xref ref-type="aff" rid="aff-1">1</xref><email>m.alduhayyim@psau.edu.sa</email>
</contrib>
<contrib id="author-2" contrib-type="author">
<name name-style="western"><surname>Al-onazi</surname><given-names>Badriyya B.</given-names></name>
<xref ref-type="aff" rid="aff-2">2</xref>
</contrib>
<contrib id="author-3" contrib-type="author">
<name name-style="western"><surname>Nour</surname><given-names>Mohamed K.</given-names></name>
<xref ref-type="aff" rid="aff-3">3</xref>
</contrib>
<contrib id="author-4" contrib-type="author">
<name name-style="western"><surname>Yafoz</surname><given-names>Ayman</given-names></name>
<xref ref-type="aff" rid="aff-4">4</xref>
</contrib>
<contrib id="author-5" contrib-type="author">
<name name-style="western"><surname>Mehanna</surname><given-names>Amal S.</given-names></name>
<xref ref-type="aff" rid="aff-5">5</xref>
</contrib>
<contrib id="author-6" contrib-type="author">
<name name-style="western"><surname>Yaseen</surname><given-names>Ishfaq</given-names></name>
<xref ref-type="aff" rid="aff-6">6</xref>
</contrib>
<contrib id="author-7" contrib-type="author">
<name name-style="western"><surname>Abdelmageed</surname><given-names>Amgad Atta</given-names></name>
<xref ref-type="aff" rid="aff-6">6</xref>
</contrib>
<contrib id="author-8" contrib-type="author">
<name name-style="western"><surname>Mohammed</surname><given-names>Gouse Pasha</given-names></name>
<xref ref-type="aff" rid="aff-6">6</xref>
</contrib>
<aff id="aff-1"><label>1</label><institution>Department of Computer Science, College of Computer Engineering and Sciences, Prince Sattam bin Abdulaziz University</institution>, <addr-line>Al-Kharj, 16273</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-2"><label>2</label><institution>Department of Language Preparation, Arabic Language Teaching Institute, Princess Nourah bint Abdulrahman University, P.O. Box 84428</institution>, <addr-line>Riyadh, 11671</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-3"><label>3</label><institution>Department of Computer Sciences, College of Computing and Information System, Umm Al-Qura University</institution>, <addr-line>Makkah, 24211</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-4"><label>4</label><institution>Department of Information Systems, Faculty of Computing and Information Technology, King Abdulaziz University</institution>, <addr-line>Jeddah</addr-line>, <country>Saudi Arabia</country></aff>
<aff id="aff-5"><label>5</label><institution>Department of Digital Media, Faculty of Computers and Information Technology, Future University in Egypt</institution>, <addr-line>New Cairo, 11845</addr-line>, <country>Egypt</country></aff>
<aff id="aff-6"><label>6</label><institution>Department of Computer and Self Development, Preparatory Year Deanship, Prince Sattam bin Abdulaziz University</institution>, <addr-line>AlKharj</addr-line>, <country>Saudi Arabia</country></aff>
</contrib-group><author-notes><corresp id="cor1"><label>&#x002A;</label>Corresponding Author: Mesfer Al Duhayyim. Email: <email>m.alduhayyim@psau.edu.sa</email></corresp></author-notes>
<pub-date date-type="collection" publication-format="electronic">
<year>2023</year></pub-date>
<pub-date date-type="pub" publication-format="electronic">
<day>31</day>
<month>3</month>
<year>2023</year>
</pub-date>
<volume>46</volume>
<issue>3</issue>
<fpage>2755</fpage>
<lpage>2772</lpage>
<history>
<date date-type="received"><day>21</day><month>7</month><year>2022</year></date>
<date date-type="accepted"><day>11</day><month>10</month><year>2022</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Duhayyim et al.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Duhayyim et al.</copyright-holder>
<license xlink:href="https://creativecommons.org/licenses/by/4.0/">
<license-p>This work is licensed under a <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</ext-link>, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.</license-p>
</license>
</permissions>
<self-uri content-type="pdf" xlink:href="TSP_CSSE_34609.pdf"></self-uri>
<abstract>
<p>Natural Language Processing (NLP) for the Arabic language has gained much significance in recent years. The most commonly-utilized NLP task is the &#x2018;Text Classification&#x2019; process. Its main intention is to apply the Machine Learning (ML) approaches for automatically classifying the textual files into one or more pre-defined categories. In ML approaches, the first and foremost crucial step is identifying an appropriate large dataset to test and train the method. One of the trending ML techniques, i.e., Deep Learning (DL) technique needs huge volumes of different types of datasets for training to yield the best outcomes. The current study designs a new Dice Optimization with a Deep Hybrid Boltzmann Machine-based Arabic Corpus Classification (DODHBM-ACC) model in this background. The presented DODHBM-ACC model primarily relies upon different stages of pre-processing and the word2vec word embedding process. For Arabic text classification, the DHBM technique is utilized. This technique is a hybrid version of the Deep Boltzmann Machine (DBM) and Deep Belief Network (DBN). It has the advantage of learning the decisive intention of the classification process. To adjust the hyperparameters of the DHBM technique, the Dice Optimization Algorithm (DOA) is exploited in this study. The experimental analysis was conducted to establish the superior performance of the proposed DODHBM-ACC model. The outcomes inferred the better performance of the proposed DODHBM-ACC model over other recent approaches.</p>
</abstract>
<kwd-group kwd-group-type="author">
<kwd>Arabic corpus</kwd>
<kwd>text classification</kwd>
<kwd>machine learning</kwd>
<kwd>deep learning</kwd>
<kwd>dice optimization</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<label>1</label>
<title>Introduction</title>
<p>With the advancements in Natural Language Processing (NLP), the Arabic Text Categorization (ATC) process has become an active research domain since the Arabic language has several difficulties, such as highly-complicated structure, unique morphological characters and so on [<xref ref-type="bibr" rid="ref-1">1</xref>]. Indeed, the derivational and the inflectional nature of the Arabic language examine highly-complex structures and morphology. The key objective of the ATC approach is to allow pre-defined classes for the Arabic text based on its content. Text representation is a decisive stage that suggestively impacts the performance of the ATC process. In literature, an extensive array of Arabic text representation techniques was reviewed [<xref ref-type="bibr" rid="ref-2">2</xref>]. For example, a conventional text modelling related to the Bag-Of-Words (BOW) representation was attained for the existing acts in the NLP domain. But, this technique suffered from the curse of dimensionality and the non-existence of semantic relations among different text units [<xref ref-type="bibr" rid="ref-3">3</xref>,<xref ref-type="bibr" rid="ref-4">4</xref>]. Text Classification (TC) can be described as a text-mining procedure in which a category or a class is specified for the presented textual file [<xref ref-type="bibr" rid="ref-5">5</xref>]. The productivity of this procedure is measured in terms of class sets, whereas all the class sets contain a set of text files that belong to a particular kind or a topic [<xref ref-type="bibr" rid="ref-6">6</xref>].</p>
<p>The single-label TC presents a single label for every file, whereas the multi-label TC provides different labels for every file [<xref ref-type="bibr" rid="ref-7">7</xref>]. A dynamic classification method is required to handle the huge volumes of text generated on the web every minute. This method should categorize every file under a suitable category and simplify the tasks in other areas like information retrieval and NLP. Unsupervised and supervised Machine Learning (ML) techniques were scrutinized in the domain of TC earlier [<xref ref-type="bibr" rid="ref-8">8</xref>,<xref ref-type="bibr" rid="ref-9">9</xref>]. The unsupervised learning method varies from supervised learning in the labelled dataset. Supervised learning methods utilize labelled datasets to forecast the future, whereas these labelled datasets are known to be the knowledge repository of the models [<xref ref-type="bibr" rid="ref-10">10</xref>]. Precisely, it can be explained as a teaching process in which a method is trained with adequate information and is allowed to perform the predictions after the teaching process is over [<xref ref-type="bibr" rid="ref-11">11</xref>]. In the unsupervised learning approach, the data is not labelled. These methods are unaware of any data or its categories or classes in a dataset; such methods try to find the significant paradigms in a dataset. Both characteristics, as well as the complexities of the Arabic language, make the processing of Arabic texts [<xref ref-type="bibr" rid="ref-12">12</xref>] a challenging process. It is difficult to handle numerous complexities in Arabic like diglossia, ambiguity, etc.; at first, it is challenging to understand and read the Arabic script since the meaning conveyed by the Arabic letters changes according to their position in a word. Secondly, the language has no dedicated letter or capitalization method. 
Finally, the language has a complex morphology framework, while its alphabet system is not easy to understand [<xref ref-type="bibr" rid="ref-13">13</xref>]. In addition, it is also challenging to normalize the inconsistencies when using a few letters, diacritical marks and dialects. Linguistics researchers and technology developers deal with complexities in NLP tools through morphology analysis, tokenization and stemming from the Arabic language [<xref ref-type="bibr" rid="ref-14">14</xref>].</p>
<p>The current article designs a new Dice Optimization with a Deep Hybrid Boltzmann Machine-based Arabic Corpus Classification (DODHBM-ACC) model. The presented DODHBM-ACC model primarily relies upon different stages of pre-processing and the word2vec word embedding process. For Arabic text classification, the DHBM technique is utilized. It is a hybrid version of the Deep Boltzmann Machine (DBM) and Deep Belief Network (DBN). It has the advantage of learning the decisive intention of a classification process. To fine-tune the hyperparameters involved in the DHBM technique, the Dice Optimization Algorithm (DOA) is exploited in this study. The experimental analysis was conducted to establish the superior performance of the proposed DODHBM-ACC model.</p>
<p>The rest of the paper is organized as follows. Section 2 offers information about the works conducted earlier in this domain, and Section 3 explains the proposed model. Next, Section 4 provides the information on experimental validation, whereas Section 5 concludes the work.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Related Works</title>
<p>In recent times, Deep Learning (DL) methods are extensively utilized in Sentiment Analysis (SA) research. Few researchers have employed NLP or pre-processing methods to prepare the data for the classification process. El-Alami et al. [<xref ref-type="bibr" rid="ref-15">15</xref>] examined Long Short Term Memory (LSTM), Convolutional Neural Network (CNN) and a combination of these methods to accomplish the ATC process. This work further dealt with the morphological diversity of the Arabic letters by exploring the word embedding method using sub-word information and the position weights. This study framed a policy to refine the Arabic vector space representation to ensure an adjacent vector representation for the linked words. It was done with the help of semantic data embedding in lexical sources. The earlier study [<xref ref-type="bibr" rid="ref-16">16</xref>] devised a feature selection algorithm by integrating the Artificial Bee Colony (ABC) technique and the chi-square technique. Chi-square is a filtering technique that can perform calculations simply and rapidly. It can handle a large-dimensional feature and can be utilized as an initial level in feature selection procedures. In this study, the ABC technique, i.e., a wrapper approach, was utilized as another level, after which Naive Bayes was employed as a Fitness Function (FF).</p>
<p>Al-Anzi et al. [<xref ref-type="bibr" rid="ref-17">17</xref>] developed an innovative text classification technique that neither practised dimensionality reduction nor SA approaches. The presented technique was a space-efficient approach, i.e., it made use of an initial-order Markov method for the hierarchical ATC. A Markov chain method was arranged based on the neighbouring character series for every category and its sub-categories. Then, the preparation methods were utilized to score the files for classification. Alhaj et al. [<xref ref-type="bibr" rid="ref-18">18</xref>] developed a new TC technique to improve the performance of the ATC process utilizing ML approaches. The identification of an appropriate Feature Selection (FS) methodology along with an ideal sum of the features remains the most important step in the ATC process to achieve the finest classification outcomes. Thus, the authors devised an algorithm named Optimal Configuration Determination for ATC (OCATC). It can also be used as a Particle Swarm Optimization (PSO) technique to find the best configuration. The presented algorithm derived and transformed the attributes from the text data into an arithmetic vector with the help of Term Frequency-Inverse Document Frequency (TF&#x2013;IDF) method.</p>
<p>Alshaer et al. [<xref ref-type="bibr" rid="ref-19">19</xref>] focused on learning the impact of the enhanced Chi (ImpCHI) square method on the performances of the six-renowned classification models. The proposed method was significant enough to enhance Arabic text classification. Further, it was considered a promising basis for the classification of the text due to its contribution in terms of pre-defined classes. Ababneh [<xref ref-type="bibr" rid="ref-20">20</xref>] attempted to find the best dataset that could offer fair evaluation and, importantly, train the method for TC. In this examination, renowned and accurate learning methods were employed. The author provided time measures and emphasized the relevance of training the methods using such datasets to enable the Arabic language authors to choose a suitable dataset and leverage a solid basis for comparison.</p>
</sec>
<sec id="s3">
<label>3</label>
<title>The Proposed DODHBM-ACC model</title>
<p>The current study has developed a new DODHBM-ACC model for automatic Arabic corpus classification. The presented DODHBM-ACC model primarily relies on four processes: data pre-processing, word embedding, Arabic text classification and hyperparameter tuning. <xref ref-type="fig" rid="fig-1">Fig. 1</xref> depicts the overall processes of the DODHBM-ACC technique.</p>
<fig id="fig-1">
<label>Figure 1</label>
<caption>
<title>Overall processes of the DODHBM-ACC approach</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-1.tif"/>
</fig>
<sec id="s3_1">
<label>3.1</label>
<title>Data Preprocessing</title>
<p>The overall steps involved in this pre-processing function are briefed herewith.<list list-type="bullet"><list-item>
<p>Tokenization: This function tokenizes a text and classifies a text as either a token or a word set.</p></list-item><list-item>
<p>Stop-words removal: It excludes the words that are neither verbs nor nouns. The list of different stop words in Arabic has more than 400 terms.</p></list-item><list-item>
<p>Stemming: In this procedure, both suffixes and token prefixes are eliminated. The stemming function is a vital process and positively impacts a model&#x0027;s performance and efficacy.</p></list-item></list></p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Word Embedding</title>
<p>Word2Vec methodology uses neural network techniques to accomplish word representation [<xref ref-type="bibr" rid="ref-21">21</xref>]. This algorithm considers a large corpus as its input and considers each vocabulary in the corpora. Harris proposed this concept, according to which the words used in the same contexts tend to have similar meanings. This technique upgrades the vector of a word based on its appearance in the external environment with the help of a pre-determined size window. The similarity amongst such words becomes higher than earlier, and their vectors become convergent. The Word2Vec process follows two approaches such as the Skip-gram (SG) approach and the Continuous Bag-of-Words (CBOW) approach to generate a word vector. In the SG approach, the vector of an external environment within the window size is transformed based on the centre word. In the CBOW approach, the vector of a word centre is upgraded based on the external environment within the size of the window. In the current study, three dissimilar dimensions have been created for these approaches, such as 100, 200 and 300. Further, a window at 5 is also applied, whereas the minimum word appearance in the corpus is equivalent to 5.</p>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Arabic Text Classification Using DHBM Technique</title>
<p>For the classification of Arabic texts, the DHBM technique is utilized. This technique is a hybrid version of DBM and DBN and is regarded as an increasingly-complicated variant of both methods [<xref ref-type="bibr" rid="ref-22">22</xref>]. The hybrid structure was created with the intention of conducting the classification process. An alternative method to consider the DHBM approach is to configure a strongly-incorporated Hybrid Restricted Boltzmann Machine (HRBM) instead of an individual module. SBEN is a stack of HRBM in which every model <inline-formula id="ieqn-1">
<mml:math id="mml-ieqn-1"><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mi>l</mml:mi></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> is present at their corresponding level <inline-formula id="ieqn-2">
<mml:math id="mml-ieqn-2"><mml:mi>l</mml:mi></mml:math>
</inline-formula> of generalization in its general framework. To leverage the prediction power at all the generalization levels, an intermediate step of each layer-by-layer predictor is employed to compute the SBEN <inline-formula id="ieqn-3">
<mml:math id="mml-ieqn-3"><mml:mi>p</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mo stretchy="false">(</mml:mo><mml:mi>y</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>x</mml:mi><mml:msub><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mi>m</mml:mi><mml:mi>b</mml:mi><mml:mi>l</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> at the time of inference. The first edition of these models is trained through a bottom-up greedy algorithm from which every layer learns to independently forecast the output of another layer (trained on hidden presentation). Through relative observation of the SBEN, the DHBM model is created based on the stacked HRBM expertise. Afterwards, the predictors are connected, which implies that the prediction of the total model is based on the learning of every layer about their respective abstract levels. Additionally, the relationships are built between every layer through a full Bi-directional expertise. In other terms, in order to compute the layer of the hidden parameters in the algorithm (excluding input and top hidden states), the activation from above and below layers must be integrated instantaneously. As per the discussion given above, the three layers of the DHBM model are determined. As shown in <xref ref-type="fig" rid="fig-1">Fig. 1</xref>, the description lengthens up to <italic>L</italic>-layer model. With the help of a pattern vector input <inline-formula id="ieqn-5">
<mml:math id="mml-ieqn-5"><mml:mi>x</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x22EF;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>D</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> and the corresponding target parameter <inline-formula id="ieqn-6">
<mml:math id="mml-ieqn-6"><mml:mi>y</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x22EF;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>C</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math>
</inline-formula>, two sets of the hidden parameters <inline-formula id="ieqn-7">
<mml:math id="mml-ieqn-7"><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mi>h</mml:mi><mml:mn>1</mml:mn><mml:mn>1</mml:mn></mml:msubsup><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x22EF;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msubsup><mml:mi>h</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:mrow><mml:mn>1</mml:mn></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-8">
<mml:math id="mml-ieqn-8"><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mi>h</mml:mi><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:msubsup><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mo>&#x22EF;</mml:mo><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:msubsup><mml:mi>h</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> and the model variables <inline-formula id="ieqn-9">
<mml:math id="mml-ieqn-9"><mml:mrow><mml:msup><mml:mi mathvariant="normal">&#x0398;</mml:mi><mml:mi>m</mml:mi></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mi>W</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>&#x00A0;</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mi>U</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>&#x00A0;</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mi>W</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:mtext>&#x00A0;</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mi>U</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:msup><mml:mo stretchy="false">)</mml:mo><mml:mn>3</mml:mn></mml:msup></mml:mrow></mml:math>
</inline-formula> are applied. At the same time, the energy of the DHBM method is determined as given below.</p>
<p><disp-formula id="eqn-1"><label>(1)</label>
<mml:math id="mml-eqn-1" display="block"><mml:mi>E</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:msup><mml:mi>W</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mi>x</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:msup><mml:mi>U</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:msup><mml:mi>W</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mrow><mml:mn>2</mml:mn><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:msup><mml:mi>U</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow><mml:mo>.</mml:mo></mml:math>
</disp-formula></p>
<p><xref ref-type="disp-formula" rid="eqn-1">Eq. (1)</xref> notes that <inline-formula id="ieqn-10">
<mml:math id="mml-ieqn-10"><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mn>1</mml:mn><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mi>y</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:msubsup><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>C</mml:mi></mml:msubsup></mml:math>
</inline-formula> refers to a one-hot vector encoder of <inline-formula id="ieqn-11">
<mml:math id="mml-ieqn-11"><mml:mi>y</mml:mi></mml:math>
</inline-formula>. It is likely that a 3-DHBM model is allocated to a 4-tuple <inline-formula id="ieqn-12">
<mml:math id="mml-ieqn-12"><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> layer as given below.</p>
<p><disp-formula id="eqn-2"><label>(2)</label>
<mml:math id="mml-eqn-2" display="block"><mml:mi>p</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi mathvariant="normal">&#x0398;</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mi>Z</mml:mi></mml:mfrac></mml:mrow><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mi>h</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>E</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow></mml:mstyle></mml:math>
</disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-2">Eq. (2)</xref>, <inline-formula id="ieqn-13">
<mml:math id="mml-ieqn-13"><mml:mi>Z</mml:mi></mml:math>
</inline-formula> denotes the partition function which guarantees a valid and a likelihood distribution. This is evaluated by totalling each feasible module configuration.</p>
<p>It is to be noted that in the overview of top-down calculation, the hidden as well as the visible layers of the 3-DHBM model are calculated using the executable equations given below.</p>
<p><disp-formula id="eqn-3"><label>(3)</label>
<mml:math id="mml-eqn-3" display="block"><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mo movablelimits="false">&#x220F;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:munder><mml:mtext>&#x00A0;</mml:mtext><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>h</mml:mi><mml:mi>j</mml:mi><mml:mn>1</mml:mn></mml:msubsup><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>X</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mspace width="thinmathspace" /><mml:mrow><mml:mi mathvariant="normal">w</mml:mi><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">t</mml:mi><mml:mi mathvariant="normal">h</mml:mi></mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>h</mml:mi><mml:mi>j</mml:mi><mml:mn>1</mml:mn></mml:msubsup><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>X</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mi>&#x03D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mi>U</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mi>y</mml:mi></mml:mrow><mml:mn>1</mml:mn></mml:msubsup><mml:mo>+</mml:mo><mml:msubsup><mml:mrow><mml:mo 
movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mi>i</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mi>i</mml:mi></mml:mrow><mml:mn>1</mml:mn></mml:msubsup><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mi>k</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>j</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:msubsup><mml:msubsup><mml:mi>h</mml:mi><mml:mi>k</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-4"><label>(4)</label>
<mml:math id="mml-eqn-4" display="block"><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mo movablelimits="false">&#x220F;</mml:mo></mml:mrow><mml:mrow><mml:mi>k</mml:mi></mml:mrow></mml:munder><mml:mtext>&#x00A0;</mml:mtext><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>h</mml:mi><mml:mi>k</mml:mi><mml:mn>2</mml:mn></mml:msubsup><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mspace width="thinmathspace" /><mml:mrow><mml:mi mathvariant="normal">w</mml:mi><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">t</mml:mi><mml:mi mathvariant="normal">h</mml:mi></mml:mrow><mml:mspace width="thinmathspace" /><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:msubsup><mml:mi>h</mml:mi><mml:mi>k</mml:mi><mml:mn>2</mml:mn></mml:msubsup><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mi>&#x03D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mi>U</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>y</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:msubsup><mml:mo>+</mml:mo><mml:msubsup><mml:mrow><mml:mo 
movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mi>j</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>k</mml:mi><mml:mi>j</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:msubsup><mml:msubsup><mml:mi>h</mml:mi><mml:mi>j</mml:mi><mml:mn>1</mml:mn></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-5"><label>(5)</label>
<mml:math id="mml-eqn-5" display="block"><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>x</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:mo movablelimits="false">&#x220F;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:munder><mml:mtext>&#x00A0;</mml:mtext><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo><mml:mspace width="thinmathspace" /><mml:mrow><mml:mi mathvariant="normal">w</mml:mi><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">t</mml:mi><mml:mi mathvariant="normal">h</mml:mi></mml:mrow><mml:mspace width="thinmathspace" /><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>h</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mi>&#x03D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mi>j</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:msubsup><mml:mi>W</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mi>i</mml:mi></mml:mrow><mml:mn>1</mml:mn></mml:msubsup><mml:msubsup><mml:mi>h</mml:mi><mml:mi>j</mml:mi><mml:mn>1</mml:mn></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-6"><label>(6)</label>
<mml:math id="mml-eqn-6" display="block"><mml:mi>p</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mi>y</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>h</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x03A3;</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:msubsup><mml:mi>U</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mi>y</mml:mi></mml:mrow><mml:mn>1</mml:mn></mml:msubsup><mml:msubsup><mml:mi>h</mml:mi><mml:mi>j</mml:mi><mml:mn>1</mml:mn></mml:msubsup><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x03A3;</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:msubsup><mml:mi>U</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mi>y</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:msubsup><mml:msubsup><mml:mi>h</mml:mi><mml:mi>j</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mrow><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>y</mml:mi><mml:mo>&#x2217;</mml:mo></mml:mrow><mml:mtext>&#x00A0;</mml:mtext></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mrow><mml:msub><mml:mi mathvariant="normal">&#x03A3;</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:msubsup><mml:mi>U</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mrow><mml:mo>&#x22C6;</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mn>1</mml:mn></mml:msubsup><mml:msubsup><mml:mi>h</mml:mi><mml:mi>j</mml:mi><mml:mn>1</mml:mn></mml:msubsup><mml:mo>+</mml:mo><mml:mrow><mml:msub><mml:mi 
mathvariant="normal">&#x03A3;</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:msubsup><mml:mi>U</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mrow><mml:mo>&#x22C6;</mml:mo></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mn>2</mml:mn></mml:msubsup><mml:msubsup><mml:mi>h</mml:mi><mml:mi>j</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mrow></mml:msup></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math>
</disp-formula></p>
<p>In this expression, a logistic sigmoid or an activation function is denoted by <inline-formula id="ieqn-14">
<mml:math id="mml-ieqn-14"><mml:mi>&#x03D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>v</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mi>v</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-15">
<mml:math id="mml-ieqn-15"><mml:mi>y</mml:mi></mml:math>
</inline-formula> is utilized to access a specific class filter from <inline-formula id="ieqn-16">
<mml:math id="mml-ieqn-16"><mml:mrow><mml:msup><mml:mi>U</mml:mi><mml:mi>l</mml:mi></mml:msup></mml:mrow></mml:math>
</inline-formula>. In order to adapt the mechanism for distinct kinds of inputs like continuously-valued parameters, <inline-formula id="ieqn-17">
<mml:math id="mml-ieqn-17"><mml:mi>&#x03D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>v</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> is substituted with some other alternative functions like Rectified Linear Unit (ReLU). The subset of a formula can be used as a fixed-point formula to run the mean-field inference in the deep structure to obtain <inline-formula id="ieqn-18">
<mml:math id="mml-ieqn-18"><mml:mi>&#x03BC;</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>&#x03BC;</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>&#x03BC;</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math>
</inline-formula>. It is to be noted that the dependence among those conditions and a consequent bottom-up pass calculation should be weighed double to initialize the mean field cycling via the <xref ref-type="disp-formula" rid="eqn-3">Eqs. (3)</xref>&#x2013;<xref ref-type="disp-formula" rid="eqn-6">(6)</xref> so as to achieve the reconstructed model of the input and target values (or predictive models). <xref ref-type="fig" rid="fig-2">Fig. 2</xref> portrays the infrastructure of the DHBM technique.</p>
<fig id="fig-2">
<label>Figure 2</label>
<caption>
<title>Structure of the DHBM technique</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-2.tif"/>
</fig>
<p>In order to accelerate the prediction process and training time, the DHBM structure is expanded by means of a separate auxiliary network or a co-model that is formerly applied to infer the state of the hidden parameters in the DBM model. Here, the aim is to exploit the individual bottom-up pass. MLP or the detection model, performs a part of the approximation function that is successfully merged with the deep structures of the concentration. Further, it is also trained based on the gradient-descent method. During the fundamental co-training of the detection model, it is anticipated that the mean-field parameter of the target model remains unchanged. Then, an individual learning step is experimentally demonstrated to study the realistic training of the DBM model. Similar principles are claimed for training an in-depth hybrid structure, i.e., DHBM too.</p>
<p>In this detection model, the weight is initialized to DHBM at the beginning of training and is calculated as a completely-factorized element, as given below.</p>
<p><disp-formula id="eqn-7"><label>(7)</label>
<mml:math id="mml-eqn-7" display="block"><mml:mrow><mml:msup><mml:mi>Q</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>h</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>v</mml:mi><mml:mo>;</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x220F;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:mrow></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x220F;</mml:mo></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:mrow></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:msup><mml:mi>q</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mi>h</mml:mi><mml:mi>j</mml:mi><mml:mn>1</mml:mn></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:msup><mml:mi>q</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mi>h</mml:mi><mml:mi>k</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-7">Eq. (7)</xref>, the likelihood of <inline-formula id="ieqn-19">
<mml:math id="mml-ieqn-19"><mml:mrow><mml:msup><mml:mi>q</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mi>h</mml:mi><mml:mi>i</mml:mi><mml:mi>l</mml:mi></mml:msubsup><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mi>v</mml:mi><mml:mi>i</mml:mi><mml:mi>l</mml:mi></mml:msubsup></mml:math>
</inline-formula> for layers <inline-formula id="ieqn-20">
<mml:math id="mml-ieqn-20"><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn></mml:math>
</inline-formula> and <inline-formula id="ieqn-21">
<mml:math id="mml-ieqn-21"><mml:mi>v</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>v</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>v</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:math>
</inline-formula> is high. Then, the detection model is run with the help of a variable <inline-formula id="ieqn-22">
<mml:math id="mml-ieqn-22"><mml:mrow><mml:msup><mml:mi mathvariant="normal">&#x0398;</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>R</mml:mi><mml:mn>1</mml:mn></mml:msup></mml:mrow><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>R</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</inline-formula> (neglecting bias term for simplicity). The equation given below shows the feedforward process.</p>
<p><disp-formula id="eqn-8"><label>(8)</label>
<mml:math id="mml-eqn-8" display="block"><mml:msubsup><mml:mi>v</mml:mi><mml:mi>j</mml:mi><mml:mn>1</mml:mn></mml:msubsup><mml:mo>=</mml:mo><mml:mi>&#x03D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>D</mml:mi></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mn>2</mml:mn><mml:msubsup><mml:mi>R</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow><mml:mn>1</mml:mn></mml:msubsup><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-9"><label>(9)</label>
<mml:math id="mml-eqn-9" display="block"><mml:msubsup><mml:mi>v</mml:mi><mml:mi>j</mml:mi><mml:mn>2</mml:mn></mml:msubsup><mml:mo>=</mml:mo><mml:mi>&#x03D5;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mrow><mml:msub><mml:mi>H</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:mrow></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:msubsup><mml:mi>R</mml:mi><mml:mrow><mml:mi>j</mml:mi><mml:mi>k</mml:mi></mml:mrow><mml:mn>2</mml:mn></mml:msubsup><mml:msubsup><mml:mi>v</mml:mi><mml:mi>j</mml:mi><mml:mn>1</mml:mn></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:math>
</disp-formula></p>
<p>In these expressions, the inference network weight is doubled at all the layers, except the top-most layer, to compensate for the missing top-down feedback. In this hybrid mechanism, especially at the time of prediction, the structure might straightaway produce a suitable calculation for <inline-formula id="ieqn-23">
<mml:math id="mml-ieqn-23"><mml:mi>y</mml:mi></mml:math>
</inline-formula> with the help of the trained detection model to infer the hidden state of the DHBM model.</p>
<p>The detection model can be trained based on <xref ref-type="disp-formula" rid="eqn-10">Eq. (10)</xref>:</p>
<p><disp-formula id="eqn-10"><label>(10)</label>
<mml:math id="mml-eqn-10" display="block"><mml:mi>K</mml:mi><mml:mi>L</mml:mi><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mi>Q</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mi>F</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>h</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>v</mml:mi><mml:mo>;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mrow><mml:msup><mml:mi>Q</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>h</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>v</mml:mi><mml:mo>;</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo stretchy="false">)</mml:mo><mml:mo>=</mml:mo><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mi>i</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mi>i</mml:mi><mml:mtext>&#x00A0;</mml:mtext></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03BC;</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo 
stretchy="false">)</mml:mo><mml:mi>l</mml:mi><mml:mi>o</mml:mi><mml:mi>g</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mrow><mml:msub><mml:mi>v</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:math>
</disp-formula></p>
<p>This shows a minimized Kullback-Leibler (KL) divergence between <inline-formula id="ieqn-24">
<mml:math id="mml-ieqn-24"><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>Q</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>h</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>v</mml:mi><mml:mo>;</mml:mo><mml:mi>v</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math>
</inline-formula>, the factorial posterior of the detection model and <inline-formula id="ieqn-25">
<mml:math id="mml-ieqn-25"><mml:mtext>&#x00A0;</mml:mtext><mml:mrow><mml:msup><mml:mi>Q</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mi>F</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>h</mml:mi><mml:mrow><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mi>v</mml:mi><mml:mo>;</mml:mo><mml:mi>&#x03BC;</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:math>
</inline-formula> that corresponds to the posterior of the DBM mean-field.</p>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Hyperparameter Tuning Using DOA</title>
<p>In order to fine-tune the hyperparameters involved in the DHBM technique, the DOA approach is exploited in this study. DOA is a game-based optimization approach that simulates the old-age game rules i.e., dice games. In this DOA approach, the primary location of a player is randomly generated on the playing field i.e., problem description space, as expressed in the following equation [<xref ref-type="bibr" rid="ref-23">23</xref>]:</p>
<p><disp-formula id="eqn-11"><label>(11)</label>
<mml:math id="mml-eqn-11" display="block"><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msubsup><mml:mi>x</mml:mi><mml:mi>i</mml:mi><mml:mn>1</mml:mn></mml:msubsup><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:msubsup><mml:mi>x</mml:mi><mml:mi>i</mml:mi><mml:mi>d</mml:mi></mml:msubsup><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:msubsup><mml:mi>x</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:math>
</disp-formula></p>
<p>After the formation of the system, the rule is quantified. The players compete in line with the game rules set earlier and determine the winner.</p>
<p><bold>Calculation of each player&#x2019;s score</bold></p>
<p>A fitness function is applied to simulate the score of all the players. A high score is allocated to the player with the best position, calculated as follows.</p>
<p><disp-formula id="eqn-12"><label>(12)</label>
<mml:math id="mml-eqn-12" display="block"><mml:mi>S</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mfrac><mml:mrow><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mrow><mml:msub><mml:mi>t</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>p</mml:mi><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>y</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>N</mml:mi></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mrow><mml:msub><mml:mi>t</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>p</mml:mi><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>y</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>w</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math>
</disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-12">Eq. (12)</xref>, <inline-formula id="ieqn-26">
<mml:math id="mml-ieqn-26"><mml:mi>S</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> refers to the score of a player <italic>i</italic><inline-formula id="ieqn-27">
<mml:math id="mml-ieqn-27"><mml:mo>,</mml:mo><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mrow><mml:msub><mml:mi>t</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> denotes their fitness function value, <inline-formula id="ieqn-28">
<mml:math id="mml-ieqn-28"><mml:mi>N</mml:mi></mml:math>
</inline-formula> indicates the number of players, <inline-formula id="ieqn-29">
<mml:math id="mml-ieqn-29"><mml:mi>p</mml:mi><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>y</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mtext>&#x00A0;</mml:mtext><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> shows the location of the optimal player, and <inline-formula id="ieqn-30">
<mml:math id="mml-ieqn-30"><mml:mi>p</mml:mi><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>y</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>w</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> shows the location of the worst player as given below.</p>
<p><disp-formula id="eqn-13"><label>(13)</label>
<mml:math id="mml-eqn-13" display="block"><mml:mi>p</mml:mi><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>y</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:mi>e</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mi mathvariant="normal">l</mml:mi><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">c</mml:mi><mml:mi mathvariant="normal">a</mml:mi><mml:mi mathvariant="normal">t</mml:mi><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">n</mml:mi><mml:mspace width="thinmathspace" /><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">f</mml:mi><mml:mspace width="thinmathspace" /><mml:mi mathvariant="normal">m</mml:mi><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">n</mml:mi></mml:mrow><mml:mspace width="thinmathspace" /><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mrow><mml:msub><mml:mi>t</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mspace width="thinmathspace" /><mml:mi mathvariant="normal">&#x0026;</mml:mi><mml:mspace width="thinmathspace" /><mml:mi>j</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mrow><mml:mo>:</mml:mo></mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:math>
</disp-formula></p>
<p><disp-formula id="eqn-14"><label>(14)</label>
<mml:math id="mml-eqn-14" display="block"><mml:mi>p</mml:mi><mml:mi>l</mml:mi><mml:mi>a</mml:mi><mml:mi>y</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mi>w</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>s</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mi mathvariant="normal">l</mml:mi><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">c</mml:mi><mml:mi mathvariant="normal">a</mml:mi><mml:mi mathvariant="normal">t</mml:mi><mml:mi mathvariant="normal">i</mml:mi><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">n</mml:mi><mml:mspace width="thinmathspace" /><mml:mi mathvariant="normal">o</mml:mi><mml:mi mathvariant="normal">f</mml:mi><mml:mspace width="thinmathspace" /><mml:mi mathvariant="normal">m</mml:mi><mml:mi mathvariant="normal">a</mml:mi><mml:mi mathvariant="normal">x</mml:mi></mml:mrow><mml:mspace width="thinmathspace" /><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mrow><mml:msub><mml:mi>t</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mspace width="thinmathspace" /><mml:mi mathvariant="normal">&#x0026;</mml:mi><mml:mspace width="thinmathspace" /><mml:mi>j</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mrow><mml:mo>:</mml:mo></mml:mrow><mml:mi>N</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:math>
</disp-formula></p>
<p><bold>Tossing dice for each player</bold></p>
<p>Here, every player tosses a dice. A dice count can be a discrete value between 1 and 6 that signifies the number of players guided by every player and is expressed as follows.</p>
<p><disp-formula id="eqn-15"><label>(15)</label>
<mml:math id="mml-eqn-15" display="block"><mml:mi>D</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>=</mml:mo><mml:mi>K</mml:mi><mml:mi mathvariant="normal">&#x0026;</mml:mi><mml:mi>K</mml:mi><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mn>2</mml:mn><mml:mo>,</mml:mo><mml:mn>3</mml:mn><mml:mo>,</mml:mo><mml:mn>4</mml:mn><mml:mo>,</mml:mo><mml:mn>5</mml:mn><mml:mo>,</mml:mo><mml:mn>6</mml:mn></mml:mrow><mml:mo>}</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:math>
</disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-15">Eq. (15)</xref>, <inline-formula id="ieqn-31">
<mml:math id="mml-ieqn-31"><mml:mi>D</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> refers to the dice count for the <inline-formula id="ieqn-32">
<mml:math id="mml-ieqn-32"><mml:mi>i</mml:mi></mml:math>
</inline-formula>-<inline-formula id="ieqn-33">
<mml:math id="mml-ieqn-33"><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:math>
</inline-formula> player.</p>
<p><bold>Selection of the Guide&#x2019;s players for each player</bold></p>
<p>For every player, according to the count of the dice (K), a player guide is arbitrarily chosen amongst the players, as shown below</p>
<p><disp-formula id="eqn-16"><label>(16)</label>
<mml:math id="mml-eqn-16" display="block"><mml:msubsup><mml:mi>X</mml:mi><mml:mrow><mml:mi>G</mml:mi><mml:mi>u</mml:mi><mml:mi>i</mml:mi><mml:mi>d</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mi>k</mml:mi></mml:msubsup><mml:mo>=</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:mo>:</mml:mo></mml:mrow><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>K</mml:mi></mml:msub></mml:mrow><mml:mo>,</mml:mo></mml:math>
</disp-formula></p>
<p>In <xref ref-type="disp-formula" rid="eqn-16">Eq. (16)</xref>, <inline-formula id="ieqn-34">
<mml:math id="mml-ieqn-34"><mml:msubsup><mml:mi>X</mml:mi><mml:mrow><mml:mi>G</mml:mi><mml:mi>u</mml:mi><mml:mi>i</mml:mi><mml:mi>d</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mi>k</mml:mi></mml:msubsup></mml:math>
</inline-formula> refers to the location of the guide player with index<inline-formula id="ieqn-35">
<mml:math id="mml-ieqn-35"><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>k</mml:mi></mml:math>
</inline-formula>.</p>
<p><bold>Update the position of each player.</bold></p>
<p>Here, <inline-formula id="ieqn-36">
<mml:math id="mml-ieqn-36"><mml:mrow><mml:msup><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:math>
</inline-formula> is evaluated by <xref ref-type="disp-formula" rid="eqn-17">Eq. (17)</xref></p>
<p><disp-formula id="eqn-17"><label>(17)</label>
<mml:math id="mml-eqn-17" display="block"><mml:mrow><mml:msup><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>=</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mn>0</mml:mn><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msubsup><mml:mrow><mml:mo movablelimits="false">&#x2211;</mml:mo></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>D</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:msubsup><mml:mo>&#x2061;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo></mml:mrow><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mrow><mml:msup><mml:mi>X</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:msubsup><mml:mi>X</mml:mi><mml:mrow><mml:mi>G</mml:mi><mml:mi>u</mml:mi><mml:mi>i</mml:mi><mml:mi>d</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:mrow><mml:mrow><mml:mi>k</mml:mi><mml:mo>,</mml:mo><mml:mi>d</mml:mi></mml:mrow></mml:msubsup></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>g</mml:mi><mml:mi>n</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>S</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mi>S</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mrow><mml:mi>G</mml:mi><mml:mi>u</mml:mi><mml:mi>i</mml:mi><mml:mi>d</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:msub></mml:mrow></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo stretchy="false">)</mml:mo><mml:mo>,</mml:mo></mml:math>
</disp-formula></p>
<p>Now, <inline-formula id="ieqn-37">
<mml:math id="mml-ieqn-37"><mml:mrow><mml:msub><mml:mi>r</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> refers to a random number with a uniform distribution within [0,1] and <inline-formula id="ieqn-38">
<mml:math id="mml-ieqn-38"><mml:mi>S</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mrow><mml:mi>G</mml:mi><mml:mi>u</mml:mi><mml:mi>i</mml:mi><mml:mi>d</mml:mi><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mi>k</mml:mi></mml:msub></mml:mrow></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> denotes the score of a player guide&#x2019;s count<inline-formula id="ieqn-39">
<mml:math id="mml-ieqn-39"><mml:mo>,</mml:mo><mml:mtext>&#x00A0;</mml:mtext><mml:mi>k</mml:mi><mml:mo>.</mml:mo></mml:math>
</inline-formula></p>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Experimental Validation</title>
<p>The proposed DODHBM-ACC method was experimentally validated using two datasets, namely the Waten2004 dataset (dataset 1) and the Khaleej2004 dataset (dataset 2). The first dataset has a total of 4,217 samples under six classes, as depicted in <xref ref-type="table" rid="table-1">Table 1</xref>. The parameter settings are as follows: learning rate: 0.01, dropout: 0.5, batch size: 5, epoch count: 50, and activation: ReLU.</p>
<table-wrap id="table-1"><label>Table 1</label>
<caption>
<title>Details on dataset-1</title></caption>
<table><colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="2">Dataset 1-Waten2004 dataset</th>
</tr>
</thead>
<tbody>
<tr>
<td>Class</td>
<td>No. of samples</td>
</tr>
<tr>
<td>Culture</td>
<td>656</td>
</tr>
<tr>
<td>Economy</td>
<td>965</td>
</tr>
<tr>
<td>Internews</td>
<td>415</td>
</tr>
<tr>
<td>Local</td>
<td>703</td>
</tr>
<tr>
<td>Religion</td>
<td>667</td>
</tr>
<tr>
<td>Sports</td>
<td>811</td>
</tr>
<tr>
<td><bold>Total no. of samples</bold></td>
<td><bold>4217</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The confusion matrices generated by the proposed DODHBM-ACC model on dataset-1 are portrayed in <xref ref-type="fig" rid="fig-3">Fig. 3</xref>. The results indicate that the proposed DODHBM-ACC method achieved improved outcomes under all the aspects. With the entire dataset, the DODHBM-ACC system identified 633 samples as culture class, 941 samples as economy class, 400 samples as Internews class, 689 samples as local class, 635 samples as religion class and 775 samples as sports class. In line with this, with 70% of TR dataset, the proposed DODHBM-ACC approach categorized 435 samples under culture class, 661 samples under economy class, 280 samples under Internews class, 497 samples under local class, 443 samples under religion class and 544 samples under sports class. Similarly, with 30% of TS dataset, the proposed DODHBM-ACC method classified 198 samples under culture class, 280 samples under economy class, 120 samples under Internews class, 192 samples under local class, 192 samples under religion class and 231 samples under sports class respectively.</p>
<fig id="fig-3">
<label>Figure 3</label>
<caption>
<title>Confusion matrices of the DODHBM-ACC approach under dataset-1 (a) Entire dataset, (b) 70% of TR data, and (c) 30% of TS data</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-3.tif"/>
</fig>
<p><xref ref-type="table" rid="table-2">Table 2</xref> demonstrates the overall classification results achieved by the proposed DODHBM-ACC model. With entire dataset, the DODHBM-ACC model reached average <inline-formula id="ieqn-40">
<mml:math id="mml-ieqn-40"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-41">
<mml:math id="mml-ieqn-41"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-42">
<mml:math id="mml-ieqn-42"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-43">
<mml:math id="mml-ieqn-43"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-44">
<mml:math id="mml-ieqn-44"><mml:mi>A</mml:mi><mml:mi>U</mml:mi><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 98.86%, 96.45%, 96.53%, 96.48% and 97.92% correspondingly. Meanwhile, with 70% of TR data, the DODHBM-ACC methodology attained average <inline-formula id="ieqn-45">
<mml:math id="mml-ieqn-45"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-46">
<mml:math id="mml-ieqn-46"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-47">
<mml:math id="mml-ieqn-47"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-48">
<mml:math id="mml-ieqn-48"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-49">
<mml:math id="mml-ieqn-49"><mml:mi>A</mml:mi><mml:mi>U</mml:mi><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 98.97%, 96.81%, 96.80%, 96.80% and 98.09% correspondingly. Also, with 30% of TS data, the proposed DODHBM-ACC approach gained average <inline-formula id="ieqn-50">
<mml:math id="mml-ieqn-50"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-51">
<mml:math id="mml-ieqn-51"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-52">
<mml:math id="mml-ieqn-52"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-53">
<mml:math id="mml-ieqn-53"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-54">
<mml:math id="mml-ieqn-54"><mml:mi>A</mml:mi><mml:mi>U</mml:mi><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 98.60%, 95.61%, 95.90%, 95.74% and 97.53% correspondingly.</p>
<table-wrap id="table-2"><label>Table 2</label>
<caption>
<title>Analytical results of the DODHBM-ACC approach upon dataset-1 under distinct class labels</title></caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="6">Dataset-1</th>
</tr>
</thead>
<tbody><tr>
<td>Labels</td>
<td>Accuracy</td>
<td>Precision</td>
<td>Recall</td>
<td>F-score</td>
<td>AUC score</td>
</tr><tr>
<td colspan="6">Entire dataset</td>
</tr>
<tr>
<td>Culture</td>
<td>99.05</td>
<td>97.38</td>
<td>96.49</td>
<td>96.94</td>
<td>98.01</td>
</tr>
<tr>
<td>Economy</td>
<td>98.93</td>
<td>97.82</td>
<td>97.51</td>
<td>97.66</td>
<td>98.43</td>
</tr>
<tr>
<td>InterNews</td>
<td>99.15</td>
<td>95.01</td>
<td>96.39</td>
<td>95.69</td>
<td>97.92</td>
</tr>
<tr>
<td>Local</td>
<td>98.86</td>
<td>95.30</td>
<td>98.01</td>
<td>96.63</td>
<td>98.52</td>
</tr>
<tr>
<td>Religion</td>
<td>98.84</td>
<td>97.39</td>
<td>95.20</td>
<td>96.29</td>
<td>97.36</td>
</tr><tr>
<td>Sports</td>
<td>98.34</td>
<td>95.80</td>
<td>95.56</td>
<td>95.68</td>
<td>97.28</td>
</tr><tr>
<td><bold>Average</bold></td>
<td><bold>98.86</bold></td>
<td><bold>96.45</bold></td>
<td><bold>96.53</bold></td>
<td><bold>96.48</bold></td>
<td><bold>97.92</bold></td>
</tr><tr>
<td colspan="6">Training phase (70%)</td>
</tr>
<tr>
<td>Culture</td>
<td>98.98</td>
<td>97.32</td>
<td>96.03</td>
<td>96.67</td>
<td>97.77</td>
</tr>
<tr>
<td>Economy</td>
<td>99.02</td>
<td>97.64</td>
<td>98.07</td>
<td>97.85</td>
<td>98.68</td>
</tr>
<tr>
<td>InterNews</td>
<td>99.19</td>
<td>95.56</td>
<td>96.22</td>
<td>95.89</td>
<td>97.87</td>
</tr>
<tr>
<td>Local</td>
<td>99.02</td>
<td>95.95</td>
<td>98.42</td>
<td>97.17</td>
<td>98.78</td>
</tr>
<tr>
<td>Religion</td>
<td>99.02</td>
<td>97.58</td>
<td>96.10</td>
<td>96.83</td>
<td>97.83</td>
</tr><tr>
<td>Sports</td>
<td>98.61</td>
<td>96.80</td>
<td>95.94</td>
<td>96.37</td>
<td>97.59</td>
</tr><tr>
<td><bold>Average</bold></td>
<td><bold>98.97</bold></td>
<td><bold>96.81</bold></td>
<td><bold>96.80</bold></td>
<td><bold>96.80</bold></td>
<td><bold>98.09</bold></td>
</tr><tr>
<td colspan="6">Testing phase (30%)</td>
</tr>
<tr>
<td>Culture</td>
<td>99.21</td>
<td>97.54</td>
<td>97.54</td>
<td>97.54</td>
<td>98.53</td>
</tr>
<tr>
<td>Economy</td>
<td>98.74</td>
<td>98.25</td>
<td>96.22</td>
<td>97.22</td>
<td>97.85</td>
</tr>
<tr>
<td>InterNews</td>
<td>99.05</td>
<td>93.75</td>
<td>96.77</td>
<td>95.24</td>
<td>98.04</td>
</tr>
<tr>
<td>Local</td>
<td>98.50</td>
<td>93.66</td>
<td>96.97</td>
<td>95.29</td>
<td>97.88</td>
</tr>
<tr>
<td>Religion</td>
<td>98.42</td>
<td>96.97</td>
<td>93.20</td>
<td>95.05</td>
<td>96.32</td>
</tr><tr>
<td>Sports</td>
<td>97.71</td>
<td>93.52</td>
<td>94.67</td>
<td>94.09</td>
<td>96.55</td>
</tr>
<tr>
<td><bold>Average</bold></td>
<td><bold>98.60</bold></td>
<td><bold>95.61</bold></td>
<td><bold>95.90</bold></td>
<td><bold>95.74</bold></td>
<td><bold>97.53</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Both Training Accuracy (TRA) and Validation Accuracy (VLA) values, attained by the proposed DODHBM-ACC algorithm on dataset-1, are displayed in <xref ref-type="fig" rid="fig-4">Fig. 4</xref>. The experimental outcomes denote that the proposed DODHBM-ACC approach obtained the maximal TRA and VLA values while VLA values were higher than the TRA values.</p>
<fig id="fig-4">
<label>Figure 4</label>
<caption>
<title>TRA and VLA analyses results of the DODHBM-ACC approach on dataset-1</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-4.tif"/>
</fig>
<p>Both Training Loss (TRL) and Validation Loss (VLL) values, obtained by the proposed DODHBM-ACC technique on dataset-1, are exhibited in <xref ref-type="fig" rid="fig-5">Fig. 5</xref>. The experimental outcomes represent that the proposed DODHBM-ACC algorithm outperformed other methods with minimal TRL and VLL values whereas the VLL values were lower than the TRL values.</p>
<fig id="fig-5">
<label>Figure 5</label>
<caption>
<title>TRL and VLL analyses results of the DODHBM-ACC methodology on dataset-1</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-5.tif"/>
</fig>
<p>A clear precision-recall analysis was conducted upon the proposed DODHBM-ACC methodology using dataset-1, and the results are shown in <xref ref-type="fig" rid="fig-6">Fig. 6</xref>. The figure signifies that the proposed DODHBM-ACC algorithm produced enhanced precision-recall values under all the classes.</p>
<fig id="fig-6">
<label>Figure 6</label>
<caption>
<title>Precision-recall analyses results of the DODHBM-ACC approach on dataset-1</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-6.tif"/>
</fig>
<p>A detailed ROC analysis was conducted upon the presented DODHBM-ACC methodology using dataset-1, and the results are presented in <xref ref-type="fig" rid="fig-7">Fig. 7</xref>. The results indicate that the proposed DODHBM-ACC technique showcased its ability in categorizing the dataset-1 under distinct classes.</p>
<fig id="fig-7">
<label>Figure 7</label>
<caption>
<title>ROC analysis results of the DODHBM-ACC approach on dataset-1</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-7.tif"/>
</fig>
<p>The proposed DODHBM-ACC algorithm was experimentally validated using the Khaleej2004 dataset (dataset 2). The dataset holds 1,498 samples under four classes, as depicted in <xref ref-type="table" rid="table-3">Table 3</xref>.</p>
<table-wrap id="table-3"><label>Table 3</label>
<caption>
<title>Details on dataset-2</title></caption>
<table><colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="2">Dataset 2-Khaleej2004 dataset</th>
</tr>
</thead>
<tbody><tr>
<td>Class</td>
<td>No. of samples</td>
</tr>
<tr>
<td>Economy</td>
<td>217</td>
</tr>
<tr>
<td>Internews</td>
<td>217</td>
</tr>
<tr>
<td>Local</td>
<td>613</td>
</tr>
<tr>
<td>Sports</td>
<td>451</td>
</tr>
<tr>
<td><bold>Total no. of samples</bold></td>
<td><bold>1498</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The confusion matrices generated by the proposed DODHBM-ACC method on dataset-2 are shown in <xref ref-type="fig" rid="fig-8">Fig. 8</xref>. The results indicate that the proposed DODHBM-ACC system displayed improved outcomes under all the aspects. With the entire dataset, the DODHBM-ACC technique identified 214 samples as Economy, 210 samples as Internews, 600 samples as Local class and 438 samples as Sports class respectively. Further, upon 70% of TR dataset, the proposed DODHBM-ACC approach classified 142 samples under Economy, 146 samples under Internews, 430 samples under Local class and 302 samples under Sports class. Meanwhile, with 30% of TS, the presented DODHBM-ACC algorithm categorized 72 samples under Economy, 64 samples under Internews, 170 samples under Local class and 136 samples under Sports class.</p>
<fig id="fig-8">
<label>Figure 8</label>
<caption>
<title>Confusion matrices of the DODHBM-ACC methodology under dataset-2 (a) Entire dataset, (b) 70% of TR data, and (c) 30% of TS dataset</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-8.tif"/>
</fig>
<p><xref ref-type="table" rid="table-4">Table 4</xref> demonstrates the overall classification results achieved by the proposed DODHBM-ACC methodology. With entire dataset, the DODHBM-ACC approach produced average <inline-formula id="ieqn-55">
<mml:math id="mml-ieqn-55"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-56">
<mml:math id="mml-ieqn-56"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-57">
<mml:math id="mml-ieqn-57"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-58">
<mml:math id="mml-ieqn-58"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-59">
<mml:math id="mml-ieqn-59"><mml:mi>A</mml:mi><mml:mi>U</mml:mi><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 98.80%, 97.12%, 97.60%, 97.34% and 98.39% respectively. Eventually, with 70% of TR, the proposed DODHBM-ACC method achieved average <inline-formula id="ieqn-60">
<mml:math id="mml-ieqn-60"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-61">
<mml:math id="mml-ieqn-61"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-62">
<mml:math id="mml-ieqn-62"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-63">
<mml:math id="mml-ieqn-63"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-64">
<mml:math id="mml-ieqn-64"><mml:mi>A</mml:mi><mml:mi>U</mml:mi><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 98.66%, 96.71%, 97.27%, 96.96% and 98.18% correspondingly. Also, with 30% of TS, the presented DODHBM-ACC approach attained average <inline-formula id="ieqn-65">
<mml:math id="mml-ieqn-65"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-66">
<mml:math id="mml-ieqn-66"><mml:mi>p</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>c</mml:mi><mml:mi>n</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-67">
<mml:math id="mml-ieqn-67"><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>a</mml:mi><mml:mi>l</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula>, <inline-formula id="ieqn-68">
<mml:math id="mml-ieqn-68"><mml:mrow><mml:msub><mml:mi>F</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> and <inline-formula id="ieqn-69">
<mml:math id="mml-ieqn-69"><mml:mi>A</mml:mi><mml:mi>U</mml:mi><mml:mrow><mml:msub><mml:mi>C</mml:mi><mml:mrow><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 99.11%, 98.03%, 98.34%, 98.17% and 98.86% correspondingly.</p>
<table-wrap id="table-4"><label>Table 4</label>
<caption>
<title>Analytical results of the DODHBM-ACC approach upon dataset-2 under distinct class labels</title></caption>
<table><colgroup>
<col/>
<col/>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="6">Dataset-2</th>
</tr>
</thead>
<tbody><tr>
<td>Labels</td>
<td>Accuracy</td>
<td>Precision</td>
<td>Recall</td>
<td>F-score</td>
<td>AUC score</td>
</tr><tr>
<td colspan="6">Entire dataset</td>
</tr>
<tr>
<td>Economy</td>
<td>98.73</td>
<td>93.04</td>
<td>98.62</td>
<td>95.75</td>
<td>98.68</td>
</tr>
<tr>
<td>InterNews</td>
<td>99.33</td>
<td>98.59</td>
<td>96.77</td>
<td>97.67</td>
<td>98.27</td>
</tr>
<tr>
<td>Local</td>
<td>98.40</td>
<td>98.20</td>
<td>97.88</td>
<td>98.04</td>
<td>98.32</td>
</tr><tr>
<td>Sports</td>
<td>98.73</td>
<td>98.65</td>
<td>97.12</td>
<td>97.88</td>
<td>98.27</td>
</tr>
<tr>
<td><bold>Average</bold></td>
<td><bold>98.80</bold></td>
<td><bold>97.12</bold></td>
<td><bold>97.60</bold></td>
<td><bold>97.34</bold></td>
<td><bold>98.39</bold></td>
</tr><tr>
<td colspan="6">Training phase (70%)</td>
</tr>
<tr>
<td>Economy</td>
<td>98.66</td>
<td>92.21</td>
<td>98.61</td>
<td>95.30</td>
<td>98.64</td>
</tr>
<tr>
<td>InterNews</td>
<td>99.14</td>
<td>97.99</td>
<td>96.05</td>
<td>97.01</td>
<td>97.86</td>
</tr>
<tr>
<td>Local</td>
<td>98.28</td>
<td>97.95</td>
<td>97.95</td>
<td>97.95</td>
<td>98.24</td>
</tr><tr>
<td>Sports</td>
<td>98.57</td>
<td>98.69</td>
<td>96.49</td>
<td>97.58</td>
<td>97.97</td>
</tr><tr>
<td><bold>Average</bold></td>
<td><bold>98.66</bold></td>
<td><bold>96.71</bold></td>
<td><bold>97.27</bold></td>
<td><bold>96.96</bold></td>
<td><bold>98.18</bold></td>
</tr><tr>
<td colspan="6">Testing phase (30%)</td>
</tr>
<tr>
<td>Economy</td>
<td>98.89</td>
<td>94.74</td>
<td>98.63</td>
<td>96.64</td>
<td>98.78</td>
</tr>
<tr>
<td>InterNews</td>
<td>99.78</td>
<td>100.00</td>
<td>98.46</td>
<td>99.22</td>
<td>99.23</td>
</tr>
<tr>
<td>Local</td>
<td>98.67</td>
<td>98.84</td>
<td>97.70</td>
<td>98.27</td>
<td>98.49</td>
</tr><tr>
<td>Sports</td>
<td>99.11</td>
<td>98.55</td>
<td>98.55</td>
<td>98.55</td>
<td>98.95</td>
</tr>
<tr>
<td><bold>Average</bold></td>
<td><bold>99.11</bold></td>
<td><bold>98.03</bold></td>
<td><bold>98.34</bold></td>
<td><bold>98.17</bold></td>
<td><bold>98.86</bold></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Both TRA and VLA values, acquired by the DODHBM-ACC methodology on dataset-2, are illustrated in <xref ref-type="fig" rid="fig-9">Fig. 9</xref>. The experimental outcomes denote that the proposed DODHBM-ACC approach gained the maximal TRA and VLA values while the VLA values were higher than the TRA values.</p>
<fig id="fig-9">
<label>Figure 9</label>
<caption>
<title>TRA and VLA analyses values of the DODHBM-ACC approach on dataset-2</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-9.tif"/>
</fig>
<p>Both TRL and VLL values, attained by the proposed DODHBM-ACC method on dataset-2, are shown in <xref ref-type="fig" rid="fig-10">Fig. 10</xref>. The experimental outcomes imply that the proposed DODHBM-ACC technique exhibited the least TRL and VLL values while the VLL values were lesser than the TRL values.</p>
<fig id="fig-10">
<label>Figure 10</label>
<caption>
<title>TRL and VLL analyses results of the DODHBM-ACC approach on dataset-2</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-10.tif"/>
</fig>
<p>A clear precision-recall analysis was conducted upon the proposed DODHBM-ACC approach on dataset-2 and the results are portrayed in <xref ref-type="fig" rid="fig-11">Fig. 11</xref>. The figure represents that the proposed DODHBM-ACC algorithm produced enhanced precision-recall values under all classes.</p>
<fig id="fig-11">
<label>Figure 11</label>
<caption>
<title>Precision-recall analysis results of the DODHBM-ACC approach on dataset-2</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-11.tif"/>
</fig>
<p>A brief ROC analysis was conducted upon the proposed DODHBM-ACC method using dataset-2, and the results are shown in <xref ref-type="fig" rid="fig-12">Fig. 12</xref>. The results denote that the proposed DODHBM-ACC methodology established its ability in categorizing the dataset-2 under distinct classes.</p>
<fig id="fig-12">
<label>Figure 12</label>
<caption>
<title>ROC analysis results of the DODHBM-ACC approach under dataset-2</title></caption>
<graphic mimetype="image" mime-subtype="tif" xlink:href="CSSE_34609-fig-12.tif"/>
</fig>
<p><xref ref-type="table" rid="table-5">Table 5</xref> highlights the comparative inspection results accomplished by the proposed DODHBM-ACC model on two datasets [<xref ref-type="bibr" rid="ref-16">16</xref>]. The results imply that the DODHBM-ACC model achieved improved performance on both the datasets. For instance, on dataset-1, the proposed DODHBM-ACC model achieved an increased <inline-formula id="ieqn-70">
<mml:math id="mml-ieqn-70"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 98.60%, whereas the KNN, LOR, NB, RF and the SVM models obtained the least <inline-formula id="ieqn-71">
<mml:math id="mml-ieqn-71"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 94.96%, 96.07%, 95.56%, 97.26% and 96.80% respectively.</p>
<table-wrap id="table-5"><label>Table 5</label>
<caption>
<title>Comparative analysis results of the DODHBM-ACC algorithm and other existing approaches on two datasets</title></caption>
<table><colgroup>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th colspan="3">Accuracy (%)</th>
</tr>
</thead>
<tbody><tr>
<td>Methods</td>
<td>Dataset-1</td>
<td>Dataset-2</td>
</tr>
<tr>
<td>DODHBM-ACC</td>
<td>98.60</td>
<td>99.11</td>
</tr>
<tr>
<td>KNN algorithm</td>
<td>94.96</td>
<td>95.03</td>
</tr>
<tr>
<td>LOR Model</td>
<td>96.07</td>
<td>96.14</td>
</tr>
<tr>
<td>NB Model</td>
<td>95.56</td>
<td>95.08</td>
</tr>
<tr>
<td>Random forest algorithm</td>
<td>97.26</td>
<td>95.96</td>
</tr>
<tr>
<td>SVM model</td>
<td>96.80</td>
<td>95.63</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Moreover, on dataset-2, the presented DODHBM-ACC approach offered an increased <inline-formula id="ieqn-72">
<mml:math id="mml-ieqn-72"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> of 99.11%, whereas the other models such as KNN, LOR, NB, RF and SVM achieved low <inline-formula id="ieqn-73">
<mml:math id="mml-ieqn-73"><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mi>y</mml:mi></mml:msub></mml:mrow></mml:math>
</inline-formula> values such as 95.03%, 96.14%, 95.08%, 95.96% and 95.63% correspondingly. Thus, the proposed DODHBM-ACC model can be utilized for the classification of the Arabic text in an effectual manner.</p>
</sec>
<sec id="s5">
<label>5</label>
<title>Conclusion</title>
<p>In the current study, a new DODHBM-ACC model has been developed for automated Arabic corpus classification. The presented DODHBM-ACC model primarily relies on different stages of pre-processing and word2vec word embedding process. In addition, the presented model uses the DHBM-based classification and DOA-based hyperparameter tuning processes. To adjust the hyperparameters of the DHBM technique, the DOA is exploited in this study. The experimental analysis was conducted to establish the supreme performance of the proposed DODHBM-ACC model. The outcomes confirmed the supremacy of the proposed DODHBM-ACC model over other recent approaches. In the future, the feature selection models can be utilized to reduce the computational complexity of the DODHBM-ACC model.</p>
</sec>
</body>
<back>
<sec>
<title>Funding Statement</title>
<p><funding-source>Princess Nourah bint Abdulrahman University Researchers</funding-source> Supporting Project number (<award-id>PNURSP2022R263</award-id>), <funding-source>Princess Nourah bint Abdulrahman University</funding-source>, Riyadh, Saudi Arabia. The authors would like to thank the <funding-source>Deanship of Scientific Research at Umm Al-Qura University</funding-source> for supporting this work by Grant Code: (<award-id>22UQU4310373DSR53</award-id>).</p>
</sec>
<sec sec-type="COI-statement">
<title>Conflicts of Interest</title>
<p>The authors declare that they have no conflicts of interest to report regarding the present study.</p>
</sec>
<ref-list content-type="authoryear">
<title>References</title>
<ref id="ref-1"><label>[1]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Sayed</surname></string-name>, <string-name><given-names>R. K.</given-names> <surname>Salem</surname></string-name> and <string-name><given-names>A. E.</given-names> <surname>Khder</surname></string-name></person-group>, &#x201C;<article-title>A survey of Arabic text classification approaches</article-title>,&#x201D; <source>International Journal of Computer Applications in Technology</source>, vol. <volume>59</volume>, no. <issue>3</issue>, pp. <fpage>236</fpage>&#x2013;<lpage>251</lpage>, <year>2019</year>.</mixed-citation></ref>
<ref id="ref-2"><label>[2]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S. L. M.</given-names> <surname>Sainte</surname></string-name> and <string-name><given-names>N.</given-names> <surname>Alalyani</surname></string-name></person-group>, &#x201C;<article-title>Firefly algorithm based feature selection for Arabic text classification</article-title>,&#x201D; <source>Journal of King Saud University&#x2014;Computer and Information Sciences</source>, vol. <volume>32</volume>, no. <issue>3</issue>, pp. <fpage>320</fpage>&#x2013;<lpage>328</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-3"><label>[3]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. S.</given-names> <surname>Alammary</surname></string-name></person-group>, &#x201C;<article-title>BERT models for Arabic text classification: A systematic review</article-title>,&#x201D; <source>Applied Sciences</source>, vol. <volume>12</volume>, no. <issue>11</issue>, pp. <fpage>5720</fpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-4"><label>[4]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wasabi</surname></string-name></person-group>, &#x201C;<article-title>A smart English text zero-watermarking approach based on third-level order and word mechanism of Markov model</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>65</volume>, no. <issue>2</issue>, pp. <fpage>1137</fpage>&#x2013;<lpage>1156</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-5"><label>[5]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H.</given-names> <surname>Chantar</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Mafarja</surname></string-name>, <string-name><given-names>H.</given-names> <surname>Alsawalqah</surname></string-name>, <string-name><given-names>A. A.</given-names> <surname>Heidari</surname></string-name>, <string-name><given-names>I.</given-names> <surname>Aljarah</surname></string-name> <etal>et al.</etal></person-group><italic>,</italic> &#x201C;<article-title>Feature selection using binary grey wolf optimizer with elite-based crossover for Arabic text classification</article-title>,&#x201D; <source>Neural Computing and Applications</source>, vol. <volume>32</volume>, no. <issue>16</issue>, pp. <fpage>12201</fpage>&#x2013;<lpage>12220</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-6"><label>[6]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wasabi</surname></string-name></person-group>, &#x201C;<article-title>Proposing high-smart approach for content authentication and tampering detection of Arabic text transmitted via internet</article-title>,&#x201D; <source>IEICE Transactions on Information and Systems</source>, vol. <volume>E103.D</volume>, no. <issue>10</issue>, pp. <fpage>2104</fpage>&#x2013;<lpage>2112</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-7"><label>[7]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A.</given-names> <surname>Elnagar</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Al-Debsi</surname></string-name> and <string-name><given-names>O.</given-names> <surname>Einea</surname></string-name></person-group>, &#x201C;<article-title>Arabic text classification using deep learning models</article-title>,&#x201D; <source>Information Processing &#x0026; Management</source>, vol. <volume>57</volume>, no. <issue>1</issue>, pp. <fpage>102121</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-8"><label>[8]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name></person-group>, &#x201C;<article-title>A hybrid intelligent approach for content authentication and tampering detection of Arabic text transmitted via internet</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>66</volume>, no. <issue>1</issue>, pp. <fpage>195</fpage>&#x2013;<lpage>211</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-9"><label>[9]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>S.</given-names> <surname>Bahassine</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Madani</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Al-Sarem</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Kissi</surname></string-name></person-group>, &#x201C;<article-title>Feature selection using an improved Chi-square for Arabic text classification</article-title>,&#x201D; <source>Journal of King Saud University&#x2014;Computer and Information Sciences</source>, vol. <volume>32</volume>, no. <issue>2</issue>, pp. <fpage>225</fpage>&#x2013;<lpage>231</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-10"><label>[10]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. N.</given-names> <surname>Al-Wesabi</surname></string-name></person-group>, &#x201C;<article-title>Entropy-based watermarking approach for sensitive tamper detection of Arabic text</article-title>,&#x201D; <source>Computers, Materials &#x0026; Continua</source>, vol. <volume>67</volume>, no. <issue>3</issue>, pp. <fpage>3635</fpage>&#x2013;<lpage>3648</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-11"><label>[11]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>K. A.</given-names> <surname>Wahdan</surname></string-name>, <string-name><given-names>S.</given-names> <surname>Hantoobi</surname></string-name>, <string-name><given-names>S. A.</given-names> <surname>Salloum</surname></string-name> and <string-name><given-names>K.</given-names> <surname>Shaalan</surname></string-name></person-group>, &#x201C;<article-title>A systematic review of text classification research based on deep learning models in Arabic language</article-title>,&#x201D; <source>International Journal of Electrical and Computer Engineering</source>, vol. <volume>10</volume>, no. <issue>6</issue>, pp. <fpage>6629</fpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-12"><label>[12]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. El</given-names> <surname>Kah</surname></string-name> and <string-name><given-names>I.</given-names> <surname>Zeroual</surname></string-name></person-group>, &#x201C;<article-title>The effects of pre-processing techniques on Arabic text classification</article-title>,&#x201D; <source>International Journal of Advanced Trends in Computer Science and Engineering</source>, vol. <volume>10</volume>, no. <issue>1</issue>, pp. <fpage>41</fpage>&#x2013;<lpage>48</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-13"><label>[13]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>N.</given-names> <surname>Aljedani</surname></string-name>, <string-name><given-names>R.</given-names> <surname>Alotaibi</surname></string-name> and <string-name><given-names>M.</given-names> <surname>Taileb</surname></string-name></person-group>, &#x201C;<article-title>HMATC: Hierarchical multi-label Arabic text classification model using machine learning</article-title>,&#x201D; <source>Egyptian Informatics Journal</source>, vol. <volume>22</volume>, no. <issue>3</issue>, pp. <fpage>225</fpage>&#x2013;<lpage>237</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-14"><label>[14]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Hijazi</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Zeki</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Ismail</surname></string-name></person-group>, &#x201C;<article-title>Arabic text classification based on semantic and relation</article-title>,&#x201D; <source>Computer Science</source>, vol. <volume>37</volume>, no. <issue>4</issue>, pp. <fpage>992</fpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-15"><label>[15]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. -Z.</given-names> <surname>El-Alami</surname></string-name>, <string-name><given-names>S. O. El</given-names> <surname>Alaoui</surname></string-name> and <string-name><given-names>N.</given-names> <surname>En-Nahnahi</surname></string-name></person-group>, &#x201C;<article-title>Deep neural models and retrofitting for Arabic text categorization</article-title>,&#x201D; <source>International Journal of Intelligent Information Technologies</source>, vol. <volume>16</volume>, no. <issue>2</issue>, pp. <fpage>74</fpage>&#x2013;<lpage>86</lpage>, <year>2020</year>.</mixed-citation></ref>
<ref id="ref-16"><label>[16]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Hijazi</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Zeki</surname></string-name> and <string-name><given-names>A.</given-names> <surname>Ismail</surname></string-name></person-group>, &#x201C;<article-title>Arabic text classification using hybrid feature selection method using chi-square binary artificial bee colony algorithm</article-title>,&#x201D; <source>International Journal of Mathematics and Computer Science</source>, vol. <volume>16</volume>, no. <issue>1</issue>, pp. <fpage>213</fpage>&#x2013;<lpage>228</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-17"><label>[17]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>F. S.</given-names> <surname>Al-Anzi</surname></string-name> and <string-name><given-names>D.</given-names> <surname>AbuZeina</surname></string-name></person-group>, &#x201C;<article-title>Beyond vector space model for hierarchical Arabic text classification: A Markov chain approach</article-title>,&#x201D; <source>Information Processing &#x0026; Management</source>, vol. <volume>54</volume>, no. <issue>1</issue>, pp. <fpage>105</fpage>&#x2013;<lpage>115</lpage>, <year>2018</year>.</mixed-citation></ref>
<ref id="ref-18"><label>[18]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y. A.</given-names> <surname>Alhaj</surname></string-name>, <string-name><given-names>A.</given-names> <surname>Dahou</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Al-qaness</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Abualigah</surname></string-name> and <string-name><given-names>A. A.</given-names> <surname>Almaweri</surname></string-name></person-group>, &#x201C;<article-title>A novel text classification technique using improved particle swarm optimization: A case study of Arabic language</article-title>,&#x201D; <source>Future Internet</source>, vol. <volume>14</volume>, no. <issue>7</issue>, pp. <fpage>194</fpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-19"><label>[19]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>H. N.</given-names> <surname>Alshaer</surname></string-name>, <string-name><given-names>M. A.</given-names> <surname>Otair</surname></string-name>, <string-name><given-names>L.</given-names> <surname>Abualigah</surname></string-name>, <string-name><given-names>M.</given-names> <surname>Alshinwan</surname></string-name> and <string-name><given-names>A. M.</given-names> <surname>Khasawneh</surname></string-name></person-group>, &#x201C;<article-title>Feature selection method using improved CHI Square on Arabic text classifiers: Analysis and application</article-title>,&#x201D; <source>Multimedia Tools and Applications</source>, vol. <volume>80</volume>, no. <issue>7</issue>, pp. <fpage>10373</fpage>&#x2013;<lpage>10390</lpage>, <year>2021</year>.</mixed-citation></ref>
<ref id="ref-20"><label>[20]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>A. H.</given-names> <surname>Ababneh</surname></string-name></person-group>, &#x201C;<article-title>Investigating the relevance of Arabic text classification datasets based on supervised learning</article-title>,&#x201D; <source>Journal of Electronic Science and Technology</source>, vol. <volume>20</volume>, no. <issue>2</issue>, pp. <fpage>100160</fpage>, <year>2022</year>.</mixed-citation></ref>
<ref id="ref-21"><label>[21]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>Y.</given-names> <surname>Yao</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Li</surname></string-name>, <string-name><given-names>X.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>P.</given-names> <surname>Liu</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Liang</surname></string-name> <etal>et al.</etal></person-group>, &#x201C;<article-title>Sensing spatial distribution of urban land use by integrating points-of-interest and Google Word2Vec model</article-title>,&#x201D; <source>International Journal of Geographical Information Science</source>, vol. <volume>31</volume>, no. <issue>4</issue>, pp. <fpage>825</fpage>&#x2013;<lpage>848</lpage>, <year>2017</year>.</mixed-citation></ref>
<ref id="ref-22"><label>[22]</label><mixed-citation publication-type="other"><person-group person-group-type="author"><string-name><given-names>A. G.</given-names> <surname>Ororbia</surname> <suffix>II</suffix></string-name>, <string-name><given-names>C. L.</given-names> <surname>Giles</surname></string-name> and <string-name><given-names>D.</given-names> <surname>Reitter</surname></string-name></person-group>, &#x201C;<article-title>Online semi-supervised learning with deep hybrid Boltzmann machines and denoising autoencoders</article-title>,&#x201D; <comment>arXiv preprint arXiv:1511.06964</comment>, <year>2015</year>.</mixed-citation></ref>
<ref id="ref-23"><label>[23]</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><string-name><given-names>M.</given-names> <surname>Dehghani</surname></string-name>, <string-name><given-names>Z.</given-names> <surname>Montazeri</surname></string-name> and <string-name><given-names>O. P.</given-names> <surname>Malik</surname></string-name></person-group>, &#x201C;<article-title>DGO: Dice game optimizer</article-title>,&#x201D; <source>Gazi University Journal of Science</source>, vol. <volume>32</volume>, no. <issue>3</issue>, pp. <fpage>871</fpage>&#x2013;<lpage>882</lpage>, <year>2019</year>.</mixed-citation></ref>
</ref-list>
</back>
</article>
















